using System;
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using miew.Tokenization;
using miew.Debugging;
using miew.Enumerable;
using miew.ReadOnly;
using miew.Concurrency;
using miew.BitArray;
namespace agree
{
using u_stats = ParseControl.Stats._unification;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// The 'count' of IParseChartEdge items encapsulated in the LexicalAnalysis enumerator is *not* necessarily
/// the required size of the parse chart, because tokens may overlap and multiple tokens may claim the same
/// chart position; use the ChartSize property to size the chart.
/// </summary>
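/// <remarks>
/// Typical usage, as a hedged sketch (the surrounding ParseControl, Lexicon, and TokenSet wiring is
/// assumed from the rest of the parser and not shown):
/// <code>
/// var la = new LexicalAnalysis(ctrl, lexicon, token_set);
/// await la.Analyze();                            // fills the bag of AnalysisStack items
/// foreach (IParseObj po in la) { }               // enumerate analyses; size the chart with la.ChartSize
/// </code>
/// </remarks>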
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
[DebuggerDisplay("{ToString(),nq}")]
public sealed partial class LexicalAnalysis : IEnumerable<IParseObj>
{
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
readonly ParseControl ctrl;
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
readonly TokenSet ts;
readonly ConcurrentList<AnalysisStack> token_analyses = new ConcurrentList<AnalysisStack>();
public TokenSet SourceItem { get { return ts; } }
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
public String SourceText { get { return ts.Source.Text; } }
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
public int ChartSize { get { return ts.MinimalSpanCount; } }
public int[] del_daughters;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// Performs pre-parsing analysis of the tokens in a tokenization hypothesis, including lexical and irregular
/// stem lookup, application of morphological and lexical rules, filtering, etc. The hypothesis may be either
/// single-coverage (i.e. 'TokenizationHypothesis') or ad-hoc, where token selection is deferred to the
/// parsing phase (i.e. 'TokenizedString').
///
/// Morpholexical analysis happens in two passes. The surface-to-stem inbound pass (the "downtrace") permutes
/// affixation, regular or irregular, fanning downwards until one or more stems are reached. Then, for each
/// discovered downtrace, an outbound pass follows the downtrace in reverse, checking the license requirements
/// of each downtrace element (aborting on failure), interleaving non-affixing rules where possible, and
/// permuting the skipping of downtrace elements. Thus, at each node of the outbound pass, zero or more
/// stacks fan out upwards.
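///
/// As a concrete (hypothetical) illustration: the surface form "cries" might fan downwards to candidate
/// stems "cri", "cry", and "crie" under competing orthographic subrules; only downtraces ending at a stem
/// actually listed in the lexicon (here "cry") yield stacks, and the outbound pass then re-checks each
/// rule's license by unification while interleaving any applicable non-affixing rules above it.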
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public LexicalAnalysis(ParseControl ctrl, Lexicon lex, TokenSet ts)
{
this.ctrl = ctrl;
this.ts = ts;
this.del_daughters = ctrl.parser_config.DeletedDaughters;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// Find zero or more analysis stacks for each source token and add them to a bag. Note that we operate mostly
/// on the surface form alone: the token 'tok' that is passed in is not referenced unless we need to check
/// adjacency when postulating a multi-word stem. Otherwise the analyzer does not care how the tokens are laid
/// out in the source (overlapping or gapped), and they are returned as a bag that is, in principle, unordered.
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public Task Analyze()
{
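/// one analysis action per source token; 'ctrl.WhenAll' is presumed to dispatch these actions in
/// parallel and return a task which completes when every token has been analyzed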
Action[] rga = new Action[ts.Count];
for (int ix = 0; ix < ts.Count; ix++)
{
var tok = ts[ix];
rga[ix] = () =>
{
DownTrace dt = new DownTraceSurface(this, tok.Text);
token_analyses.AddRange(dt.TryNonAffixingRulesOutbound(tok));
foreach (var z in dt)
ctrl.stats.Morphology.Unification.Add(z.u_stats);
};
}
return ctrl.WhenAll(rga);
//if (ctrl.tw_debug != null)
// DebugOutput();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// Write each gathered analysis stack to the debug writer, listing its transforms from the top of the
/// stack down to the stem.
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void DebugOutput()
{
TextWriter tw = ctrl.config.system.tw_debug;
int i = 0;
foreach (AnalysisStack stk in token_analyses)
{
tw.WriteLineColorSync("$red analysis #{0}", i++);
int j = stk.Count;
foreach (LexicalTransform lx in ((IEnumerable<LexicalTransform>)stk).Reverse())
{
tw.WriteLineColorSync("\t{0,2} $darkyellow {1,-11}$ {2,19}\t{3,-20}",
--j,
"\"" + lx.CurrentForm.StringJoin(" ") + "\"",
lx.GetType().Name,
lx.license.Name);
}
tw.WriteLine();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// DownTrace : inverted tree of morphological transforms from the surface (top) towards the stem (leaves)
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
[DebuggerDisplay("{ToString(),nq}")]
abstract class DownTrace : IEnumerable<DownTrace>
{
internal readonly LexicalAnalysis la;
internal readonly ParseControl ctrl;
protected readonly DownTrace m_prev;
protected String form;
internal u_stats u_stats;
protected DownTrace(LexicalAnalysis la, DownTrace prev, String form)
{
this.la = la;
this.ctrl = la.ctrl;
this.m_prev = prev;
this.form = form;
this.u_stats = new u_stats(ParseControl.Stats.Visibility.Private);
}
Lexicon Lexicon { get { return ctrl.lexicon; } }
TypeMgr tm { get { return ctrl.tm; } }
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// Stacks which permute all licensed orthographic changes are generated on the way down from the surface towards
/// the stem, and recorded in an (inverted) tree of DownTrace objects. Here, when unwinding, non-affixing changes are
/// inserted into these trees, possibly multiplying branches upwards at any point.
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public IEnumerable<AnalysisStack> TryNonAffixingRulesOutbound(TokenSpanToken tok)
{
return OutboundMultiplier(TryAffixingRulesInbound(tok));
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// An arbitrary number of non-affixing rules can interleave. As long as we find any, multiply the returned
/// stacks recursively upwards.
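/// For example, if stack S admits rule r1, and S+r1 in turn admits r2 (with nothing further applying),
/// the sequence produced is S+r1+r2, then S+r1, then S: deepest extensions first, followed by the
/// unmodified stack.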
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
IEnumerable<AnalysisStack> OutboundMultiplier(IEnumerable<AnalysisStack> seq)
{
foreach (AnalysisStack analysis_stack in seq)
{
foreach (var new_stk in OutboundMultiplier(FindNonAffixingRules(analysis_stack)))
yield return new_stk;
/// also return the stack without any non-affixing lexical rules applied
/// todo: is this only valid if it matches the surface form?
yield return analysis_stack;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// Return a new stack for each valid non-affixing lexical rule that can be applied to the top of the
/// specified stack. If no further transformation can be generated on this stack, return an empty sequence.
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
IEnumerable<AnalysisStack> FindNonAffixingRules(AnalysisStack analysis_stack)
{
LexicalTransform lx = analysis_stack.Top;
Tfs candidate = lx.feature_structure;
LexicalRule lxr = lx.license as LexicalRule;
foreach (LexicalRule lex_rule in lxr != null ?
lxr.CompatibleKeyMothers.OfExactType<LexicalRule>() :
Lexicon.non_morph_lexrules)
{
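/// quick-check pre-filter: 'true' indicates that full unification against this rule daughter is
/// presumed to fail, so the expensive attempt is skipped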
if (ctrl.QuickCheckMorph(lex_rule.RuleDaughters[0], candidate))
continue;
Tfs full = ctrl.UnifySection(
lex_rule.RuleDaughters[0],
candidate,
la.del_daughters != null,
false,
ref u_stats);
if (full != null)
{
/// we are outbound and need to fan out upwards so make a copy of the stack below, add the
/// new result, and return the "extra" result
AnalysisStack newstk = new AnalysisStack(analysis_stack);
newstk.Add(new LxNonAffixing(newstk.Top.CurrentForm, lex_rule, full));
yield return newstk;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// Return zero or more lexical analysis stacks for the specified surface form. Each lexical analysis stack that is
/// gathered has exactly one stem, and is built from the stem (at index 0) "upwards." The source token that is
/// supplied is advisory only in the sense that its text does not reflect the current state of morphological
/// processing. It is only used for matching the stem against multi-word lexical entries.
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
IEnumerable<AnalysisStack> TryAffixingRulesInbound(TokenSpanToken tok)
{
/// 0. mere availability of an irregular form will block all regular affixing rules if
/// irregular-forms-only-p is enabled.
List<Lexicon.Irreg> rg_irreg_info = null;
if (Lexicon.irreg_dict != null && !this.HasIrregular)
{
if (Lexicon.irreg_dict.TryGetValue(form, out rg_irreg_info))
Debug.Assert(rg_irreg_info.Count > 0);
}
/// 1. Irregular affix (or non-affixing transform)
/// At this point we are only allowing a single irregular inflection to be added to the downtrace, but
/// this is now easy to reconfigure.
if (rg_irreg_info != null)
{
foreach (Lexicon.Irreg irreg_info in rg_irreg_info)
{
LexicalRule lex_rule = irreg_info.rule;
DownTraceIrregular dti = new DownTraceIrregular(this, irreg_info.stem, lex_rule);
DownTraceSurface dts = this.Surface;
dts.irregs = dts.irregs ?? new List<DownTraceIrregular>();
dts.irregs.Add(dti);
/// propagate downwards towards stem first, and only unify with the stacks that come back, if any
foreach (AnalysisStack analysis_stack in dti.TryNonAffixingRulesOutbound(tok))
{
/// rule check pre-filter
LexicalRule lr = analysis_stack.Top.license as LexicalRule;
if (lr == null || lr.CompatibleKeyMothers.Contains(lex_rule))
{
if (ctrl.QuickCheckMorph(lex_rule.RuleDaughters[0], analysis_stack.Top.feature_structure))
continue;
/// try the unification
Tfs full = ctrl.UnifySection(
lex_rule.RuleDaughters[0],
analysis_stack.Top.feature_structure,
la.del_daughters != null,
false,
ref u_stats);
if (full != null)
{
analysis_stack.Add(new LxIrregular(form, lex_rule, full));
yield return analysis_stack;
}
}
}
u_stats.Add(dti.u_stats);
}
}
/// 2. Regular, affixing lexical rules
foreach (MorphologicalRule morph_rule in Lexicon.morph_lexrules)
{
foreach (MorphologySubrule subrule in morph_rule.Subrules)
{
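/// apply the subrule's orthographic rewrite to hypothesize a less-affixed form; e.g. a (hypothetical)
/// subrule rewriting "ies$" to "y" would map surface "cries" to candidate form "cry"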
String newform = subrule.regex.Replace(form, subrule.replace);
if (newform != form)
{
/// irregular forms only?
if (rg_irreg_info != null && ctrl.parser_config.IrregularFormsOnly)
{
List<DownTraceIrregular> ldti = Surface.irregs;
if (ldti != null && ldti.Any(dti => dti.form == newform && dti.r == morph_rule))
continue;
}
DownTrace dt = new DownTraceRegular(this, newform, morph_rule, subrule);
/// propagate downwards towards stem first, and only unify with the stacks that come back, if any
foreach (AnalysisStack analysis_stack in dt.TryNonAffixingRulesOutbound(tok))
{
/// rule check pre-filter
LexicalRule lr = analysis_stack.Top.license as LexicalRule;
if (lr == null || lr.CompatibleKeyMothers.Contains(morph_rule))
{
if (ctrl.QuickCheckMorph(morph_rule.RuleDaughters[0], analysis_stack.Top.feature_structure))
continue;
Tfs full = ctrl.UnifySection(
morph_rule.RuleDaughters[0],
analysis_stack.Top.feature_structure,
la.del_daughters != null,
false,
ref u_stats);
if (full != null)
{
analysis_stack.Add(new LxRegularAffixing(form, morph_rule, full));
yield return analysis_stack;
}
}
}
u_stats.Add(dt.u_stats);
}
}
}
/// 3. Stem: look up the (fully reduced) form in the lexicon; multi-word entries are keyed on their first
/// lemma and must additionally match an adjacency sequence in the surface input
TokenSpanToken[][] rg_seq = null;
foreach (LexicalEntry le in Lexicon[form])
{
int lex_entry_arity = le.Lemmata.Count;
if (lex_entry_arity > 1)
{
rg_seq = rg_seq ?? tok.RightAdjacencySequences().Select(ie => ie.ToArray()).ToArray();
if (rg_seq.Length > 1)
{
Nop.CodeCoverage();
// probably need to filter the sequences for distinctness here (array-value equality) since there may be dups after truncating
//var yz = rg_seq.Where(rgt => rgt.Length >= lex_entry_arity - 1).Select(ie => ie.Take(lex_entry_arity - 1).ToArray()).ToArray();
//if (yz.Distinct(rgt => rgt.StringJoin("")).Count() != yz.Length)
}
/// ensure that all parts of a multi-word lexical entry match an available adjacency
/// sequence in the surface input
foreach (var adj_seq in rg_seq)
{
/// first token, which is not included in adj_seq, has already been checked
if (1 + adj_seq.Length >= lex_entry_arity)
{
for (int i = 1; i < lex_entry_arity; i++)
if (!Lexicon.TextComparer.Equals(adj_seq[i - 1].Text, le.Lemmata[i]))
goto no_match;
Span sp = new Span(tok.TokenSpan.StartIndex, adj_seq[lex_entry_arity - 2].TokenSpan.EndIndex);
yield return new AnalysisStack(la, new LxMultiTokenStem(sp, le));
/// we have to count all LexicalEntry expansions--which may include weak reference recoveries--
/// as unifications, or else the regression test results for number of unifications might not
/// exactly match
u_stats.RecordEvent(true);
}
no_match:
;
}
}
else
{
yield return new AnalysisStack(la, new LxSingleTokenStem(tok, le));
u_stats.RecordEvent(true);
}
}
/// 4. Check non-initial lemma in a multi-word lexeme (only if there is a spelling change; otherwise this
/// is handled by step #3). Using HasSpellingChange means non-affixing irregulars are blocked here, so
/// "world series" will not generate a stack for plural 'series' in this step. If the requirement were
/// changed to HasTransform instead, the plural "world series" would be generated, but at the expense of
/// also creating another singular analysis, duplicating the (presumably) singular one from step #3. The
/// duplicates scenario seems like the worse problem.
if (HasSpellingChange)
{
foreach (Lexicon.NonInitialMwe mwe in Lexicon.mwe_lookup[form])
{
LexicalEntry le = mwe.lex_entry;
/// check to the left; there will always be at least one left-adjacency sequence to consider
foreach (var adj_left in tok.LeftAdjacencySequences().Select(ie => ie.TakeLast(mwe.index).ToArray()).Where(rgt => rgt.Length == mwe.index))
{
for (int j = 0; j < mwe.index; j++)
if (!Lexicon.TextComparer.Equals(adj_left[j].Text, le.Lemmata[j]))
goto no_match;
/// check to the right; there may be zero or more remaining lemmata to match
int rem = le.Lemmata.Count - (mwe.index + 1);
if (rem == 0)
{
Span sp = new Span(adj_left[0].TokenSpan.StartIndex, tok.TokenSpan.EndIndex);
yield return new AnalysisStack(la, new LxMultiTokenStem(sp, le));
u_stats.RecordEvent(true);
}
else
{
foreach (var adj_right in tok.RightAdjacencySequences().Select(ie => ie.Take(rem).ToArray()).Where(rgt => rgt.Length == rem))
{
for (int j = 0; j < rem; j++)
if (!Lexicon.TextComparer.Equals(adj_right[j].Text, le.Lemmata[mwe.index + 1 + j]))
goto no_match;
Nop.CodeCoverage();
Span sp = new Span(adj_left[0].TokenSpan.StartIndex, adj_right[rem - 1].TokenSpan.EndIndex);
yield return new AnalysisStack(la, new LxMultiTokenStem(sp, le));
u_stats.RecordEvent(true);
}
}
no_match:
;
}
}
}
}
/// <summary>
/// Are there any irregular inflections in this downtrace?
/// </summary>
public bool HasIrregular { get { return this.Any(dt => dt is DownTraceIrregular); } }
/// <summary>
/// Is there any affixation in this downtrace?
/// </summary>
public bool HasTransform { get { return this.Any(dt => dt is DownTraceTransform); } }
/// <summary>
/// Are there any spelling changes in this downtrace?
/// </summary>
public bool HasSpellingChange { get { return this.Any(dt => dt.form != form); } }
public DownTraceSurface Surface
{
get
{
DownTrace dt = this;
while (!(dt is DownTraceSurface))
dt = dt.m_prev;
return (DownTraceSurface)dt;
}
}
/// <summary>
/// Each DownTrace is polymorphic with (the reverse of) the linked list that it terminates. This linked
/// list represents the path taken so far, within a tree of all such paths, from the surface token in the
/// current analysis descent.
/// </summary>
public IEnumerator<DownTrace> GetEnumerator()
{
DownTrace dt = this;
do
yield return dt;
while ((dt = dt.m_prev) != null);
}
IEnumerator IEnumerable.GetEnumerator() { return GetEnumerator(); }
#if DEBUG
[DebuggerBrowsable(DebuggerBrowsableState.RootHidden)]
protected DownTrace[] _dbg_display { get { return this.ToArray(); } }
#endif
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// DownTrace implementations:
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sealed class DownTraceSurface : DownTrace
{
/// any/all irregular form/rule tuples are recorded in the originating surface node, where all derived
/// downtrace branches can see the common set, so that regular affixing does not duplicate them
public List<DownTraceIrregular> irregs = null;
public DownTraceSurface(LexicalAnalysis la, String form) : base(la, null, form) { }
public override string ToString()
{
return String.Format("{0} (surface)", form);
}
};
abstract class DownTraceTransform : DownTrace
{
public LexicalRule r;
protected DownTraceTransform(DownTrace prev, String form, LexicalRule r)
: base(prev.la, prev, form)
{
this.r = r;
}
};
sealed class DownTraceIrregular : DownTraceTransform
{
public DownTraceIrregular(DownTrace prev, String form, LexicalRule r) : base(prev, form, r) { }
public override string ToString()
{
return String.Format("{0} {1} irreg", form, r.Name);
}
};
sealed class DownTraceRegular : DownTraceTransform
{
MorphologySubrule sr;
public DownTraceRegular(DownTrace prev, String form, LexicalRule r, MorphologySubrule sr)
: base(prev, form, r)
{
this.sr = sr;
}
public override string ToString()
{
return String.Format("{0} {1} {2}", form, r.Name, sr);
}
};
public IEnumerator<IParseObj> GetEnumerator()
{
return token_analyses.GetEnumerableUnsafe().GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator() { return GetEnumerator(); }
public override string ToString()
{
return String.Format("tokens: {0} chart size: {1} [{2}]", token_analyses.Count, ChartSize, ts.Source.Text);
}
#if DEBUG
[DebuggerBrowsable(DebuggerBrowsableState.RootHidden)]
public AnalysisStack[] _RGTOKA { get { return token_analyses.ToArray(); } }
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// AnalysisStacks are built strictly from the bottom up, so a stem must be supplied in order to create
/// a stack (the copy constructor excepted). Every stack has exactly one stem, at index 0.
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
[DebuggerDisplay("{ToString(),nq}")]
public sealed class AnalysisStack : List<LexicalTransform>, IParseObj, IDerivation, IList<IDerivation>
{
public AnalysisStack(LexicalAnalysis la)
{
this.la = la;
Interlocked.Increment(ref ctrl.stats.Morphology.c_analysis_stacks);
}
public AnalysisStack(LexicalAnalysis analysis, LxStem mx)
: this(analysis)
{
base.Add(mx);
}
public AnalysisStack(AnalysisStack to_copy)
: this(to_copy.la)
{
base.AddRange(to_copy);
}
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
readonly LexicalAnalysis la;
ParseControl ctrl { get { return la.ctrl; } }
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// IParseObj.ITokenSpan
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public new void Add(LexicalTransform lx)
{
base.Add(lx);
Interlocked.Increment(ref ctrl.stats.Morphology.c_lexical_transforms);
}
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
public LxStem Stem { get { return (LxStem)this[0]; } }
public String Text { get { return la.SourceItem.Source.MinimalSpanText(TokenSpan); } }
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
public Span TokenSpan { get { return Stem.ChartSpan; } }
public Span CharacterSpan { get { throw new NotImplementedException(); } }
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
public LexicalTransform Top { get { return this[this.Count - 1]; } }
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// IParseObj.IAtomicSequence
/// To be allowed into the parse chart, you must be able to participate in sequence list atomic stamping
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int i_seq;
public int SequenceId { get { return i_seq; } }
public void SetSequenceId(int id) { i_seq = id; }
int s_state;
public int State { get { return s_state; } }
public void SetStateRaw(int s) { s_state = s; }
public bool BeginTransact() { throw new NotImplementedException(); }
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// IParseObj
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public Entry License { get { return Top.license; } }
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
public Tfs Tfs { get { return this[this.Count - 1].feature_structure; } }
public IList<IDerivation> Derivations { get { return this; } }
public void AddDerivedParent(PassiveEdge.Derived dpe) { }
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
public IEnumerable<PassiveEdge.Derived> DerivedEdges { get { throw new NotImplementedException(); } }
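/// lock-free singly-linked chaining of parse objects: the compare-and-swap ensures that only one
/// writer can attach a successor to any given node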
IParseObj m_next;
public IParseObj Next { get { return m_next; } }
public bool TrySetTail(IParseObj po)
{
return Interlocked.CompareExchange(ref m_next, po, null) == null;
}
public bool TrySetNext(IParseObj po_to_set, IParseObj po_expected)
{
return Interlocked.CompareExchange(ref m_next, po_to_set, po_expected) == po_expected;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// IDerivation
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public Tfs UnpackedTfs { get { return this.Tfs; } }
ulong _dh;
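/// <summary>
/// Lazily-computed, order-sensitive hash over the license names in this derivation, built with a
/// rotate-and-add scheme. Because it incorporates String.GetHashCode, the value may not be stable
/// across runtimes or processes.
/// </summary>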
public ulong DerivationHash
{
get
{
if (_dh == 0)
{
ulong ul = (ulong)this.Count | 0xA00;
for (int i = 0; i < this.Count; i++)
{
int rot = i + 14;
ul = (ul << rot) | (ul >> (64 - rot));
ul += (ulong)this[i].license.Name.GetHashCode();
}
_dh = ul;
}
return _dh;
}
}
public String TreeDisplay()
{
return String.Format("{0} {{{1}}} {2} {3} {{{4}}}",// {5:X}",
TokenSpan,
Tfs.id.ToString("X"),
Tfs.Type.Name,
this.Reverse<LexicalTransform>().StringJoin(" <- "),
UnpackedTfs.id.ToString("X")
/*, DerivationHash*/) + Environment.NewLine;
}
public IParseObj Source { get { return this; } }
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public override string ToString()
{
//return String.Format("{0} transforms: {1} stem: {2} fs: {3}", this.ChartSpan, this.Count, this.Stem, this.Self);
return String.Format("{0} {1} {2} <- {3}",
this.TokenSpan,
this.Tfs.ToString(false),
this.Tfs.Type.Name,
this.Reverse<LexicalTransform>().StringJoin(" <- "));
}
#if DEBUG
//[DebuggerBrowsable(DebuggerBrowsableState.RootHidden)]
//public LexicalTransform[] _TRANSFORMS
//{
// get { return this.AsEnumerable().Reverse().ToArray(); }
//}
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// 'this' as a singleton IList of IDerivation
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
IDerivation IList<IDerivation>.this[int index]
{
get
{
if (index != 0)
throw new ArgumentException();
return this;
}
set { throw new InvalidOperationException(); }
}
void IList<IDerivation>.Insert(int index, IDerivation item) { throw new InvalidOperationException(); }
void IList<IDerivation>.RemoveAt(int index) { throw new InvalidOperationException(); }
int IList<IDerivation>.IndexOf(IDerivation item) { return item == this ? 0 : -1; }
bool ICollection<IDerivation>.Contains(IDerivation item) { return item == this; }
bool ICollection<IDerivation>.IsReadOnly { get { return true; } }
bool ICollection<IDerivation>.Remove(IDerivation item) { throw new InvalidOperationException(); }
void ICollection<IDerivation>.CopyTo(IDerivation[] array, int arrayIndex) { array[arrayIndex] = this; }
void ICollection<IDerivation>.Add(IDerivation item) { throw new InvalidOperationException(); }
void ICollection<IDerivation>.Clear() { throw new InvalidOperationException(); }
int ICollection<IDerivation>.Count { get { return 1; } }
IEnumerator<IDerivation> IEnumerable<IDerivation>.GetEnumerator() { return new _enum(this); }
IEnumerator IEnumerable.GetEnumerator() { return new _enum(this); }
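/// <summary>
/// Minimal value-type enumerator which yields 'this' (as the sole IDerivation) exactly once.
/// </summary>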
struct _enum : IEnumerator<IDerivation>
{
IDerivation t;
int ix;
public _enum(IDerivation t)
{
this.t = t;
this.ix = -1;
}
public IDerivation Current { get { return t; } }
object IEnumerator.Current { get { return t; } }
public bool MoveNext() { return ++ix == 0; }
public void Reset() { ix = -1; }
public void Dispose() { }
};
};
};
}