Index: editor/util/manifest.mf =================================================================== RCS file: /cvs/editor/util/manifest.mf,v retrieving revision 1.14 diff -c -r1.14 manifest.mf *** editor/util/manifest.mf 17 Oct 2006 02:28:44 -0000 1.14 --- editor/util/manifest.mf 28 Nov 2006 14:19:00 -0000 *************** *** 1,4 **** Manifest-Version: 1.0 OpenIDE-Module: org.netbeans.modules.editor.util/1 OpenIDE-Module-Localizing-Bundle: org/netbeans/lib/editor/util/Bundle.properties ! OpenIDE-Module-Specification-Version: 1.13 --- 1,4 ---- Manifest-Version: 1.0 OpenIDE-Module: org.netbeans.modules.editor.util/1 OpenIDE-Module-Localizing-Bundle: org/netbeans/lib/editor/util/Bundle.properties ! OpenIDE-Module-Specification-Version: 1.14 Index: editor/util/api/apichanges.xml =================================================================== RCS file: /cvs/editor/util/api/apichanges.xml,v retrieving revision 1.7 diff -c -r1.7 apichanges.xml *** editor/util/api/apichanges.xml 17 Oct 2006 02:28:44 -0000 1.7 --- editor/util/api/apichanges.xml 28 Nov 2006 14:19:00 -0000 *************** *** 82,87 **** --- 82,101 ---- + Added ArrayUtilities.unmodifiableList(). + + + + + +

+ ArrayUtilities.unmodifiableList() returns a simple unmodifiable list + for the given object array (see the usage sketch below).

+
+
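For illustration only (not part of the patch), a minimal usage sketch of the new method, assuming the generified signature public static <E> List<E> unmodifiableList(E[] array) added above; the class and variable names are made up:

import java.util.List;
import org.netbeans.lib.editor.util.ArrayUtilities;

public class UnmodifiableListUsage {
    public static void main(String[] args) {
        String[] keywords = { "abstract", "assert", "boolean" };
        // Read-only, RandomAccess view backed directly by the array - no extra wrapper or copy.
        List<String> view = ArrayUtilities.unmodifiableList(keywords);
        System.out.println(view.get(0) + ", size=" + view.size()); // prints: abstract, size=3
        try {
            view.add("break"); // any mutation fails
        } catch (UnsupportedOperationException e) {
            System.out.println("unmodifiable, as expected");
        }
    }
}

Because the returned list is backed by the array itself rather than wrapping a copy, later writes to the keywords array remain visible through view.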
+ + ListenerList.getListeners() return type changed. Index: editor/util/src/org/netbeans/lib/editor/util/ArrayUtilities.java =================================================================== RCS file: /cvs/editor/util/src/org/netbeans/lib/editor/util/ArrayUtilities.java,v retrieving revision 1.2 diff -c -r1.2 ArrayUtilities.java *** editor/util/src/org/netbeans/lib/editor/util/ArrayUtilities.java 4 Oct 2006 17:02:53 -0000 1.2 --- editor/util/src/org/netbeans/lib/editor/util/ArrayUtilities.java 28 Nov 2006 14:19:00 -0000 *************** *** 19,24 **** --- 19,28 ---- package org.netbeans.lib.editor.util; + import java.util.AbstractList; + import java.util.List; + import java.util.RandomAccess; + /** * Utility methods related to arrays. * *************** *** 183,188 **** --- 187,204 ---- sb.append("]: "); } + /** + * Return unmodifiable list for the given array. + *
+ * Unlike Collections.unmodifiableList() this method + * does not use any extra wrappers etc. + * + * @since 1.14 + */ + public static List unmodifiableList(E[] array) { + return new UnmodifiableList(array); + } + public static String toString(Object[] array) { StringBuilder sb = new StringBuilder(); int maxDigitCount = digitCount(array.length); *************** *** 203,208 **** --- 219,265 ---- sb.append('\n'); } return sb.toString(); + } + + private static final class UnmodifiableList extends AbstractList + implements RandomAccess { + + private E[] array; + + UnmodifiableList(E[] array) { + this.array = array; + } + + public E get(int index) { + if (index >= 0 && index < array.length) { + return array[index]; + } else { + throw new IndexOutOfBoundsException("index = " + index + ", size = " + array.length); //NOI18N + } + } + + public int size() { + return array.length; + } + + + public Object[] toArray() { + return array.clone(); + } + + public T[] toArray(T[] a) { + if (a.length < array.length) { + @SuppressWarnings("unchecked") + T[] aa = (T[])java.lang.reflect.Array. + newInstance(a.getClass().getComponentType(), array.length); + a = aa; + } + System.arraycopy(array, 0, a, 0, array.length); + if (a.length > array.length) + a[array.length] = null; + return a; + } + } } Index: editor/util/test/unit/src/org/netbeans/lib/editor/util/ArrayUtilitiesTest.java =================================================================== RCS file: editor/util/test/unit/src/org/netbeans/lib/editor/util/ArrayUtilitiesTest.java diff -N editor/util/test/unit/src/org/netbeans/lib/editor/util/ArrayUtilitiesTest.java *** /dev/null 1 Jan 1970 00:00:00 -0000 --- editor/util/test/unit/src/org/netbeans/lib/editor/util/ArrayUtilitiesTest.java 28 Nov 2006 14:19:00 -0000 *************** *** 0 **** --- 1,52 ---- + /* + * The contents of this file are subject to the terms of the Common Development + * and Distribution License (the License). You may not use this file except in + * compliance with the License. + * + * You can obtain a copy of the License at http://www.netbeans.org/cddl.html + * or http://www.netbeans.org/cddl.txt. + * + * When distributing Covered Code, include this CDDL Header Notice in each file + * and include the License file at http://www.netbeans.org/cddl.txt. + * If applicable, add the following below the CDDL Header, with the fields + * enclosed by brackets [] replaced by your own identifying information: + * "Portions Copyrighted [year] [name of copyright owner]" + * + * The Original Software is NetBeans. The Initial Developer of the Original + * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun + * Microsystems, Inc. All Rights Reserved. 
+ */ + + package org.netbeans.lib.editor.util; + + import java.util.Arrays; + import java.util.List; + import org.netbeans.junit.NbTestCase; + + public class ArrayUtilitiesTest extends NbTestCase { + + public ArrayUtilitiesTest(String testName) { + super(testName); + } + + public void testUnmodifiableList() throws Exception { + String[] arr = new String[] { "haf", "cau", "test" }; + List l = ArrayUtilities.unmodifiableList(arr); + assertEquals("haf", l.get(0)); + assertEquals("cau", l.get(1)); + assertEquals("test", l.get(2)); + try { + l.add("no"); + fail("Modifiable!"); + } catch (UnsupportedOperationException e) { + // Expected + } + assertEquals(3, l.size()); + Object a[] = l.toArray(); + assertTrue(Arrays.equals(arr, a)); + a = l.toArray(new String[2]); + assertTrue(Arrays.equals(arr, a)); + + } + + } Index: html/lexer/src/org/netbeans/lib/html/lexer/HTMLLexer.java =================================================================== RCS file: /cvs/html/lexer/src/org/netbeans/lib/html/lexer/HTMLLexer.java,v retrieving revision 1.3 diff -c -r1.3 HTMLLexer.java *** html/lexer/src/org/netbeans/lib/html/lexer/HTMLLexer.java 28 Nov 2006 02:21:06 -0000 1.3 --- html/lexer/src/org/netbeans/lib/html/lexer/HTMLLexer.java 28 Nov 2006 14:19:07 -0000 *************** *** 30,36 **** * @version 1.00 */ ! public class HTMLLexer implements Lexer { private static final int EOF = LexerInput.EOF; --- 30,36 ---- * @version 1.00 */ ! public final class HTMLLexer implements Lexer { private static final int EOF = LexerInput.EOF; *************** *** 39,45 **** private TokenFactory tokenFactory; public Object state() { ! return subState * 1000000 + state * 1000 + scriptState; } --- 39,45 ---- private TokenFactory tokenFactory; public Object state() { ! return null; } Index: html/lexer/test/unit/src/org/netbeans/lib/html/lexer/HTMLLexerBatchTest.java =================================================================== RCS file: /cvs/html/lexer/test/unit/src/org/netbeans/lib/html/lexer/HTMLLexerBatchTest.java,v retrieving revision 1.3 diff -c -r1.3 HTMLLexerBatchTest.java *** html/lexer/test/unit/src/org/netbeans/lib/html/lexer/HTMLLexerBatchTest.java 23 Oct 2006 14:50:47 -0000 1.3 --- html/lexer/test/unit/src/org/netbeans/lib/html/lexer/HTMLLexerBatchTest.java 28 Nov 2006 14:19:07 -0000 *************** *** 22,27 **** --- 22,28 ---- import junit.framework.TestCase; import org.netbeans.api.html.lexer.HTMLTokenId; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.lib.lexer.test.LexerTestUtilities; *************** *** 47,54 **** public void testJspTags() { String text = "abc>def"; ! TokenHierarchy hi = TokenHierarchy.create(text, HTMLTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.TAG_OPEN_SYMBOL, "<"); LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.TAG_OPEN, "jsp:useBean"); LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.WS, " "); --- 48,55 ---- public void testJspTags() { String text = "abc>def"; ! TokenHierarchy hi = TokenHierarchy.create(text, HTMLTokenId.language()); ! 
TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.TAG_OPEN_SYMBOL, "<"); LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.TAG_OPEN, "jsp:useBean"); LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.WS, " "); Index: java/lexer/src/org/netbeans/api/java/lexer/JavaTokenId.java =================================================================== RCS file: /cvs/java/lexer/src/org/netbeans/api/java/lexer/JavaTokenId.java,v retrieving revision 1.5 diff -c -r1.5 JavaTokenId.java *** java/lexer/src/org/netbeans/api/java/lexer/JavaTokenId.java 26 Oct 2006 20:45:19 -0000 1.5 --- java/lexer/src/org/netbeans/api/java/lexer/JavaTokenId.java 28 Nov 2006 14:19:49 -0000 *************** *** 238,277 **** return new JavaLexer(info); } ! protected LanguageEmbedding embedding( ! Token token, boolean tokenComplete, ! LanguagePath languagePath, InputAttributes inputAttributes) { // Test language embedding in the block comment switch (token.id()) { case JAVADOC_COMMENT: ! return new LanguageEmbedding() { ! public Language language() { ! return JavadocTokenId.language(); ! } ! ! public int startSkipLength() { ! return 3; ! } ! ! public int endSkipLength() { ! return 2; ! } ! }; case STRING_LITERAL: case STRING_LITERAL_INCOMPLETE: ! return new LanguageEmbedding() { ! public Language language() { ! return JavaStringTokenId.language(); ! } ! ! public int startSkipLength() { ! return 1; ! } ! ! public int endSkipLength() { ! return 1; ! } ! }; } return null; // No embedding } --- 238,252 ---- return new JavaLexer(info); } ! protected LanguageEmbedding embedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { // Test language embedding in the block comment switch (token.id()) { case JAVADOC_COMMENT: ! return LanguageEmbedding.create(JavadocTokenId.language(), 3, 2); case STRING_LITERAL: case STRING_LITERAL_INCOMPLETE: ! return LanguageEmbedding.create(JavaStringTokenId.language(), 1, 1); } return null; // No embedding } Index: java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaFlyTokensTest.java =================================================================== RCS file: /cvs/java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaFlyTokensTest.java,v retrieving revision 1.1 diff -c -r1.1 JavaFlyTokensTest.java *** java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaFlyTokensTest.java 18 Oct 2006 11:35:58 -0000 1.1 --- java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaFlyTokensTest.java 28 Nov 2006 14:19:49 -0000 *************** *** 25,30 **** --- 25,31 ---- import java.nio.CharBuffer; import org.netbeans.api.java.lexer.JavaTokenId; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.junit.NbTestCase; import org.netbeans.lib.lexer.test.LexerTestUtilities; *************** *** 57,64 **** r.read(cb); cb.rewind(); String text = cb.toString(); ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); System.err.println("Flyweight tokens: " + LexerTestUtilities.flyweightTokenCount(ts) + "\nTotal tokens: " + ts.tokenCount() --- 58,65 ---- r.read(cb); cb.rewind(); String text = cb.toString(); ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! 
TokenSequence ts = hi.tokenSequence(); System.err.println("Flyweight tokens: " + LexerTestUtilities.flyweightTokenCount(ts) + "\nTotal tokens: " + ts.tokenCount() Index: java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaLexerBatchTest.java =================================================================== RCS file: /cvs/java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaLexerBatchTest.java,v retrieving revision 1.1 diff -c -r1.1 JavaLexerBatchTest.java *** java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaLexerBatchTest.java 18 Oct 2006 11:35:58 -0000 1.1 --- java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaLexerBatchTest.java 28 Nov 2006 14:19:49 -0000 *************** *** 24,29 **** --- 24,30 ---- import org.netbeans.api.java.lexer.JavaTokenId; import org.netbeans.api.java.lexer.JavadocTokenId; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.lib.lexer.test.LexerTestUtilities; *************** *** 48,55 **** public void testComments() { String text = "/*ml-comment*//**//***//**\n*javadoc-comment*//* a"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.BLOCK_COMMENT, "/*ml-comment*/"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.BLOCK_COMMENT, "/**/"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.JAVADOC_COMMENT, "/***/"); --- 49,56 ---- public void testComments() { String text = "/*ml-comment*//**//***//**\n*javadoc-comment*//* a"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.BLOCK_COMMENT, "/*ml-comment*/"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.BLOCK_COMMENT, "/**/"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.JAVADOC_COMMENT, "/***/"); *************** *** 59,66 **** public void testIdentifiers() { String text = "a ab aB2 2a x\nyZ\r\nz"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "a"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "ab"); --- 60,67 ---- public void testIdentifiers() { String text = "a ab aB2 2a x\nyZ\r\nz"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "a"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "ab"); *************** *** 79,86 **** public void testCharLiterals() { String text = "'' 'a''' '\\'' '\\\\' '\\\\\\'' '\\n' 'a"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CHAR_LITERAL, "''"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CHAR_LITERAL, "'a'"); --- 80,87 ---- public void testCharLiterals() { String text = "'' 'a''' '\\'' '\\\\' '\\\\\\'' '\\n' 'a"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! 
TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CHAR_LITERAL, "''"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CHAR_LITERAL, "'a'"); *************** *** 99,106 **** public void testStringLiterals() { String text = "\"\" \"a\"\"\" \"\\\"\" \"\\\\\" \"\\\\\\\"\" \"\\n\" \"a"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"\""); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"a\""); --- 100,107 ---- public void testStringLiterals() { String text = "\"\" \"a\"\"\" \"\\\"\" \"\\\\\" \"\\\\\\\"\" \"\\n\" \"a"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"\""); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"a\""); *************** *** 120,127 **** public void testNumberLiterals() { String text = "0 00 09 1 12 0L 1l 12L 0x1 0xf 0XdE 0Xbcy" + " 09.5 1.5f 2.5d 6d 7e3 6.1E-7f 0xa.5dp+12d .3"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.INT_LITERAL, "0"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.INT_LITERAL, "00"); --- 121,128 ---- public void testNumberLiterals() { String text = "0 00 09 1 12 0L 1l 12L 0x1 0xf 0XdE 0Xbcy" + " 09.5 1.5f 2.5d 6d 7e3 6.1E-7f 0xa.5dp+12d .3"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.INT_LITERAL, "0"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.INT_LITERAL, "00"); *************** *** 166,173 **** public void testOperators() { String text = "^ ^= % %= * *= / /= = =="; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARET, "^"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARETEQ, "^="); --- 167,174 ---- public void testOperators() { String text = "^ ^= % %= * *= / /= = =="; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARET, "^"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARETEQ, "^="); *************** *** 197,204 **** "synchronized this throw throws transient try void volatile while " + "null true false"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! 
TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.ABSTRACT, "abstract"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.ASSERT, "assert"); --- 198,205 ---- "synchronized this throw throws transient try void volatile while " + "null true false"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.ABSTRACT, "abstract"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.ASSERT, "assert"); *************** *** 310,317 **** public void testNonKeywords() { String text = "abstracta assertx b br car dou doubl finall im i ifa inti throwsx"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "abstracta"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "assertx"); --- 311,318 ---- public void testNonKeywords() { String text = "abstracta assertx b br car dou doubl finall im i ifa inti throwsx"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "abstracta"); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " "); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "assertx"); *************** *** 342,349 **** public void testEmbedding() { String text = "ddx \"d\\t\\br\" /** @see X */"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "ddx"); assertEquals(0, ts.offset()); --- 343,350 ---- public void testEmbedding() { String text = "ddx \"d\\t\\br\" /** @see X */"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "ddx"); assertEquals(0, ts.offset()); *************** *** 352,358 **** LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"d\\t\\br\""); assertEquals(4, ts.offset()); ! TokenSequence es = ts.embedded(); LexerTestUtilities.assertNextTokenEquals(es, JavaStringTokenId.TEXT, "d"); assertEquals(5, es.offset()); --- 353,359 ---- LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"d\\t\\br\""); assertEquals(4, ts.offset()); ! TokenSequence es = ts.embedded(); LexerTestUtilities.assertNextTokenEquals(es, JavaStringTokenId.TEXT, "d"); assertEquals(5, es.offset()); *************** *** 370,376 **** LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.JAVADOC_COMMENT, "/** @see X */"); assertEquals(13, ts.offset()); ! TokenSequence ds = ts.embedded(); LexerTestUtilities.assertNextTokenEquals(ds, JavadocTokenId.OTHER_TEXT, " "); assertEquals(16, ds.offset()); --- 371,377 ---- LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.JAVADOC_COMMENT, "/** @see X */"); assertEquals(13, ts.offset()); ! 
TokenSequence ds = ts.embedded(); LexerTestUtilities.assertNextTokenEquals(ds, JavadocTokenId.OTHER_TEXT, " "); assertEquals(16, ds.offset()); Index: java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaStringLexerTest.java =================================================================== RCS file: /cvs/java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaStringLexerTest.java,v retrieving revision 1.4 diff -c -r1.4 JavaStringLexerTest.java *** java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaStringLexerTest.java 7 Nov 2006 16:31:43 -0000 1.4 --- java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaStringLexerTest.java 28 Nov 2006 14:19:49 -0000 *************** *** 20,25 **** --- 20,26 ---- import org.netbeans.api.java.lexer.JavaStringTokenId; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.junit.NbTestCase; import org.netbeans.lib.lexer.test.LexerTestUtilities; *************** *** 40,55 **** public void testNextToken1() { String text = "t"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaStringTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TEXT, "t"); } public void testNextToken2() { String text = "\\t\\b\\b\\t \\tabc\\rsddfdsffffffffff"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaStringTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TAB, "\\t"); LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b"); LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b"); --- 41,56 ---- public void testNextToken1() { String text = "t"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaStringTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TEXT, "t"); } public void testNextToken2() { String text = "\\t\\b\\b\\t \\tabc\\rsddfdsffffffffff"; ! TokenHierarchy hi = TokenHierarchy.create(text, JavaStringTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TAB, "\\t"); LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b"); LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b"); Index: languages/engine/src/org/netbeans/modules/languages/lexer/SLanguageProvider.java =================================================================== RCS file: /cvs/languages/engine/src/org/netbeans/modules/languages/lexer/SLanguageProvider.java,v retrieving revision 1.1 diff -c -r1.1 SLanguageProvider.java *** languages/engine/src/org/netbeans/modules/languages/lexer/SLanguageProvider.java 7 Nov 2006 12:17:28 -0000 1.1 --- languages/engine/src/org/netbeans/modules/languages/lexer/SLanguageProvider.java 28 Nov 2006 14:20:05 -0000 *************** *** 35,41 **** return null; } ! public LanguageEmbedding findLanguageEmbedding (LanguagePath tokenLanguage, Token token, InputAttributes inputAttributes) { return null; } } --- 35,41 ---- return null; } ! 
public LanguageEmbedding findLanguageEmbedding (Token token, LanguagePath tokenLanguage, InputAttributes inputAttributes) { return null; } } Index: lexer/arch.xml =================================================================== RCS file: /cvs/lexer/arch.xml,v retrieving revision 1.5 diff -c -r1.5 arch.xml *** lexer/arch.xml 8 Nov 2006 18:49:18 -0000 1.5 --- lexer/arch.xml 28 Nov 2006 14:20:12 -0000 *************** *** 4,10 **** ]> --- 4,10 ---- ]> *************** *** 1582,1587 **** --- 1582,1613 ----
There are no plans to deprecate any part of the present API and it should be evolved in a compatible way. +

+ + + + + + + +

+ No.

Index: lexer/manifest.mf =================================================================== RCS file: /cvs/lexer/manifest.mf,v retrieving revision 1.11 diff -c -r1.11 manifest.mf *** lexer/manifest.mf 17 Oct 2006 22:36:33 -0000 1.11 --- lexer/manifest.mf 28 Nov 2006 14:20:12 -0000 *************** *** 1,4 **** OpenIDE-Module: org.netbeans.modules.lexer/2 OpenIDE-Module-Localizing-Bundle: org/netbeans/lib/lexer/Bundle.properties ! OpenIDE-Module-Specification-Version: 1.10.0 OpenIDE-Module-Recommends: org.netbeans.spi.lexer.LanguageProvider --- 1,4 ---- OpenIDE-Module: org.netbeans.modules.lexer/2 OpenIDE-Module-Localizing-Bundle: org/netbeans/lib/lexer/Bundle.properties ! OpenIDE-Module-Specification-Version: 1.11.0 OpenIDE-Module-Recommends: org.netbeans.spi.lexer.LanguageProvider Index: lexer/api/apichanges.xml =================================================================== RCS file: /cvs/lexer/api/apichanges.xml,v retrieving revision 1.8 diff -c -r1.8 apichanges.xml *** lexer/api/apichanges.xml 17 Oct 2006 22:36:33 -0000 1.8 --- lexer/api/apichanges.xml 28 Nov 2006 14:20:12 -0000 *************** *** 91,121 **** ! --- 91,143 ---- ! ! ! Adding custom embedding creation TokenSequence.createEmbedding() ! ! ! !

! Extracted the TokenHierarchyEvent.Type inner class ! into the top-level TokenHierarchyEventType class. !
! Added the TokenSequence.createEmbedding() ! method for creating a custom embedding. ! TokenHierarchyEventType.EMBEDDING is fired ! after embedding creation. !
! Affected offset information (affectedStartOffset() ! and affectedEndOffset()) was moved ! from TokenChange to TokenHierarchyEvent. !
! There can now be more than one embedded change in a TokenChange. !
! Removed the tokenComplete parameter from ! LanguageHierarchy.embedding() because token incompleteness ! will be handled in a different way. !
! Swapped the order of the token and languagePath ! parameters in LanguageProvider to be in sync with ! LanguageHierarchy.embedding(). !
! LanguageEmbedding is now a final class ! (instead of an abstract class) with a private constructor ! and a static create() method. That allows better control ! over the evolution of the class and also allows the created embeddings to be cached ! to save memory. !
! LanguageEmbedding is now generified with ! T extends TokenId, which parameterizes ! the language that it contains. !
! The TokenHierarchy.languagePaths() set contains all language paths ! used in the token hierarchy. TokenHierarchyEventType.LANGUAGE_PATHS is ! fired after that set changes. (See the listener sketch below.)

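To illustrate the combined changes above, a hypothetical listener sketch (not part of the patch) written against the members visible in this diff — type(), affectedStartOffset()/affectedEndOffset(), embeddedChangeCount() and embeddedChange(int); the TokenHierarchyListener interface and the wildcard generics are assumptions, since type parameters are not shown here:

import org.netbeans.api.lexer.TokenChange;
import org.netbeans.api.lexer.TokenHierarchyEvent;
import org.netbeans.api.lexer.TokenHierarchyEventType;
import org.netbeans.api.lexer.TokenHierarchyListener;

public class AffectedRangeListener implements TokenHierarchyListener {

    public void tokenHierarchyChanged(TokenHierarchyEvent evt) {
        if (evt.type() == TokenHierarchyEventType.MODIFICATION) {
            // Affected offsets now live on the event rather than on the top-level TokenChange.
            System.err.println("affected range: "
                    + evt.affectedStartOffset() + "-" + evt.affectedEndOffset());
        }
        dumpEmbedded(evt.tokenChange(), 1);
    }

    // A TokenChange may now carry more than one embedded change; walk them recursively.
    private void dumpEmbedded(TokenChange<?> change, int depth) {
        for (int i = 0; i < change.embeddedChangeCount(); i++) {
            TokenChange<?> embedded = change.embeddedChange(i);
            System.err.println("embedded change, depth " + depth
                    + ", path " + embedded.languagePath().mimePath());
            dumpEmbedded(embedded, depth + 1);
        }
    }
}

The LexerLayer.java hunk further down in this patch computes its repaint range from the same affectedStartOffset()/affectedEndOffset() pair.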
Index: lexer/editorbridge/src/org/netbeans/modules/lexer/editorbridge/LexerLayer.java =================================================================== RCS file: /cvs/lexer/editorbridge/src/org/netbeans/modules/lexer/editorbridge/LexerLayer.java,v retrieving revision 1.6 diff -c -r1.6 LexerLayer.java *** lexer/editorbridge/src/org/netbeans/modules/lexer/editorbridge/LexerLayer.java 8 Nov 2006 16:44:10 -0000 1.6 --- lexer/editorbridge/src/org/netbeans/modules/lexer/editorbridge/LexerLayer.java 28 Nov 2006 14:20:13 -0000 *************** *** 336,343 **** public void tokenHierarchyChanged(TokenHierarchyEvent evt) { javax.swing.plaf.TextUI ui = (javax.swing.plaf.TextUI)component.getUI(); ! int startRepaintOffset = evt.tokenChange().modifiedTokensStartOffset(); ! int endRepaintOffset = Math.max(evt.tokenChange().addedTokensEndOffset(), startRepaintOffset + 1); ui.damageRange(component, startRepaintOffset, endRepaintOffset); } --- 336,343 ---- public void tokenHierarchyChanged(TokenHierarchyEvent evt) { javax.swing.plaf.TextUI ui = (javax.swing.plaf.TextUI)component.getUI(); ! int startRepaintOffset = evt.affectedStartOffset(); ! int endRepaintOffset = Math.max(evt.affectedEndOffset(), startRepaintOffset + 1); ui.damageRange(component, startRepaintOffset, endRepaintOffset); } Index: lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/LanguagesEmbeddingMap.java =================================================================== RCS file: /cvs/lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/LanguagesEmbeddingMap.java,v retrieving revision 1.3 diff -c -r1.3 LanguagesEmbeddingMap.java *** lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/LanguagesEmbeddingMap.java 12 Oct 2006 03:28:45 -0000 1.3 --- lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/LanguagesEmbeddingMap.java 28 Nov 2006 14:20:13 -0000 *************** *** 20,25 **** --- 20,26 ---- package org.netbeans.modules.lexer.nbbridge; import java.util.Map; + import org.netbeans.api.lexer.TokenId; import org.netbeans.spi.lexer.LanguageEmbedding; /** *************** *** 28,41 **** */ public final class LanguagesEmbeddingMap { ! private Map map; /** Creates a new instance of LanguagesEmbeddingMap */ ! public LanguagesEmbeddingMap(Map map) { this.map = map; } ! public synchronized LanguageEmbedding getLanguageEmbeddingForTokenName(String tokenName) { return map.get(tokenName); } } --- 29,42 ---- */ public final class LanguagesEmbeddingMap { ! private Map> map; /** Creates a new instance of LanguagesEmbeddingMap */ ! public LanguagesEmbeddingMap(Map> map) { this.map = map; } ! public synchronized LanguageEmbedding getLanguageEmbeddingForTokenName(String tokenName) { return map.get(tokenName); } } Index: lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupFolderInfo.java =================================================================== RCS file: /cvs/lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupFolderInfo.java,v retrieving revision 1.5 diff -c -r1.5 MimeLookupFolderInfo.java *** lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupFolderInfo.java 7 Nov 2006 17:15:13 -0000 1.5 --- lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupFolderInfo.java 28 Nov 2006 14:20:13 -0000 *************** *** 60,66 **** } public Object createInstance(List fileObjectList) { ! 
HashMap map = new HashMap(); for(Object o : fileObjectList) { assert o instanceof FileObject : "fileObjectList should contain FileObjects and not " + o; //NOI18N --- 60,67 ---- } public Object createInstance(List fileObjectList) { ! HashMap> map ! = new HashMap>(); for(Object o : fileObjectList) { assert o instanceof FileObject : "fileObjectList should contain FileObjects and not " + o; //NOI18N *************** *** 75,81 **** if (isMimeTypeValid(mimeType)) { Language language = LanguageManager.getInstance().findLanguage(mimeType); if (language != null) { ! map.put(f.getName(), new EL(language, startSkipLength, endSkipLength)); } else { LOG.warning("Can't find Language for mime type '" + mimeType + "', ignoring."); //NOI18N } --- 76,82 ---- if (isMimeTypeValid(mimeType)) { Language language = LanguageManager.getInstance().findLanguage(mimeType); if (language != null) { ! map.put(f.getName(), LanguageEmbedding.create(language, startSkipLength, endSkipLength)); } else { LOG.warning("Can't find Language for mime type '" + mimeType + "', ignoring."); //NOI18N } *************** *** 136,163 **** } } - private static final class EL extends LanguageEmbedding { - - private Language language; - private int startSkipLength; - private int endSkipLength; - - public EL(Language language, int startSkipLength, int endSkipLength) { - this.language = language; - this.startSkipLength = startSkipLength; - this.endSkipLength = endSkipLength; - } - - public Language language() { - return language; - } - - public int startSkipLength() { - return startSkipLength; - } - - public int endSkipLength() { - return endSkipLength; - } - } // End of EL class } --- 137,140 ---- Index: lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupLanguageProvider.java =================================================================== RCS file: /cvs/lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupLanguageProvider.java,v retrieving revision 1.5 diff -c -r1.5 MimeLookupLanguageProvider.java *** lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupLanguageProvider.java 26 Oct 2006 20:45:21 -0000 1.5 --- lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupLanguageProvider.java 28 Nov 2006 14:20:13 -0000 *************** *** 45,52 **** return (Language)lookup.lookup(Language.class); } ! public LanguageEmbedding findLanguageEmbedding(LanguagePath tokenLanguage, Token token, InputAttributes inputAttributes) { ! Lookup lookup = MimeLookup.getLookup(MimePath.parse(tokenLanguage.mimePath())); LanguagesEmbeddingMap map = lookup.lookup(LanguagesEmbeddingMap.class); return map == null ? null : map.getLanguageEmbeddingForTokenName(token.id().name()); } --- 45,53 ---- return (Language)lookup.lookup(Language.class); } ! public LanguageEmbedding findLanguageEmbedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { ! Lookup lookup = MimeLookup.getLookup(MimePath.parse(languagePath.mimePath())); LanguagesEmbeddingMap map = lookup.lookup(LanguagesEmbeddingMap.class); return map == null ? null : map.getLanguageEmbeddingForTokenName(token.id().name()); } Index: lexer/nbproject/project.xml =================================================================== RCS file: /cvs/lexer/nbproject/project.xml,v retrieving revision 1.10 diff -c -r1.10 project.xml *** lexer/nbproject/project.xml 7 Nov 2006 23:43:47 -0000 1.10 --- lexer/nbproject/project.xml 28 Nov 2006 14:20:14 -0000 *************** *** 29,35 **** 1 ! 1.12 --- 29,35 ---- 1 ! 
1.14 Index: lexer/src/org/netbeans/api/lexer/Language.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/api/lexer/Language.java,v retrieving revision 1.5 diff -c -r1.5 Language.java *** lexer/src/org/netbeans/api/lexer/Language.java 26 Oct 2006 20:45:22 -0000 1.5 --- lexer/src/org/netbeans/api/lexer/Language.java 28 Nov 2006 14:20:14 -0000 *************** *** 32,37 **** --- 32,39 ---- import org.netbeans.lib.lexer.LexerSpiPackageAccessor; import org.netbeans.lib.lexer.TokenIdSet; import org.netbeans.lib.lexer.TokenHierarchyOperation; + import org.netbeans.lib.lexer.inc.TokenChangeInfo; + import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo; import org.netbeans.lib.lexer.inc.TokenListChange; import org.netbeans.spi.lexer.LanguageHierarchy; *************** *** 404,410 **** // List.get(0) is a Map[category, list-of-[category]]. // List.get(1) is a Map[category1, Map[category2, list-of-[category1;category2]]]. // etc. ! List> catMapsList = new ArrayList>(4); // All categories for a single token id List idCats = new ArrayList(4); for (T id : ids) { --- 406,412 ---- // List.get(0) is a Map[category, list-of-[category]]. // List.get(1) is a Map[category1, Map[category2, list-of-[category1;category2]]]. // etc. ! List> catMapsList = new ArrayList>(4); // All categories for a single token id List idCats = new ArrayList(4); for (T id : ids) { *************** *** 431,439 **** * * @param catMapsList non-null list of cached maps. *
! * List.get(0) is a Map[category, list-of-[category]]. *
! * List.get(1) is a Map[category1, Map[category2, list-of-[category1;category2]]]. *
* etc. * --- 433,441 ---- * * @param catMapsList non-null list of cached maps. *
! * List.get(0) is a Map[category, list-containing-[category]]. *
! * List.get(1) is a Map[category1, Map[category2, list-containing-[category1;category2]]]. *
* etc. * *************** *** 443,475 **** * of all categories or 1 for returning non-primary categories. * @return non-null cached list of categories with contents equal to idCats. */ ! @SuppressWarnings("unchecked") ! private static List findCatList(List> catMapsList, List idCats, int startIndex) { int size = idCats.size() - startIndex; if (size <= 0) { return Collections.emptyList(); } while (catMapsList.size() < size) { ! catMapsList.add(new HashMap>()); } // Find the catList as the last item in the cascaded search through the maps ! Map m = catMapsList.get(--size); ! for (int i = startIndex; i < size; i++) { ! Map catMap = (Map)m.get(idCats.get(i)); if (catMap == null) { ! catMap = new HashMap(); ! ((Map)m).put(idCats.get(i), catMap); } m = catMap; } List catList = (List)m.get(idCats.get(size)); if (catList == null) { catList = new ArrayList(idCats.size() - startIndex); catList.addAll((startIndex > 0) ? idCats.subList(startIndex, idCats.size()) : idCats); ! ((Map>)m).put(idCats.get(size), catList); } return catList; } --- 445,479 ---- * of all categories or 1 for returning non-primary categories. * @return non-null cached list of categories with contents equal to idCats. */ ! private static List findCatList(List> catMapsList, List idCats, int startIndex) { int size = idCats.size() - startIndex; if (size <= 0) { return Collections.emptyList(); } while (catMapsList.size() < size) { ! catMapsList.add(new HashMap()); } // Find the catList as the last item in the cascaded search through the maps ! Map m = catMapsList.get(--size); ! for (int i = startIndex; i < size; i++) { ! @SuppressWarnings("unchecked") ! Map catMap = (Map)m.get(idCats.get(i)); if (catMap == null) { ! catMap = new HashMap(); ! // Map> ! m.put(idCats.get(i), catMap); } m = catMap; } + @SuppressWarnings("unchecked") List catList = (List)m.get(idCats.get(size)); if (catList == null) { catList = new ArrayList(idCats.size() - startIndex); catList.addAll((startIndex > 0) ? idCats.subList(startIndex, idCats.size()) : idCats); ! m.put(idCats.get(size), catList); } return catList; } *************** *** 507,513 **** } public String toString() { ! return "LH: " + languageHierarchy; } private void checkMemberId(T id) { --- 511,517 ---- } public String toString() { ! return mimeType + ", LH: " + languageHierarchy; } private void checkMemberId(T id) { *************** *** 549,571 **** return new Language(languageHierarchy); } ! public LanguageHierarchy languageHierarchy( ! Language language) { return language.languageHierarchy(); } public TokenHierarchy createTokenHierarchy( ! TokenHierarchyOperation tokenHierarchyOperation) { return new TokenHierarchy(tokenHierarchyOperation); } public TokenHierarchyEvent createTokenChangeEvent( ! TokenHierarchy tokenHierarchy, TokenListChange change) { ! return new TokenHierarchyEvent(change); } ! public TokenHierarchyOperation tokenHierarchyOperation( ! TokenHierarchy tokenHierarchy) { return tokenHierarchy.operation(); } --- 553,585 ---- return new Language(languageHierarchy); } ! public LanguageHierarchy languageHierarchy( ! Language language) { return language.languageHierarchy(); } public TokenHierarchy createTokenHierarchy( ! TokenHierarchyOperation tokenHierarchyOperation) { return new TokenHierarchy(tokenHierarchyOperation); } public TokenHierarchyEvent createTokenChangeEvent( ! TokenHierarchyEventInfo info) { ! return new TokenHierarchyEvent(info); } ! public TokenChange createTokenChange( ! TokenChangeInfo info) { ! return new TokenChange(info); ! } ! ! 
public TokenChangeInfo tokenChangeInfo( ! TokenChange tokenChange) { ! return tokenChange.info(); ! } ! ! public TokenHierarchyOperation tokenHierarchyOperation( ! TokenHierarchy tokenHierarchy) { return tokenHierarchy.operation(); } Index: lexer/src/org/netbeans/api/lexer/LanguagePath.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/api/lexer/LanguagePath.java,v retrieving revision 1.5 diff -c -r1.5 LanguagePath.java *** lexer/src/org/netbeans/api/lexer/LanguagePath.java 27 Oct 2006 12:59:07 -0000 1.5 --- lexer/src/org/netbeans/api/lexer/LanguagePath.java 28 Nov 2006 14:20:14 -0000 *************** *** 324,330 **** } } - @SuppressWarnings("unchecked") private Language[] allocateLanguageArray(int length) { return (Language[])(new Language[length]); } --- 324,329 ---- Index: lexer/src/org/netbeans/api/lexer/Token.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/api/lexer/Token.java,v retrieving revision 1.4 diff -c -r1.4 Token.java *** lexer/src/org/netbeans/api/lexer/Token.java 4 Oct 2006 17:03:12 -0000 1.4 --- lexer/src/org/netbeans/api/lexer/Token.java 28 Nov 2006 14:20:14 -0000 *************** *** 170,176 **** * @return >=0 offset of the token in the input or -1 * if this token is flyweight. */ ! public abstract int offset(TokenHierarchy tokenHierarchy); /** * Checks whether this token instance is used for multiple occurrences --- 170,176 ---- * @return >=0 offset of the token in the input or -1 * if this token is flyweight. */ ! public abstract int offset(TokenHierarchy tokenHierarchy); /** * Checks whether this token instance is used for multiple occurrences Index: lexer/src/org/netbeans/api/lexer/TokenChange.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/api/lexer/TokenChange.java,v retrieving revision 1.4 diff -c -r1.4 TokenChange.java *** lexer/src/org/netbeans/api/lexer/TokenChange.java 26 Oct 2006 20:45:22 -0000 1.4 --- lexer/src/org/netbeans/api/lexer/TokenChange.java 28 Nov 2006 14:20:14 -0000 *************** *** 19,26 **** package org.netbeans.api.lexer; import org.netbeans.lib.lexer.TokenList; ! import org.netbeans.lib.lexer.inc.TokenListChange; /** * Token change describes modification on one level of a token hierarchy. --- 19,27 ---- package org.netbeans.api.lexer; + import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.lib.lexer.TokenList; ! import org.netbeans.lib.lexer.inc.TokenChangeInfo; /** * Token change describes modification on one level of a token hierarchy. *************** *** 28,34 **** * If there is only one token that was modified * and there was a language embedding in that token then * most of the embedded tokens can usually be retained. ! * This defines an embedded change accessible by {@link #embedded()}. *
* There may possibly be multiple levels of the embedded changes. * --- 29,35 ---- * If there is only one token that was modified * and there was a language embedding in that token then * most of the embedded tokens can usually be retained. ! * This defines an embedded change accessible by {@link #embeddedChange(int)}. *
* There may possibly be multiple levels of the embedded changes. * *************** *** 38,79 **** public final class TokenChange { ! private final TokenListChange tokenListChange; ! TokenChange(TokenListChange tokenListChange) { ! this.tokenListChange = tokenListChange; } /** ! * Get embedded token change. ! *
! * If there is only one token that was modified ! * and there was a language embedding in that token then ! * there is possibility that the new token will be similar ! * to the old one and the embedded tokens can be retained ! * and just updated by another token change. ! *
! * In such case there will be an embedded token change. * ! * @return valid embedded token change or null if there ! * is no embedded token change. */ ! public TokenChange embedded() { ! return null; // TODO } /** ! * Get embedded token change of the given type ! * only if it's of the given language. * * @return non-null token change or null if the embedded token change * satisfies the condition (embedded().language() == language). * Null is returned otherwise. */ ! public TokenChange embedded(Language language) { ! @SuppressWarnings("unchecked") ! TokenChange e = (TokenChange)embedded(); ! return (e != null && e.language() == language) ? e : null; } /** --- 39,87 ---- public final class TokenChange { ! private final TokenChangeInfo info; ! TokenChange(TokenChangeInfo info) { ! this.info = info; } /** ! * Get number of embedded changes contained in this change. * ! * @return >=0 number of embedded changes. */ ! public int embeddedChangeCount() { ! return info.embeddedChanges().length; ! } ! ! /** ! * Get embedded change at the given index. ! * ! * @param index 0 <= index <= embeddedChangeCount() index of the embedded change. ! * @return non-null embedded token change. ! */ ! public TokenChange embeddedChange(int index) { ! return info.embeddedChanges()[index]; } /** ! * Get embedded token change of the given type. * * @return non-null token change or null if the embedded token change * satisfies the condition (embedded().language() == language). * Null is returned otherwise. */ ! public TokenChange embeddedChange(Language language) { ! TokenChange[] ecs = info.embeddedChanges(); ! for (int i = ecs.length - 1; i >= 0; i--) { ! TokenChange c = ecs[i]; ! if (c.language() == language) { ! @SuppressWarnings("unchecked") ! TokenChange ec = (TokenChange)c; ! return ec; ! } ! } ! return null; } /** *************** *** 81,91 **** * used by tokens contained in this token change. */ public Language language() { ! // No need to check as the token sequence should already ! // be obtained originally for the inner language ! @SuppressWarnings("unchecked") Language l ! = (Language)languagePath().innerLanguage(); ! return l; } /** --- 89,95 ---- * used by tokens contained in this token change. */ public Language language() { ! return LexerUtilsConstants.mostEmbeddedLanguage(languagePath()); } /** *************** *** 93,172 **** * in this token sequence (containing outer language levels as well). */ public LanguagePath languagePath() { ! return tokenListChange.languagePath(); } /** - * Get start offset of the modification - * that caused this token change. - *
- * For token hierarchy rebuilds this is the start offset - * of the area being rebuilt. - */ - public int offset() { - return tokenListChange.offset(); - } - - /** - * Get number of characters inserted by the text modification - * that caused this token change. - *
- * For token hierarchy rebuilds this is the length - * of the area being rebuilt. - */ - public int insertedLength() { - return tokenListChange.insertedLength(); - } - - /** - * Get number of characters removed by the text modification - * that caused this token change. - *
- * For token hierarchy rebuilds this is the length - * of the area being rebuilt. - */ - public int removedLength() { - return tokenListChange.removedLength(); - } - - /** * Get index of the first token being modified. */ ! public int tokenIndex() { ! return tokenListChange.tokenIndex(); ! } ! ! /** ! * Get number of tokens removed. ! */ ! public int removedTokenCount() { ! return tokenListChange.removedTokenList().tokenCount(); } /** * Get offset of the first token that was modified. *
! * The returned value is always equal or below the {@link #offset()} value. ! *
! * If there were any removed tokens then this is a start offset ! * of the first removed token. ! *
! * If there were only added tokens (no removed tokens) ! * then this is the start offset of the first added token. */ ! public int modifiedTokensStartOffset() { ! return tokenListChange.modifiedTokensStartOffset(); } /** ! * Get end offset of the last token that was removed ! * (in the original offset space before the removal was done). ! *
! * If there were no removed tokens then the result of this method ! * is equal to {@link #modifiedTokensStartOffset()}. */ ! public int removedTokensEndOffset() { ! return tokenListChange.removedTokensEndOffset(); } /** --- 97,128 ---- * in this token sequence (containing outer language levels as well). */ public LanguagePath languagePath() { ! return info.currentTokenList().languagePath(); } /** * Get index of the first token being modified. */ ! public int index() { ! return info.index(); } /** * Get offset of the first token that was modified. *
! * If there were any added/removed tokens then this is a start offset ! * of the first added/removed token. */ ! public int offset() { ! return info.offset(); } /** ! * Get number of removed tokens contained in this token change. */ ! public int removedTokenCount() { ! TokenList rtl = info.removedTokenList(); ! return (rtl != null) ? rtl.tokenCount() : 0; } /** *************** *** 182,208 **** * or null if there were no removed tokens. */ public TokenSequence removedTokenSequence() { ! return new TokenSequence(tokenListChange.removedTokenList()); } /** * Get number of the tokens added by this token change. */ public int addedTokenCount() { ! return tokenListChange.addedTokenCount(); } /** - * Get end offset of the last token that was added. - *
- * If there were no added tokens then the result of this method - * is equal to {@link #modifiedTokensStartOffset()}. - */ - public int addedTokensEndOffset() { - return tokenListChange.addedTokensEndOffset(); - } - - /** * Get the token sequence that corresponds to the current state * of the token hierarchy. *
--- 138,154 ---- * or null if there were no removed tokens. */ public TokenSequence removedTokenSequence() { ! return new TokenSequence(info.removedTokenList()); } /** * Get number of the tokens added by this token change. */ public int addedTokenCount() { ! return info.addedTokenCount(); } /** * Get the token sequence that corresponds to the current state * of the token hierarchy. *
*************** *** 210,227 **** * the token sequence at the corresponding embedded level. */ public TokenSequence currentTokenSequence() { ! return new TokenSequence(tokenListChange.currentTokenList()); } ! /** ! * Get token hierarchy where this change occurred. */ ! public TokenHierarchy tokenHierarchy() { ! return tokenListChange.tokenHierarchyOperation().tokenHierarchy(); ! } ! ! TokenListChange tokenListChange() { ! return tokenListChange; } } --- 156,169 ---- * the token sequence at the corresponding embedded level. */ public TokenSequence currentTokenSequence() { ! return new TokenSequence(info.currentTokenList()); } ! /** ! * Used by package-private accessor. */ ! TokenChangeInfo info() { ! return info; } } Index: lexer/src/org/netbeans/api/lexer/TokenHierarchy.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/api/lexer/TokenHierarchy.java,v retrieving revision 1.3 diff -c -r1.3 TokenHierarchy.java *** lexer/src/org/netbeans/api/lexer/TokenHierarchy.java 26 Oct 2006 20:45:23 -0000 1.3 --- lexer/src/org/netbeans/api/lexer/TokenHierarchy.java 28 Nov 2006 14:20:15 -0000 *************** *** 109,115 **** CharSequence inputText, boolean copyInputText, Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! return new TokenHierarchyOperation(inputText, copyInputText, language, skipTokenIds, inputAttributes).tokenHierarchy(); } --- 109,115 ---- CharSequence inputText, boolean copyInputText, Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! return new TokenHierarchyOperation(inputText, copyInputText, language, skipTokenIds, inputAttributes).tokenHierarchy(); } *************** *** 139,152 **** Reader inputReader, Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! return new TokenHierarchyOperation(inputReader, language, skipTokenIds, inputAttributes).tokenHierarchy(); } ! private TokenHierarchyOperation operation; ! TokenHierarchy(TokenHierarchyOperation operation) { this.operation = operation; } --- 139,152 ---- Reader inputReader, Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! return new TokenHierarchyOperation(inputReader, language, skipTokenIds, inputAttributes).tokenHierarchy(); } ! private TokenHierarchyOperation operation; ! TokenHierarchy(TokenHierarchyOperation operation) { this.operation = operation; } *************** *** 159,166 **** * @return non-null token sequence of the top level of the token hierarchy. */ public TokenSequence tokenSequence() { ! @SuppressWarnings("unchecked") TokenSequence ts ! = new TokenSequence(operation.checkedTokenList()); return ts; } --- 159,167 ---- * @return non-null token sequence of the top level of the token hierarchy. */ public TokenSequence tokenSequence() { ! @SuppressWarnings("unchecked") ! TokenSequence ts = new TokenSequence( ! (TokenList)operation.checkedTokenList()); return ts; } *************** *** 174,198 **** * */ public TokenSequence tokenSequence(Language language) { ! TokenList tokenList = operation.checkedTokenList(); ! @SuppressWarnings("unchecked") TokenSequence ts = (tokenList.languagePath().topLanguage() == language) ! ? new TokenSequence(tokenList) : null; return ts; } /** ! * Whether this provider supports token changes (upon change of the underlying ! * text input) or not. *
! * If changes are not supported then it has no sense ! * to attach token change listeners (though it's allowed) ! * as they would never be fired. ! *
! * Token hierarchy snapshots do not fire token change events. * ! * @return true if this provider supports token changes or false otherwise. */ public boolean isMutable() { return operation.isMutable(); --- 175,206 ---- * */ public TokenSequence tokenSequence(Language language) { ! TokenList tokenList = operation.checkedTokenList(); ! @SuppressWarnings("unchecked") ! TokenSequence ts = (tokenList.languagePath().topLanguage() == language) ! ? new TokenSequence((TokenList)tokenList) : null; return ts; } /** ! * Get a set of language paths used by this token hierarchy. *
! * The set includes "static" paths that are those reachable by traversing ! * token ids of the top language and searching for the default embeddings ! * that could be created by ! * {@link org.netbeans.spi.lexer.LanguageHierarchy#embedding(Token,LanguagePath,InputAttributes)}. ! * ! */ ! public Set languagePaths() { ! return operation.languagePaths(); ! } ! ! /** ! * Whether input text of this token hierarchy is mutable or not. * ! * @return true if the input text is mutable or false otherwise. */ public boolean isMutable() { return operation.isMutable(); *************** *** 213,220 **** * was not created over mutable input source. */ public I mutableInputSource() { ! @SuppressWarnings("unchecked") I input = (I)operation.mutableInputSource(); ! return input; } /** --- 221,227 ---- * was not created over mutable input source. */ public I mutableInputSource() { ! return operation.mutableInputSource(); } /** *************** *** 397,403 **** * Obtaining of token hierarchy operation is only intended to be done * by package accessor. */ ! TokenHierarchyOperation operation() { return operation; } --- 404,410 ---- * Obtaining of token hierarchy operation is only intended to be done * by package accessor. */ ! TokenHierarchyOperation operation() { return operation; } Index: lexer/src/org/netbeans/api/lexer/TokenHierarchyEvent.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/api/lexer/TokenHierarchyEvent.java,v retrieving revision 1.4 diff -c -r1.4 TokenHierarchyEvent.java *** lexer/src/org/netbeans/api/lexer/TokenHierarchyEvent.java 26 Oct 2006 20:45:23 -0000 1.4 --- lexer/src/org/netbeans/api/lexer/TokenHierarchyEvent.java 28 Nov 2006 14:20:15 -0000 *************** *** 19,25 **** package org.netbeans.api.lexer; ! import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.inc.TokenListChange; /** --- 19,25 ---- package org.netbeans.api.lexer; ! import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo; import org.netbeans.lib.lexer.inc.TokenListChange; /** *************** *** 31,56 **** public final class TokenHierarchyEvent extends java.util.EventObject { ! private final TokenChange tokenChange; ! TokenHierarchyEvent(TokenListChange tokenListChange) { ! super(tokenListChange.tokenHierarchyOperation().tokenHierarchy()); ! this.tokenChange = new TokenChange(tokenListChange); } /** * Get source of this event as a token hierarchy instance. */ ! public TokenHierarchy tokenHierarchy() { ! return (TokenHierarchy)getSource(); } /** * Get the token change that occurred in the tokens * at the top-level of the token hierarchy. */ public TokenChange tokenChange() { ! return tokenChange; } /** --- 31,63 ---- public final class TokenHierarchyEvent extends java.util.EventObject { ! private final TokenHierarchyEventInfo info; ! TokenHierarchyEvent(TokenHierarchyEventInfo info) { ! super(info.tokenHierarchyOperation().tokenHierarchy()); ! this.info = info; } /** * Get source of this event as a token hierarchy instance. */ ! public TokenHierarchy tokenHierarchy() { ! return (TokenHierarchy)getSource(); } /** + * Get reason why a token hierarchy event was fired. + */ + public TokenHierarchyEventType type() { + return info.type(); + } + + /** * Get the token change that occurred in the tokens * at the top-level of the token hierarchy. */ public TokenChange tokenChange() { ! return info.tokenChange(); } /** *************** *** 63,139 **** * Returns null otherwise. 
*/ public TokenChange tokenChange(Language language) { @SuppressWarnings("unchecked") ! TokenChange tc = (TokenChange)tokenChange(); ! return (tc != null && tc.language() == language) ? tc : null; } ! /** ! * Get reason why a token hierarchy event was fired. */ ! public Type type() { ! return tokenChange.tokenListChange().type(); } /** ! * Token hierarchy event type determines the reason ! * why token hierarchy modification happened. */ ! public enum Type { ! ! /** ! * The token change was caused by modification (insert/remove) of the characters ! * in the underlying character sequence. ! */ ! TEXT_MODIFY, ! ! /** ! * The token change was caused by a partial rebuilding ! * of the token hierarchy. ! *
! * The partial rebuilding may be caused by changes in input attributes. ! *
! * This change is notified under modification lock (write lock) ! * of the corresponding input source. ! */ ! PARTIAL_REBUILD, ! ! /** ! * The token change was caused by a complete rebuild ! * of the token hierarchy. ! *
! * That may be necessary because of changes ! * in input attributes that influence the lexing. ! *
! * When the whole hierarchy is rebuilt only the removed tokens ! * will be notified. There will be no added tokens ! * because they will be created lazily when asked. ! *
! * This change is notified under modification lock (write lock) ! * of the corresponding input source. ! */ ! FULL_REBUILD, ! ! /** ! * The token change was caused by change in activity ! * of the token hierarchy. ! *
! * The current activity state can be determined by {@link TokenHierarchy#isActive()}. ! *
! * Firing with this token change type may happen because the input source ! * (for which the token hierarchy was created) has not been used for a long time ! * and its token hierarchy is being deactivated. Or the token hierarchy is just going ! * to be activated again. ! *
! * The hierarchy will only notify the tokens being removed (for the case when ! * the hierarchy is going to be deactivated). There will be no added tokens ! * because they will be created lazily when asked. ! *
! * This change is notified under modification lock (write lock) ! * of the corresponding input source. ! */ ! ACTIVATION; ! } } --- 70,135 ---- * Returns null otherwise. */ public TokenChange tokenChange(Language language) { + TokenChange tc = tokenChange(); @SuppressWarnings("unchecked") ! TokenChange tcl = (tc != null && tc.language() == language) ? (TokenChange)tc : null; ! return tcl; } ! /** ! * Get start offset of the area that was affected by the attached ! * token change(s). ! */ ! public int affectedStartOffset() { ! return info.affectedStartOffset(); ! } ! ! /** ! * Get end offset of the area that was affected by the attached ! * token change(s). ! *
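! * For illustration only (an editor's sketch, not part of the original change;
! * myHighlighter is a made-up name for some view-side cache):
! * <pre>
! *   public void tokenHierarchyChanged(TokenHierarchyEvent evt) {
! *       // refresh only the damaged area instead of the whole document
! *       myHighlighter.refresh(evt.affectedStartOffset(), evt.affectedEndOffset());
! *   }
! * </pre>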
! * If there was a text modification the offsets are related ! * to the state after the modification. */ ! public int affectedEndOffset() { ! return info.affectedEndOffset(); } /** ! * Get offset in the input source where the modification occurred. ! * ! * @return modification offset or -1 ! * if this event's type is not {@link TokenHierarchyEventType#MODIFICATION}. ! */ ! public int modificationOffset() { ! return info.modificationOffset(); ! } ! ! /** ! * Get number of characters inserted by the text modification ! * that caused this token change. ! * ! * @return number of inserted characters by the modification. ! *
! * Returns 0 ! * if this event's type is not {@link TokenHierarchyEventType#MODIFICATION}. */ ! public int insertedLength() { ! return info.insertedLength(); } + + /** + * Get number of characters removed by the text modification + * that caused this token change. + * + * @return number of removed characters by the modification. + *
+ * Returns 0 + * if this event's type is not {@link TokenHierarchyEventType#MODIFICATION}. + */ + public int removedLength() { + return info.removedLength(); + } + } Index: lexer/src/org/netbeans/api/lexer/TokenHierarchyEventType.java =================================================================== RCS file: lexer/src/org/netbeans/api/lexer/TokenHierarchyEventType.java diff -N lexer/src/org/netbeans/api/lexer/TokenHierarchyEventType.java *** /dev/null 1 Jan 1970 00:00:00 -0000 --- lexer/src/org/netbeans/api/lexer/TokenHierarchyEventType.java 28 Nov 2006 14:20:15 -0000 *************** *** 0 **** --- 1,101 ---- + /* + * The contents of this file are subject to the terms of the Common Development + * and Distribution License (the License). You may not use this file except in + * compliance with the License. + * + * You can obtain a copy of the License at http://www.netbeans.org/cddl.html + * or http://www.netbeans.org/cddl.txt. + * + * When distributing Covered Code, include this CDDL Header Notice in each file + * and include the License file at http://www.netbeans.org/cddl.txt. + * If applicable, add the following below the CDDL Header, with the fields + * enclosed by brackets [] replaced by your own identifying information: + * "Portions Copyrighted [year] [name of copyright owner]" + * + * The Original Software is NetBeans. The Initial Developer of the Original + * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun + * Microsystems, Inc. All Rights Reserved. + */ + + package org.netbeans.api.lexer; + + /** + * Token hierarchy event type determines the reason + * why token hierarchy modification described by {@link TokenHierarchyEvent} + * happened. + * + * @author Miloslav Metelka + * @version 1.00 + */ + + public enum TokenHierarchyEventType { + + /** + * The token change was caused by modification (insert/remove) of the characters + * in the underlying character sequence. + */ + MODIFICATION, + + /** + * The token change was caused by relexing of a part of the token hierarchy + * without any text modification. + *
+ * This change is notified under modification lock (write lock) + * of the corresponding input source. + */ + RELEX, + + /** + * The token change was caused by a complete rebuild + * of the token hierarchy. + *
+ * That may be necessary because of changes + * in input attributes that influence the lexing. + *
+ * When the whole hierarchy is rebuilt only the removed tokens + * will be notified. There will be no added tokens + * because they will be created lazily when asked. + *
+ * This change is notified under modification lock (write lock) + * of the corresponding input source. + */ + REBUILD, + + /** + * The token change was caused by a change in the activity + * of the token hierarchy. + *
+ * The current activity state can be determined by {@link TokenHierarchy#isActive()}. + *
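+ * For illustration only (an editor's sketch, not part of the original change):
+ * <pre>
+ *   public void tokenHierarchyChanged(TokenHierarchyEvent evt) {
+ *       if (evt.type() == TokenHierarchyEventType.ACTIVITY) {
+ *           boolean active = evt.tokenHierarchy().isActive();
+ *           // e.g. drop cached token-derived data when active == false
+ *       }
+ *   }
+ * </pre>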
+ * Firing an event with this type may happen because the input source + * (for which the token hierarchy was created) has not been used for a long time + * and its token hierarchy is being deactivated, or because the token hierarchy + * is just about to be activated again. + *
+ * The hierarchy will only notify the tokens being removed (for the case when + * the hierarchy is going to be deactivated). There will be no added tokens + * because they will be created lazily when asked. + *
+ * This change is notified under modification lock (write lock) + * of the corresponding input source. + */ + ACTIVITY, + + /** + * Custom language embedding was created by + * {@link TokenSequence#createEmbedding(Language,int,int)}. + *
+ * The {@link TokenHierarchyEvent#tokenChange()} contains the token + * where the embedding was created and the embedded change + * {@link TokenChange#embeddedChange(int)} that describes the added + * embedded language. + */ + EMBEDDING, + + /** + * Notification that result of + * {@link TokenHierarchy#languagePaths()} has changed. + */ + LANGUAGE_PATHS; + + } \ No newline at end of file Index: lexer/src/org/netbeans/api/lexer/TokenSequence.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/api/lexer/TokenSequence.java,v retrieving revision 1.6 diff -c -r1.6 TokenSequence.java *** lexer/src/org/netbeans/api/lexer/TokenSequence.java 26 Oct 2006 20:45:23 -0000 1.6 --- lexer/src/org/netbeans/api/lexer/TokenSequence.java 28 Nov 2006 14:20:15 -0000 *************** *** 20,26 **** package org.netbeans.api.lexer; import java.util.ConcurrentModificationException; ! import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.SubSequenceTokenList; import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.lib.lexer.TokenList; --- 20,26 ---- package org.netbeans.api.lexer; import java.util.ConcurrentModificationException; ! import org.netbeans.lib.lexer.EmbeddingContainer; import org.netbeans.lib.lexer.SubSequenceTokenList; import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.lib.lexer.TokenList; *************** *** 61,67 **** public final class TokenSequence { ! private TokenList tokenList; // 8 + 4 = 12 bytes private AbstractToken token; // 16 bytes --- 61,67 ---- public final class TokenSequence { ! private TokenList tokenList; // 8 + 4 = 12 bytes private AbstractToken token; // 16 bytes *************** *** 78,88 **** * changes (by modification) this token sequence will become invalid. */ private final int modCount; // 28 bytes /** * Package-private constructor used by API accessor. */ ! TokenSequence(TokenList tokenList) { this.tokenList = tokenList; this.modCount = tokenList.modCount(); } --- 78,97 ---- * changes (by modification) this token sequence will become invalid. */ private final int modCount; // 28 bytes + + /** + * Parent token indexes allow to effectively determine parent tokens + * in the tree token hierarchy. + *
+ * The first index corresponds to the top language in the hierarchy + * and the ones that follow point to subsequent embedded levels. + */ + private int[] parentTokenIndexes; // 32 bytes /** * Package-private constructor used by API accessor. */ ! TokenSequence(TokenList tokenList) { this.tokenList = tokenList; this.modCount = tokenList.modCount(); } *************** *** 92,102 **** * used by tokens in this token sequence. */ public Language language() { ! // No need to check as the token sequence should already ! // be obtained originally for the inner language ! @SuppressWarnings("unchecked") Language l ! = (Language)languagePath().innerLanguage(); ! return l; } /** --- 101,107 ---- * used by tokens in this token sequence. */ public Language language() { ! return LexerUtilsConstants.mostEmbeddedLanguage(languagePath()); } /** *************** *** 157,163 **** public Token offsetToken() { checkToken(); if (token.isFlyweight()) { ! token = tokenList.createNonFlyToken(tokenIndex, token, offset()); } return token; } --- 162,168 ---- public Token offsetToken() { checkToken(); if (token.isFlyweight()) { ! token = tokenList.replaceFlyToken(tokenIndex, token, offset()); } return token; } *************** *** 195,204 **** return tokenIndex; } ! /** ! * Get the embedded token sequence if the token * to which this token sequence is currently positioned * has a language embedding. * * @return embedded sequence or null if no embedding exists for this token. * @throws IllegalStateException if this token sequence was not positioned --- 200,215 ---- return tokenIndex; } ! /** ! * Get embedded token sequence if the token * to which this token sequence is currently positioned * has a language embedding. + *
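+ * For illustration only (an editor's sketch, not part of the original change;
+ * it assumes hierarchy is a TokenHierarchy over some input):
+ * <pre>
+ *   TokenSequence ts = hierarchy.tokenSequence();
+ *   while (ts.moveNext()) {
+ *       TokenSequence embedded = ts.embedded();
+ *       if (embedded != null) {
+ *           // process the tokens of the embedded language here
+ *       }
+ *   }
+ * </pre>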
+ * If there is a custom embedding created by + * {@link #createEmbedding(Language,int,int)} it will be returned + * instead of the default embedding + * (the one created by LanguageHierarchy.embedding() + * or LanguageProvider). * * @return embedded sequence or null if no embedding exists for this token. * @throws IllegalStateException if this token sequence was not positioned *************** *** 206,240 **** */ public TokenSequence embedded() { checkToken(); ! TokenList branchTokenList = BranchTokenList.getOrCreate(tokenList, tokenIndex); ! if (branchTokenList != null) { ! TokenList tl = tokenList; if (tokenList.getClass() == SubSequenceTokenList.class) { ! tl = ((SubSequenceTokenList)tokenList).delegate(); } if (tl.getClass() == FilterSnapshotTokenList.class) { ! branchTokenList = new FilterSnapshotTokenList(branchTokenList, ! ((FilterSnapshotTokenList)tl).tokenOffsetDiff()); } else if (tl.getClass() == SnapshotTokenList.class) { ! branchTokenList = new FilterSnapshotTokenList(branchTokenList, offset() - token().offset(null)); } ! return new TokenSequence(branchTokenList); } else // Embedded token list does not exist return null; } /** ! * Created embedded token sequence of the given type or return null ! * if the embedded token sequence does not exist or it has a different type. */ public TokenSequence embedded(Language embeddedLanguage) { ! @SuppressWarnings("unchecked") ! TokenSequence ets = (TokenSequence)embedded(); ! return (ets != null && ets.language() == embeddedLanguage) ? ets : null; } /** --- 217,302 ---- */ public TokenSequence embedded() { checkToken(); ! return embeddedImpl(null); ! } ! ! private TokenSequence embeddedImpl(Language embeddedLanguage) { ! TokenList embeddedTokenList ! = EmbeddingContainer.getEmbedding(tokenList, tokenIndex, embeddedLanguage); ! if (embeddedTokenList != null) { ! TokenList tl = tokenList; if (tokenList.getClass() == SubSequenceTokenList.class) { ! tl = ((SubSequenceTokenList)tokenList).delegate(); } if (tl.getClass() == FilterSnapshotTokenList.class) { ! embeddedTokenList = new FilterSnapshotTokenList(embeddedTokenList, ! ((FilterSnapshotTokenList)tl).tokenOffsetDiff()); } else if (tl.getClass() == SnapshotTokenList.class) { ! embeddedTokenList = new FilterSnapshotTokenList(embeddedTokenList, offset() - token().offset(null)); } ! return new TokenSequence(embeddedTokenList); } else // Embedded token list does not exist return null; } /** ! * Get embedded token sequence if the token ! * to which this token sequence is currently positioned ! * has a language embedding. */ public TokenSequence embedded(Language embeddedLanguage) { ! checkToken(); ! return embeddedImpl(embeddedLanguage); ! } ! ! /** ! * Create language embedding without joining of the embedded sections. ! * ! * @see #createEmbedding(Language, int, int, boolean) ! */ ! public boolean createEmbedding(Language embeddedLanguage, ! int startSkipLength, int endSkipLength) { ! return createEmbedding(embeddedLanguage, startSkipLength, endSkipLength, false); ! } ! ! /** ! * Create language embedding described by the given parameters. ! *
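! * For illustration only (an editor's sketch, not part of the original change;
! * MyTokenId and CssTokenId are made-up ids and the skip lengths depend on
! * the actual token text):
! * <pre>
! *   // embed CSS into a token with text "style='color: red'" while skipping
! *   // the leading "style='" (7 characters) and the trailing "'" (1 character)
! *   if (ts.token().id() == MyTokenId.ATTRIBUTE_VALUE) {
! *       ts.createEmbedding(CssTokenId.language(), 7, 1, false);
! *   }
! * </pre>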
! * If the underlying text input is mutable then this method should only be called ! * within a read lock over the text input. ! * ! * @param embeddedLanguage non-null embedded language ! * @param startSkipLength >=0 number of characters in an initial part of the token ! * for which the language embedding is defined that should be excluded ! * from the embedded section. The excluded characters will not be lexed ! * and there will be no tokens created for them. ! * @param endSkipLength >=0 number of characters at the end of the token ! * for which the language embedding is defined that should be excluded ! * from the embedded section. The excluded characters will not be lexed ! * and there will be no tokens created for them. ! * @param joinSections whether sections with this embedding should be joined ! * across the input source or whether they should stay separate. ! *
! * For example for HTML sections embedded in JSP this flag should be true: ! *
!      *   <!-- HTML comment start
!      *       <% System.out.println("Hello"); %>
!      *   still in HTML comment -->
!      *  
! *
! * Only the embedded sections with the same language path can be joined. ! * @return true if the embedding was created successfully or false if an embedding ! * with the given language already exists for this token. ! */ ! public boolean createEmbedding(Language embeddedLanguage, ! int startSkipLength, int endSkipLength, boolean joinSections) { ! checkToken(); ! return EmbeddingContainer.createEmbedding(tokenList, tokenIndex, ! embeddedLanguage, startSkipLength, endSkipLength, joinSections); } /** *************** *** 253,262 **** public boolean moveNext() { checkModCount(); tokenIndex++; ! Object tokenOrBranch = tokenList.tokenOrBranch(tokenIndex); ! if (tokenOrBranch != null) { AbstractToken origToken = token; ! assignToken(tokenOrBranch); if (tokenOffset != -1) { // If the token list is continuous or the fetched token // is flyweight (there cannot be a gap before flyweight token) --- 315,324 ---- public boolean moveNext() { checkModCount(); tokenIndex++; ! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(tokenIndex); ! if (tokenOrEmbeddingContainer != null) { AbstractToken origToken = token; ! assignToken(tokenOrEmbeddingContainer); if (tokenOffset != -1) { // If the token list is continuous or the fetched token // is flyweight (there cannot be a gap before flyweight token) *************** *** 328,337 **** if (index < 0) { return false; } ! Object tokenOrBranch = tokenList.tokenOrBranch(index); ! if (tokenOrBranch != null) { // enough tokens this.tokenIndex = index; ! assignToken(tokenOrBranch); tokenOffset = -1; return true; --- 390,399 ---- if (index < 0) { return false; } ! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index); ! if (tokenOrEmbeddingContainer != null) { // enough tokens this.tokenIndex = index; ! assignToken(tokenOrEmbeddingContainer); tokenOffset = -1; return true; *************** *** 409,415 **** // when asked by clients. int tokenCount = tokenList.tokenCountCurrent(); // presently created token count if (tokenCount == 0) { // no tokens yet -> attempt to create at least one ! if (tokenList.tokenOrBranch(0) == null) { // really no tokens at all // In this case the token sequence could not be positioned yet // so no need to reset "index" or other vars return Integer.MAX_VALUE; --- 471,477 ---- // when asked by clients. int tokenCount = tokenList.tokenCountCurrent(); // presently created token count if (tokenCount == 0) { // no tokens yet -> attempt to create at least one ! if (tokenList.tokenOrEmbeddingContainer(0) == null) { // really no tokens at all // In this case the token sequence could not be positioned yet // so no need to reset "index" or other vars return Integer.MAX_VALUE; *************** *** 426,434 **** // there may be gaps between tokens due to token id filter use. int tokenLength = LexerUtilsConstants.token(tokenList, tokenCount - 1).length(); while (offset >= prevTokenOffset + tokenLength) { // above present token ! Object tokenOrBranch = tokenList.tokenOrBranch(tokenCount); ! if (tokenOrBranch != null) { ! AbstractToken t = LexerUtilsConstants.token(tokenOrBranch); if (t.isFlyweight()) { // need to use previous tokenLength prevTokenOffset += tokenLength; } else { // non-flyweight token - retrieve offset --- 488,496 ---- // there may be gaps between tokens due to token id filter use. int tokenLength = LexerUtilsConstants.token(tokenList, tokenCount - 1).length(); while (offset >= prevTokenOffset + tokenLength) { // above present token ! 
Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(tokenCount); ! if (tokenOrEmbeddingContainer != null) { ! AbstractToken t = LexerUtilsConstants.token(tokenOrEmbeddingContainer); if (t.isFlyweight()) { // need to use previous tokenLength prevTokenOffset += tokenLength; } else { // non-flyweight token - retrieve offset *************** *** 547,578 **** */ public TokenSequence subSequence(int startOffset, int endOffset) { checkModCount(); // Ensure subsequences on valid token sequences only ! TokenList tl; if (tokenList.getClass() == SubSequenceTokenList.class) { ! SubSequenceTokenList stl = (SubSequenceTokenList)tokenList; tl = stl.delegate(); startOffset = Math.max(startOffset, stl.limitStartOffset()); endOffset = Math.min(endOffset, stl.limitEndOffset()); } else // Regular token list tl = tokenList; ! return new TokenSequence(new SubSequenceTokenList(tl, startOffset, endOffset)); } public String toString() { return LexerUtilsConstants.appendTokenList(null, tokenList, tokenIndex).toString(); } ! @SuppressWarnings("unchecked") ! private void assignToken(Object tokenOrBranch) { ! if (tokenOrBranch.getClass() == BranchTokenList.class) { ! token = (AbstractToken)((BranchTokenList)tokenOrBranch).branchToken(); ! } else { ! token = (AbstractToken)tokenOrBranch; ! } } private void assignToken() { ! assignToken(tokenList.tokenOrBranch(tokenIndex)); } private void checkToken() { --- 609,639 ---- */ public TokenSequence subSequence(int startOffset, int endOffset) { checkModCount(); // Ensure subsequences on valid token sequences only ! TokenList tl; if (tokenList.getClass() == SubSequenceTokenList.class) { ! SubSequenceTokenList stl = (SubSequenceTokenList)tokenList; tl = stl.delegate(); startOffset = Math.max(startOffset, stl.limitStartOffset()); endOffset = Math.min(endOffset, stl.limitEndOffset()); } else // Regular token list tl = tokenList; ! return new TokenSequence(new SubSequenceTokenList(tl, startOffset, endOffset)); } public String toString() { return LexerUtilsConstants.appendTokenList(null, tokenList, tokenIndex).toString(); } + + int[] parentTokenIndexes() { + return parentTokenIndexes; + } ! private void assignToken(Object tokenOrEmbeddingContainer) { ! token = LexerUtilsConstants.token(tokenOrEmbeddingContainer); } private void assignToken() { ! assignToken(tokenList.tokenOrEmbeddingContainer(tokenIndex)); } private void checkToken() { Index: lexer/src/org/netbeans/lib/lexer/BranchTokenList.java =================================================================== RCS file: lexer/src/org/netbeans/lib/lexer/BranchTokenList.java diff -N lexer/src/org/netbeans/lib/lexer/BranchTokenList.java *** lexer/src/org/netbeans/lib/lexer/BranchTokenList.java 26 Oct 2006 20:45:23 -0000 1.5 --- /dev/null 1 Jan 1970 00:00:00 -0000 *************** *** 1,422 **** - /* - * The contents of this file are subject to the terms of the Common Development - * and Distribution License (the License). You may not use this file except in - * compliance with the License. - * - * You can obtain a copy of the License at http://www.netbeans.org/cddl.html - * or http://www.netbeans.org/cddl.txt. - * - * When distributing Covered Code, include this CDDL Header Notice in each file - * and include the License file at http://www.netbeans.org/cddl.txt. - * If applicable, add the following below the CDDL Header, with the fields - * enclosed by brackets [] replaced by your own identifying information: - * "Portions Copyrighted [year] [name of copyright owner]" - * - * The Original Software is NetBeans. 
The Initial Developer of the Original - * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun - * Microsystems, Inc. All Rights Reserved. - */ - - package org.netbeans.lib.lexer; - - import java.util.List; - import java.util.Set; - import org.netbeans.api.lexer.Language; - import org.netbeans.api.lexer.LanguagePath; - import org.netbeans.lib.editor.util.FlyOffsetGapList; - import org.netbeans.lib.lexer.inc.MutableTokenList; - import org.netbeans.api.lexer.InputAttributes; - import org.netbeans.api.lexer.Token; - import org.netbeans.api.lexer.TokenId; - import org.netbeans.lib.lexer.inc.TokenListChange; - import org.netbeans.spi.lexer.LanguageEmbedding; - import org.netbeans.spi.lexer.LanguageHierarchy; - import org.netbeans.lib.lexer.token.AbstractToken; - import org.netbeans.lib.lexer.token.TextToken; - - - /** - * Branch token list maintains a list of tokens - * on a particular embedded language level . - *
- * The physical storage contains a gap to speed up list modifications - * during typing in a document when tokens are typically added/removed - * at the same index in the list. - * - *
- * There is an intent to not degrade performance significantly - * with each extra language embedding level so the token list maintains direct - * link to the root level. - * - * @author Miloslav Metelka - * @version 1.00 - */ - - public final class BranchTokenList extends FlyOffsetGapList implements MutableTokenList { - - /** Flag for additional correctness checks (may degrade performance). */ - private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test"); - - /** - * Get or create branch token list. - *
- * This method also calls updateStartOffset(). - * - */ - public static BranchTokenList getOrCreate(TokenList tokenList, int index) { - Object tokenOrBranch = tokenList.tokenOrBranch(index); - if (tokenOrBranch.getClass() == BranchTokenList.class) { // already exists - BranchTokenList embeddedList = (BranchTokenList)tokenOrBranch; - embeddedList.updateStartOffset(); - return embeddedList; - } - AbstractToken branchToken = (AbstractToken)tokenOrBranch; - if (branchToken.isFlyweight()) { // branching cannot exist for this flyweight token - return null; - } - // Create branch token list now - LanguagePath languagePath = tokenList.languagePath(); - LanguageHierarchy languageHierarchy = LexerUtilsConstants.languageHierarchy(languagePath); - LanguageEmbedding embedding = LexerSpiPackageAccessor.get().embedding( - languageHierarchy, branchToken, true, languagePath, tokenList.inputAttributes()); - - if (embedding == null) { - // try language embeddings registered in Lookup - embedding = LanguageManager.getInstance().findLanguageEmbedding(languagePath, branchToken, tokenList.inputAttributes()); - } - - if (embedding != null) { - LanguagePath embeddedLanguagePath = LanguagePath.get(languagePath, - embedding.language()); - // updateStartOffset() called in constructor - BranchTokenList embeddedList = new BranchTokenList((AbstractToken)branchToken, - embeddedLanguagePath, embedding); - tokenList.wrapToken(index, embeddedList); - return embeddedList; - } else { - return null; - } - } - - private final AbstractToken branchToken; // 36 bytes (32-super + 4) - - private final LanguagePath languagePath; // 40 bytes - - /** - * For mutable environment this field contains root token list of the hierarchy. - * - */ - private final TokenList root; // 44 bytes - - /** - * Cached modification count allows to determine whether the start offset - * needs to be recomputed. - */ - private int cachedModCount; // 48 bytes - - /** - * Cached start offset of the first token in this token list. - *
- * It consists of start ofsset of the branch token plus embedding start shift. - *
- * It must be added to the real offset for each child token - * (the child's real offset is only extra shift against startOffset). - */ - private int startOffset; // 52 bytes - - private final LanguageEmbedding embedding; // 56 bytes - - /** - * The branch token at the root level (root token list). - *
- * For first-level embedding it is the same like value of branchToken variable - * but for deeper embeddings it points to the corresponding branch token - * in the root token list. - *
- * It's used for getting of the start offset of the contained tokens - * and for getting of their text. - */ - private final AbstractToken rootBranchToken; // 60 bytes - - /** - * Difference between start offset of the first token in this token list - * against the start offset of the corresponding root branch token. - *
- * The offset gets refreshed upon updateStartOffset(). - */ - private int startOffsetShift; // 64 bytes - - /** - * Storage for lookaheads and states for the lexed tokens. - *
- * It's only initialized for mutable token lists - * or when in testing environment. - */ - private LAState laState; // 68 bytes - - - public BranchTokenList(AbstractToken branchToken, LanguagePath languagePath, LanguageEmbedding embedding) { - this.branchToken = branchToken; - this.languagePath = languagePath; - this.embedding = embedding; - TokenList branchTokenList = branchToken.tokenList(); - this.root = branchTokenList.root(); - this.rootBranchToken = (branchTokenList.getClass() == BranchTokenList.class) - ? ((BranchTokenList)branchTokenList).rootBranchToken() - : branchToken; - - if (root.modCount() != -1 || testing) { - this.laState = LAState.empty(); // Store lookaheads and states - } - - // Set cachedModCount to value that will force its update (can't be 0 or -1) - this.cachedModCount = -2; - updateStartOffset(); // update startOffset - - init(); - } - - private void init() { - // Lex all the input represented by branch token at once - LexerInputOperation lexerInputOperation = createLexerInputOperation( - 0, startOffset, null); - Token token = lexerInputOperation.nextToken(); - while (token != null) { - updateElementOffsetAdd(token); // must subtract startOffset() - add(token); - if (laState != null) { - laState = laState.add(lexerInputOperation.lookahead(), - lexerInputOperation.lexerState()); - } - token = lexerInputOperation.nextToken(); - } - - trimToSize(); // Compact storage - if (laState != null) - laState.trimToSize(); - } - - public AbstractToken branchToken() { - return branchToken; - } - - public AbstractToken rootBranchToken() { - return rootBranchToken; - } - - public LanguagePath languagePath() { - return languagePath; - } - - public int tokenCount() { - // initialized at once so no need to check whether lexing is finished - return size(); - } - - public synchronized Object tokenOrBranch(int index) { - // Assuming all the token are lexed since begining and after updates - return (index < size()) ? get(index) : null; - } - - private Token existingToken(int index) { - // Tokens not created lazily -> use regular unsync tokenOrBranch() - return LexerUtilsConstants.token(tokenOrBranch(index)); - } - - public synchronized AbstractToken createNonFlyToken( - int index, AbstractToken flyToken, int offset) { - TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset2Raw(offset)); - set(index, nonFlyToken); - return nonFlyToken; - } - - public int lookahead(int index) { - return (laState != null) ? laState.lookahead(index) : -1; - } - - public Object state(int index) { - return (laState != null) ? laState.state(index) : null; - } - - /** - * Returns absolute offset of the token at the given index - * (startOffset gets added to the child token's real offset). - *
- * For token hierarchy snapshots the returned value is corrected - * in the TokenSequence explicitly by adding TokenSequence.tokenOffsetDiff. - */ - public int tokenOffset(int index) { - return elementOffset(index); - } - - public int childTokenOffset(int rawOffset) { - // Need to make sure that the startOffset is up-to-date - updateStartOffset(); - return startOffset + childTokenRelOffset(rawOffset); - } - - /** - * Get difference between start offset of the particular child token - * against start offset of the root branch token. - */ - public int childTokenOffsetShift(int rawOffset) { - // Need to make sure that the startOffsetShift is up-to-date - updateStartOffset(); - return startOffsetShift + childTokenRelOffset(rawOffset); - } - - /** - * Get child token's real offset which is always a relative value - * to startOffset value. - */ - private int childTokenRelOffset(int rawOffset) { - return (rawOffset < offsetGapStart()) - ? rawOffset - : rawOffset - offsetGapLength(); - } - - public char childTokenCharAt(int rawOffset, int index) { - // Do not update the start offset shift - the token.text() - // did it before returning its result and its contract - // specifies that. - // Return chars by delegating to rootBranchToken - return rootBranchToken.charAt(startOffsetShift + childTokenRelOffset(rawOffset) + index); - } - - public int modCount() { - // Delegate to root to have the most up-to-date value for token sequence's check. - return root.modCount(); - } - - protected int startOffset() { // used by FlyOffsetGapList - return startOffset; - } - - public void updateStartOffset() { - synchronized (root) { - if (cachedModCount != root.modCount()) { - cachedModCount = root.modCount(); - // Assign cached start offset of the first token in this list - startOffset = branchToken.offset(null) + embedding.startSkipLength(); - // Assign difference between start offset of the first token - // in this list and start offset of the corresponding root branch token - startOffsetShift = startOffset - rootBranchToken.offset(null); - } - } - } - - public TokenList root() { - return root; - } - - protected int elementRawOffset(Object elem) { - return (elem.getClass() == BranchTokenList.class) - ? 
((BranchTokenList)elem).branchToken().rawOffset() - : ((AbstractToken)elem).rawOffset(); - } - - protected void setElementRawOffset(Object elem, int rawOffset) { - if (elem.getClass() == BranchTokenList.class) - ((BranchTokenList)elem).branchToken().setRawOffset(rawOffset); - else - ((AbstractToken)elem).setRawOffset(rawOffset); - } - - protected boolean isElementFlyweight(Object elem) { - // token wrapper always contains non-flyweight token - return (elem.getClass() != BranchTokenList.class) - && ((Token)elem).isFlyweight(); - } - - protected int elementLength(Object elem) { - return LexerUtilsConstants.token(elem).length(); - } - - public synchronized void wrapToken(int index, BranchTokenList wrapper) { - set(index, wrapper); - } - - public InputAttributes inputAttributes() { - return root.inputAttributes(); - } - - // MutableTokenList extra methods - public Object tokenOrBranchUnsync(int index) { - return get(index); - } - - public int tokenCountCurrent() { - return size(); - } - - public LexerInputOperation createLexerInputOperation( - int tokenIndex, int relexOffset, Object relexState) { - CharSequence branchTokenText = branchToken.text(); - int branchTokenStartOffset = startOffset - embedding.startSkipLength(); - int endOffset = branchTokenStartOffset + branchTokenText.length() - - embedding.endSkipLength(); - // Do not need to update offset - clients - // (constructor or token list updater) call updateStartOffset() - // before calling this method - return new TextLexerInputOperation(this, tokenIndex, relexState, branchTokenText, - branchTokenStartOffset, relexOffset, endOffset); - } - - public boolean isFullyLexed() { - return true; - } - - public void replaceTokens(TokenListChange change, int removeTokenCount) { - int index = change.tokenIndex(); - // Remove obsolete tokens (original offsets are retained) - Object[] removedTokensOrBranches = new Object[removeTokenCount]; - copyElements(index, index + removeTokenCount, removedTokensOrBranches, 0); - int offset = change.modifiedTokensStartOffset(); - for (int i = 0; i < removeTokenCount; i++) { - Object tokenOrBranch = removedTokensOrBranches[i]; - Token token = LexerUtilsConstants.token(tokenOrBranch); - if (!token.isFlyweight()) { - updateElementOffsetRemove(token); - ((AbstractToken)token).setTokenList(null); - } - offset += token.length(); - } - remove(index, removeTokenCount); // Retain original offsets - laState.remove(index, removeTokenCount); // Remove lookaheads and states - change.initRemovedTokenList(removedTokensOrBranches); - change.setRemovedTokensEndOffset(offset); - - // Move and fix the gap according to the performed modification. - int diffLength = change.insertedLength() - change.removedLength(); - if (offsetGapStart() != change.offset()) { - // Minimum of the index of the first removed index and original computed index - moveOffsetGap(change.offset(), Math.min(index, change.offsetGapIndex())); - } - updateOffsetGapLength(-diffLength); - - // Add created tokens. 
- List addedTokens = change.addedTokens(); - if (addedTokens != null) { - for (Token token : addedTokens) { - updateElementOffsetAdd(token); - } - addAll(index, addedTokens); - laState = laState.addAll(index, change.laState()); - change.setAddedTokenCount(addedTokens.size()); - change.clearAddedTokens(); - } - } - - public boolean isContinuous() { - return true; - } - - public Set skipTokenIds() { - return null; - } - - public String toString() { - return LexerUtilsConstants.appendTokenList(null, this, -1).toString(); - } - - } --- 0 ---- Index: lexer/src/org/netbeans/lib/lexer/EmbeddedTokenList.java =================================================================== RCS file: lexer/src/org/netbeans/lib/lexer/EmbeddedTokenList.java diff -N lexer/src/org/netbeans/lib/lexer/EmbeddedTokenList.java *** /dev/null 1 Jan 1970 00:00:00 -0000 --- lexer/src/org/netbeans/lib/lexer/EmbeddedTokenList.java 28 Nov 2006 14:20:15 -0000 *************** *** 0 **** --- 1,348 ---- + /* + * The contents of this file are subject to the terms of the Common Development + * and Distribution License (the License). You may not use this file except in + * compliance with the License. + * + * You can obtain a copy of the License at http://www.netbeans.org/cddl.html + * or http://www.netbeans.org/cddl.txt. + * + * When distributing Covered Code, include this CDDL Header Notice in each file + * and include the License file at http://www.netbeans.org/cddl.txt. + * If applicable, add the following below the CDDL Header, with the fields + * enclosed by brackets [] replaced by your own identifying information: + * "Portions Copyrighted [year] [name of copyright owner]" + * + * The Original Software is NetBeans. The Initial Developer of the Original + * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun + * Microsystems, Inc. All Rights Reserved. + */ + + package org.netbeans.lib.lexer; + + import java.util.List; + import java.util.Set; + import org.netbeans.api.lexer.LanguagePath; + import org.netbeans.lib.editor.util.FlyOffsetGapList; + import org.netbeans.lib.lexer.inc.MutableTokenList; + import org.netbeans.api.lexer.InputAttributes; + import org.netbeans.api.lexer.Token; + import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo; + import org.netbeans.lib.lexer.inc.TokenListChange; + import org.netbeans.spi.lexer.LanguageEmbedding; + import org.netbeans.lib.lexer.token.AbstractToken; + import org.netbeans.lib.lexer.token.TextToken; + + + /** + * Embedded token list maintains a list of tokens + * on a particular embedded language level . + *
+ * The physical storage contains a gap to speed up list modifications + * during typing in a document when tokens are typically added/removed + * at the same index in the list. + * + *
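+ * For illustration only (an editor's sketch of the general gap-list idea,
+ * not the real FlyOffsetGapList, which additionally keeps token offsets
+ * relative to the gap):
+ * <pre>
+ *   final class SimpleGapList {
+ *       private Object[] data = new Object[4];
+ *       private int gapStart = 0;          // index of the first free slot
+ *       private int gapLength = 4;         // number of free slots
+ *
+ *       int size() { return data.length - gapLength; }
+ *
+ *       Object get(int index) {
+ *           return (index < gapStart) ? data[index] : data[index + gapLength];
+ *       }
+ *
+ *       void add(int index, Object o) {
+ *           if (gapLength == 0) {          // grow and put the new space into the gap
+ *               Object[] bigger = new Object[data.length * 2];
+ *               System.arraycopy(data, 0, bigger, 0, gapStart);
+ *               System.arraycopy(data, gapStart, bigger, gapStart + data.length,
+ *                       data.length - gapStart);
+ *               gapLength = data.length;
+ *               data = bigger;
+ *           }
+ *           if (index != gapStart) {       // move the gap to the insertion index
+ *               if (index < gapStart) {
+ *                   System.arraycopy(data, index, data, index + gapLength, gapStart - index);
+ *               } else {
+ *                   System.arraycopy(data, gapStart + gapLength, data, gapStart, index - gapStart);
+ *               }
+ *               gapStart = index;
+ *           }
+ *           data[gapStart++] = o;          // repeated adds at the same index are now cheap
+ *           gapLength--;
+ *       }
+ *   }
+ * </pre>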
+ * There is an intent to not degrade performance significantly + * with each extra language embedding level so the token list maintains direct + * link to the root level. + * + * @author Miloslav Metelka + * @version 1.00 + */ + + public final class EmbeddedTokenList + extends FlyOffsetGapList implements MutableTokenList { + + /** Flag for additional correctness checks (may degrade performance). */ + private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test"); + + /** + * Embedding container carries info about the token into which this + * token list is embedded. + */ + private final EmbeddingContainer embeddingContainer; // 36 bytes (32-super + 4) + + /** + * Language embedding for this embedded token list. + */ + private final LanguageEmbedding embedding; // 40 bytes + + /** + * Language path of this token list. + */ + private final LanguagePath languagePath; // 44 bytes + + /** + * Storage for lookaheads and states. + *
+ * It's non-null only initialized for mutable token lists + * or when in testing environment. + */ + private LAState laState; // 48 bytes + + /** + * Next embedded token list forming a single-linked list. + */ + private EmbeddedTokenList nextEmbedding; // 52 bytes + + public EmbeddedTokenList(EmbeddingContainer embeddingContainer, + LanguagePath languagePath, LanguageEmbedding embedding, + EmbeddedTokenList nextEmbedding) { + this.embeddingContainer = embeddingContainer; + this.languagePath = languagePath; + this.embedding = embedding; + this.nextEmbedding = nextEmbedding; + + if (embeddingContainer.rootTokenList().modCount() != -1 || testing) { + this.laState = LAState.empty(); // Store lookaheads and states + } + + init(); + } + + private void init() { + // Lex the whole input represented by token at once + LexerInputOperation lexerInputOperation = createLexerInputOperation( + 0, startOffset(), null); + AbstractToken token = lexerInputOperation.nextToken(); + while (token != null) { + updateElementOffsetAdd(token); // must subtract startOffset() + add(token); + if (laState != null) { + laState = laState.add(lexerInputOperation.lookahead(), + lexerInputOperation.lexerState()); + } + token = lexerInputOperation.nextToken(); + } + + trimToSize(); // Compact storage + if (laState != null) + laState.trimToSize(); + } + + EmbeddedTokenList nextEmbedding() { + return nextEmbedding; + } + + void setNextEmbedding(EmbeddedTokenList nextEmbedding) { + this.nextEmbedding = nextEmbedding; + } + + public LanguagePath languagePath() { + return languagePath; + } + + public int tokenCount() { + // initialized at once so no need to check whether lexing is finished + return size(); + } + + public synchronized Object tokenOrEmbeddingContainer(int index) { + // Assuming all the token are lexed since begining and after updates + return (index < size()) ? get(index) : null; + } + + private Token existingToken(int index) { + // Tokens not created lazily -> use regular unsync tokenOrEmbeddingContainer() + return LexerUtilsConstants.token(tokenOrEmbeddingContainer(index)); + } + + public int lookahead(int index) { + return (laState != null) ? laState.lookahead(index) : -1; + } + + public Object state(int index) { + return (laState != null) ? laState.state(index) : null; + } + + /** + * Returns absolute offset of the token at the given index + * (startOffset gets added to the child token's real offset). + *
+ * For token hierarchy snapshots the returned value is corrected + * in the TokenSequence explicitly by adding TokenSequence.tokenOffsetDiff. + */ + public int tokenOffset(int index) { + return elementOffset(index); + } + + public int childTokenOffset(int rawOffset) { + // Need to make sure that the startOffset is up-to-date + embeddingContainer.updateOffsets(); + return embeddingContainer.tokenStartOffset() + embedding.startSkipLength() + + childTokenRelOffset(rawOffset); + } + + /** + * Get difference between start offset of the particular child token + * against start offset of the root token. + */ + public int childTokenOffsetShift(int rawOffset) { + // Need to make sure that the startOffsetShift is up-to-date + updateStartOffset(); + return embeddingContainer.rootTokenOffsetShift() + childTokenRelOffset(rawOffset); + } + + /** + * Get child token's real offset which is always a relative value + * to startOffset value. + */ + private int childTokenRelOffset(int rawOffset) { + return (rawOffset < offsetGapStart()) + ? rawOffset + : rawOffset - offsetGapLength(); + } + + public char childTokenCharAt(int rawOffset, int index) { + // Do not update the start offset shift - the token.text() + // did it before returning its result and its contract + // specifies that. + // Return chars by delegating to rootToken + return embeddingContainer.charAt( + embedding.startSkipLength() + childTokenRelOffset(rawOffset) + index); + } + + public int modCount() { + // Delegate to root to have the most up-to-date value for token sequence's check. + return root().modCount(); + } + + protected int startOffset() { // used by FlyOffsetGapList + return embeddingContainer.tokenStartOffset() + embedding.startSkipLength(); + } + + public void updateStartOffset() { + embeddingContainer.updateOffsets(); + } + + public TokenList root() { + return embeddingContainer.rootTokenList(); + } + + public TokenHierarchyOperation tokenHierarchyOperation() { + return root().tokenHierarchyOperation(); + } + + public AbstractToken rootToken() { + return embeddingContainer.rootToken(); + } + + protected int elementRawOffset(Object elem) { + return (elem.getClass() == EmbeddingContainer.class) + ? 
((EmbeddingContainer)elem).token().rawOffset() + : ((AbstractToken)elem).rawOffset(); + } + + protected void setElementRawOffset(Object elem, int rawOffset) { + if (elem.getClass() == EmbeddingContainer.class) + ((EmbeddingContainer)elem).token().setRawOffset(rawOffset); + else + ((AbstractToken)elem).setRawOffset(rawOffset); + } + + protected boolean isElementFlyweight(Object elem) { + // token wrapper always contains non-flyweight token + return (elem.getClass() != EmbeddingContainer.class) + && ((AbstractToken)elem).isFlyweight(); + } + + protected int elementLength(Object elem) { + return LexerUtilsConstants.token(elem).length(); + } + + public synchronized AbstractToken replaceFlyToken( + int index, AbstractToken flyToken, int offset) { + TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset2Raw(offset)); + set(index, nonFlyToken); + return nonFlyToken; + } + + public synchronized void wrapToken(int index, EmbeddingContainer embeddingContainer) { + set(index, embeddingContainer); + } + + public InputAttributes inputAttributes() { + return root().inputAttributes(); + } + + // MutableTokenList extra methods + public Object tokenOrEmbeddingContainerUnsync(int index) { + return get(index); + } + + public int tokenCountCurrent() { + return size(); + } + + public LexerInputOperation createLexerInputOperation( + int tokenIndex, int relexOffset, Object relexState) { + CharSequence tokenText = embeddingContainer.token().text(); + int tokenStartOffset = embeddingContainer.tokenStartOffset(); + int endOffset = tokenStartOffset + tokenText.length() + - embedding.endSkipLength(); + // Do not need to update offset - clients + // (constructor or token list updater) call updateStartOffset() + // before calling this method + return new TextLexerInputOperation(this, tokenIndex, relexState, tokenText, + tokenStartOffset, relexOffset, endOffset); + } + + public boolean isFullyLexed() { + return true; + } + + public void replaceTokens(TokenHierarchyEventInfo eventInfo, + TokenListChange change, int removeTokenCount) { + int index = change.index(); + // Remove obsolete tokens (original offsets are retained) + Object[] removedTokensOrEmbeddingContainers = new Object[removeTokenCount]; + copyElements(index, index + removeTokenCount, removedTokensOrEmbeddingContainers, 0); + int offset = change.offset(); + for (int i = 0; i < removeTokenCount; i++) { + Object tokenOrEmbeddingContainer = removedTokensOrEmbeddingContainers[i]; + AbstractToken token = LexerUtilsConstants.token(tokenOrEmbeddingContainer); + if (!token.isFlyweight()) { + updateElementOffsetRemove(token); + token.setTokenList(null); + } + offset += token.length(); + } + remove(index, removeTokenCount); // Retain original offsets + laState.remove(index, removeTokenCount); // Remove lookaheads and states + change.setRemovedTokens(removedTokensOrEmbeddingContainers); + change.setRemovedEndOffset(offset); + + // Move and fix the gap according to the performed modification. + int diffLength = eventInfo.insertedLength() - eventInfo.removedLength(); + if (offsetGapStart() != change.offset()) { + // Minimum of the index of the first removed index and original computed index + moveOffsetGap(change.offset(), Math.min(index, change.offsetGapIndex())); + } + updateOffsetGapLength(-diffLength); + + // Add created tokens. 
+ List> addedTokens = change.addedTokens(); + if (addedTokens != null) { + for (Token token : addedTokens) { + updateElementOffsetAdd(token); + } + addAll(index, addedTokens); + laState = laState.addAll(index, change.laState()); + change.syncAddedTokenCount(); + } + } + + public boolean isContinuous() { + return true; + } + + public Set skipTokenIds() { + return null; + } + + public String toString() { + return LexerUtilsConstants.appendTokenList(null, this, -1).toString(); + } + + } Index: lexer/src/org/netbeans/lib/lexer/EmbeddingContainer.java =================================================================== RCS file: lexer/src/org/netbeans/lib/lexer/EmbeddingContainer.java diff -N lexer/src/org/netbeans/lib/lexer/EmbeddingContainer.java *** /dev/null 1 Jan 1970 00:00:00 -0000 --- lexer/src/org/netbeans/lib/lexer/EmbeddingContainer.java 28 Nov 2006 14:20:15 -0000 *************** *** 0 **** --- 1,330 ---- + /* + * The contents of this file are subject to the terms of the Common Development + * and Distribution License (the License). You may not use this file except in + * compliance with the License. + * + * You can obtain a copy of the License at http://www.netbeans.org/cddl.html + * or http://www.netbeans.org/cddl.txt. + * + * When distributing Covered Code, include this CDDL Header Notice in each file + * and include the License file at http://www.netbeans.org/cddl.txt. + * If applicable, add the following below the CDDL Header, with the fields + * enclosed by brackets [] replaced by your own identifying information: + * "Portions Copyrighted [year] [name of copyright owner]" + * + * The Original Software is NetBeans. The Initial Developer of the Original + * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun + * Microsystems, Inc. All Rights Reserved. + */ + + package org.netbeans.lib.lexer; + + import org.netbeans.api.lexer.Language; + import org.netbeans.api.lexer.LanguagePath; + import org.netbeans.api.lexer.TokenHierarchyEventType; + import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.TokenHierarchyOperation; + import org.netbeans.lib.lexer.inc.TokenChangeInfo; + import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo; + import org.netbeans.spi.lexer.LanguageEmbedding; + import org.netbeans.spi.lexer.LanguageHierarchy; + import org.netbeans.lib.lexer.token.AbstractToken; + + + /** + * Embedding info contains information about all the embeddings + * for a particular token in a token list. + *
+ * There can be one or more {@link EmbeddedTokenList} instances for each + * contained embedding. + *
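+ * For illustration only (an editor's sketch, mirroring how getEmbedding()
+ * below walks the per-token chain of embeddings):
+ * <pre>
+ *   EmbeddedTokenList etl = container.firstEmbedding();
+ *   while (etl != null) {
+ *       if (etl.languagePath().innerLanguage() == language) {
+ *           break; // embedding for the requested language already exists
+ *       }
+ *       etl = etl.nextEmbedding();
+ *   }
+ * </pre>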
+ * There is an intent to not degrade performance significantly + * with each extra language embedding level so the token list maintains direct + * link to the root level. + * + * @author Miloslav Metelka + * @version 1.00 + */ + + public final class EmbeddingContainer { + + /** Flag for additional correctness checks (may degrade performance). */ + private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test"); + + /** + * Get embedded token list. + * + * @param tokenList non-null token list in which the token for which the embedding + * should be obtained resides. + * @param index >=0 index of the token in the token list where the embedding + * should be obtained. + * @param language whether only language embeddding of the particular language + * was requested. It may be null if any embedding should be returned. + */ + public static EmbeddedTokenList getEmbedding( + TokenList tokenList, int index, Language language) { + EmbeddingContainer ec; + AbstractToken token; + EmbeddedTokenList lastEtl = null; + synchronized (tokenList.root()) { + Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index); + if (tokenOrEmbeddingContainer.getClass() == EmbeddingContainer.class) { + // Embedding container exists + @SuppressWarnings("unchecked") + EmbeddingContainer ecUC = (EmbeddingContainer)tokenOrEmbeddingContainer; + ec = ecUC; + ec.updateOffsets(); + + EmbeddedTokenList etl = ec.firstEmbedding(); + while (etl != null) { + if (language == null || etl.languagePath().innerLanguage() == language) { + @SuppressWarnings("unchecked") + EmbeddedTokenList etlUC = (EmbeddedTokenList)etl; + return etlUC; + } + lastEtl = etl; + etl = etl.nextEmbedding(); + } + token = ec.token(); + } else { + ec = null; + @SuppressWarnings("unchecked") + AbstractToken t = (AbstractToken)tokenOrEmbeddingContainer; + token = t; + if (token.isFlyweight()) { // embedding cannot exist for this flyweight token + return null; + } + } + + // Attempt to find default embedding + LanguagePath languagePath = tokenList.languagePath(); + LanguageHierarchy languageHierarchy + = LexerUtilsConstants.mostEmbeddedLanguageHierarchy(languagePath); + @SuppressWarnings("unchecked") + LanguageEmbedding embedding = (LanguageEmbedding)LexerSpiPackageAccessor.get().embedding( + languageHierarchy, token, languagePath, tokenList.inputAttributes()); + + if (embedding == null) { + // try language embeddings registered in Lookup + @SuppressWarnings("unchecked") + LanguageEmbedding embeddingUC = (LanguageEmbedding) + LanguageManager.getInstance().findLanguageEmbedding( + token, languagePath, tokenList.inputAttributes()); + embedding = embeddingUC; + } + + if (embedding != null && (language == null || language == embedding.language())) { + if (ec == null) { + ec = new EmbeddingContainer(token); + tokenList.wrapToken(index, ec); + } + LanguagePath embeddedLanguagePath = LanguagePath.get(languagePath, + embedding.language()); + EmbeddedTokenList etl = new EmbeddedTokenList(ec, + embeddedLanguagePath, embedding, null); + if (lastEtl != null) + lastEtl.setNextEmbedding(etl); + else + ec.setFirstEmbedding(etl); + return etl; + } + return null; + } + } + + /** + * Create custom embedding. + * + * @param tokenList non-null token list in which the token for which the embedding + * should be created resides. + * @param index >=0 index of the token in the token list where the embedding + * should be created. + * @param embeddedLanguage non-null embedded language. 
+ * @param startSkipLength >=0 number of characters in an initial part of the token + * for which the language embedding is being create that should be excluded + * from the embedded section. The excluded characters will not be lexed + * and there will be no tokens created for them. + * @param endSkipLength >=0 number of characters at the end of the token + * for which the language embedding is defined that should be excluded + * from the embedded section. The excluded characters will not be lexed + * and there will be no tokens created for them. + */ + public static boolean createEmbedding( + TokenList tokenList, int index, Language embeddedLanguage, + int startSkipLength, int endSkipLength, boolean joinSections) { + synchronized (tokenList.root()) { + TokenHierarchyOperation tokenHierarchyOperation = tokenList.tokenHierarchyOperation(); + // Only create embedddings for valid operations so not e.g. for removed token list + if (tokenHierarchyOperation == null) { + return false; + } + Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index); + EmbeddingContainer ec; + AbstractToken token; + if (tokenOrEmbeddingContainer.getClass() == EmbeddingContainer.class) { + // Embedding container exists + @SuppressWarnings("unchecked") + EmbeddingContainer ecUC = (EmbeddingContainer)tokenOrEmbeddingContainer; + ec = ecUC; + EmbeddedTokenList etl = ec.firstEmbedding(); + while (etl != null) { + if (embeddedLanguage == etl.languagePath().innerLanguage()) { + return false; // already exists + } + etl = etl.nextEmbedding(); + } + token = ec.token(); + } else { + @SuppressWarnings("unchecked") + AbstractToken t = (AbstractToken)tokenOrEmbeddingContainer; + token = t; + if (token.isFlyweight()) { // embedding cannot exist for this flyweight token + return false; + } + ec = new EmbeddingContainer(token); + tokenList.wrapToken(index, ec); + } + + // Add the new embedding as the first one in the single-linked list + LanguageEmbedding embedding = LanguageEmbedding.create(embeddedLanguage, + startSkipLength, endSkipLength, joinSections); + LanguagePath languagePath = tokenList.languagePath(); + LanguagePath embeddedLanguagePath = LanguagePath.get(languagePath, embeddedLanguage); + // Make the embedded token list to be the first in the list + EmbeddedTokenList etl = new EmbeddedTokenList( + ec, embeddedLanguagePath, embedding, ec.firstEmbedding()); + ec.setFirstEmbedding(etl); + // Increment mod count? - not in this case + + // Fire the embedding creation to the clients + // Threading model may need to be changed if necessary + int aOffset = ec.tokenStartOffset(); + TokenHierarchyEventInfo eventInfo = new TokenHierarchyEventInfo( + tokenHierarchyOperation, + TokenHierarchyEventType.EMBEDDING, + aOffset, 0, "", 0 + ); + eventInfo.setAffectedStartOffset(aOffset); + eventInfo.setAffectedEndOffset(aOffset + token.length()); + // Construct outer token change info + TokenChangeInfo info = new TokenChangeInfo(tokenList); + info.setIndex(index); + info.setOffset(aOffset); + //info.setAddedTokenCount(0); + eventInfo.setTokenChangeInfo(info); + + TokenChangeInfo embeddedInfo = new TokenChangeInfo(etl); + embeddedInfo.setIndex(0); + embeddedInfo.setOffset(aOffset + embedding.startSkipLength()); + // Should set number of added tokens directly? 
+ // - would prevent further lazy embedded lexing so leave to zero for now + //info.setAddedTokenCount(0); + info.addEmbeddedChange(embeddedInfo); + + // Fire the change + tokenHierarchyOperation.fireTokenHierarchyChanged( + LexerApiPackageAccessor.get().createTokenChangeEvent(eventInfo)); + } + return true; + } + + private final AbstractToken token; // 12 bytes (8-super + 4) + + /** + * Cached modification count allows to determine whether the start offset + * needs to be recomputed. + */ + private int cachedModCount; // 16 bytes + + /** + * For mutable environment this field contains root token list of the hierarchy. + * + */ + private final TokenList rootTokenList; // 20 bytes + + /** + * The token in the root token list to which this embedding container relates. + *
+     * For a first-level embedding it is the same as the value of the branchToken variable,
+     * but for deeper embeddings it points to the corresponding branch token
+     * in the root token list.
+     * <br>
+     * It is used to get the start offset of the contained tokens
+     * and to read their text.
+     */
+    private final AbstractToken rootToken; // 24 bytes
+
+    /**
+     * Cached start offset of the token for which this embedding container
+     * was created.
+     */
+    private int tokenStartOffset; // 28 bytes
+
+    /**
+     * First embedded token list in the single-linked list.
+     */
+    private EmbeddedTokenList firstEmbedding; // 32 bytes
+
+    /**
+     * Difference between the start offset of the first token in this token list
+     * and the start offset of the root token.
+     * <br>
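A small worked example of the offset shift just described; the numbers are hypothetical and the snippet only mirrors the arithmetic that charAt() below performs.

// Hypothetical numbers; mirrors rootTokenOffsetShift and charAt() below.
public class OffsetShiftExample {
    public static void main(String[] args) {
        int rootTokenStartOffset = 200;  // start of the corresponding token in the root token list
        int tokenStartOffset = 203;      // start of the (possibly nested) branch token
        int rootTokenOffsetShift = tokenStartOffset - rootTokenStartOffset; // 3

        // charAt(tokenRelOffset) can then read the character from the root token's text:
        int tokenRelOffset = 5;
        int indexInRootToken = rootTokenOffsetShift + tokenRelOffset; // 8
        System.out.println("read rootToken.charAt(" + indexInRootToken + ")");
    }
}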
+ * The offset gets refreshed upon updateStartOffset(). + */ + private int rootTokenOffsetShift; // 52 bytes + + + public EmbeddingContainer(AbstractToken token) { + this.token = token; + TokenList embeddedTokenList = token.tokenList(); + this.rootTokenList = embeddedTokenList.root(); + this.rootToken = (embeddedTokenList.getClass() == EmbeddedTokenList.class) + ? ((EmbeddedTokenList)embeddedTokenList).rootToken() + : token; + this.cachedModCount = -2; // must differ from root's one to sync offsets + updateOffsets(); + } + + public void updateOffsets() { + synchronized (rootTokenList) { + if (cachedModCount != rootTokenList.modCount()) { + cachedModCount = rootTokenList.modCount(); + tokenStartOffset = token.offset(null); + rootTokenOffsetShift = tokenStartOffset - rootToken.offset(null); + } + } + } + + public AbstractToken token() { + return token; + } + + public TokenList rootTokenList() { + return rootTokenList; + } + + public AbstractToken rootToken() { + return rootToken; + } + + public int tokenStartOffset() { + return tokenStartOffset; + } + + public int rootTokenOffsetShift() { + return rootTokenOffsetShift; + } + + public char charAt(int tokenRelOffset) { + return rootToken.charAt(rootTokenOffsetShift + tokenRelOffset); + } + + public EmbeddedTokenList firstEmbedding() { + return firstEmbedding; + } + + void setFirstEmbedding(EmbeddedTokenList firstEmbedding) { + this.firstEmbedding = firstEmbedding; + } + + } Index: lexer/src/org/netbeans/lib/lexer/LanguageManager.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/LanguageManager.java,v retrieving revision 1.6 diff -c -r1.6 LanguageManager.java *** lexer/src/org/netbeans/lib/lexer/LanguageManager.java 26 Oct 2006 20:45:23 -0000 1.6 --- lexer/src/org/netbeans/lib/lexer/LanguageManager.java 28 Nov 2006 14:20:15 -0000 *************** *** 60,76 **** } }.language(); ! private static final LanguageEmbedding NO_LANG_EMBEDDING = new LanguageEmbedding() { ! public int endSkipLength() { ! return 0; ! } ! public Language language() { ! return NO_LANG; ! } ! public int startSkipLength() { ! return 0; ! } ! }; private static LanguageManager instance = null; --- 60,67 ---- } }.language(); ! private static final LanguageEmbedding NO_LANG_EMBEDDING ! = LanguageEmbedding.create(NO_LANG, 0, 0); private static LanguageManager instance = null; *************** *** 86,93 **** private List providers = Collections.emptyList(); private HashMap>> langCache = new HashMap>>(); ! private WeakHashMap> tokenLangCache ! = new WeakHashMap>(); private final String LOCK = new String("LanguageManager.LOCK"); --- 77,84 ---- private List providers = Collections.emptyList(); private HashMap>> langCache = new HashMap>>(); ! private WeakHashMap>> tokenLangCache ! = new WeakHashMap>>(); private final String LOCK = new String("LanguageManager.LOCK"); *************** *** 125,138 **** } } ! public LanguageEmbedding findLanguageEmbedding(LanguagePath tokenLanguage, Token token, InputAttributes inputAttributes) { synchronized(LOCK) { ! WeakReference ref = tokenLangCache.get(token); ! LanguageEmbedding lang = ref == null ? null : ref.get(); if (lang == null) { for(LanguageProvider p : providers) { ! if (null != (lang = p.findLanguageEmbedding(tokenLanguage, token, inputAttributes))) { break; } } --- 116,130 ---- } } ! public LanguageEmbedding findLanguageEmbedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { synchronized(LOCK) { ! 
WeakReference> ref = tokenLangCache.get(token); ! LanguageEmbedding lang = ref == null ? null : ref.get(); if (lang == null) { for(LanguageProvider p : providers) { ! if (null != (lang = p.findLanguageEmbedding(token, languagePath, inputAttributes))) { break; } } *************** *** 141,147 **** lang = NO_LANG_EMBEDDING; } ! tokenLangCache.put(token, new WeakReference(lang)); } return lang == NO_LANG_EMBEDDING ? null : lang; --- 133,139 ---- lang = NO_LANG_EMBEDDING; } ! tokenLangCache.put(token, new WeakReference>(lang)); } return lang == NO_LANG_EMBEDDING ? null : lang; Index: lexer/src/org/netbeans/lib/lexer/LanguageOperation.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/LanguageOperation.java,v retrieving revision 1.5 diff -c -r1.5 LanguageOperation.java *** lexer/src/org/netbeans/lib/lexer/LanguageOperation.java 27 Oct 2006 12:59:07 -0000 1.5 --- lexer/src/org/netbeans/lib/lexer/LanguageOperation.java 28 Nov 2006 14:20:16 -0000 *************** *** 19,30 **** --- 19,34 ---- package org.netbeans.lib.lexer; + import java.util.Set; import org.netbeans.api.lexer.Language; + import org.netbeans.api.lexer.LanguagePath; import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.editor.util.CharSequenceUtilities; import org.netbeans.spi.lexer.LanguageHierarchy; import org.netbeans.lib.lexer.token.TextToken; + import org.netbeans.spi.lexer.LanguageEmbedding; + import org.netbeans.spi.lexer.TokenFactory; import org.netbeans.spi.lexer.TokenValidator; /** *************** *** 36,48 **** public final class LanguageOperation { ! private static final Object NULL = new Object(); private LanguageHierarchy languageHierarchy; private Language language; ! private Object[] tokenValidators; private FlyItem[] flyItems; --- 40,70 ---- public final class LanguageOperation { ! private static final int MAX_START_SKIP_LENGTH_CACHED = 10; ! ! private static final int MAX_END_SKIP_LENGTH_CACHED = 10; ! ! private static final TokenValidator NULL_VALIDATOR ! = new TokenValidator() { ! public Token validateToken(Token token, ! TokenFactory factory, ! CharSequence tokenText, int modRelOffset, ! int removedLength, CharSequence removedText, ! int insertedLength, CharSequence insertedText) { ! return null; ! } ! }; private LanguageHierarchy languageHierarchy; private Language language; ! /** Embeddings caached by start skip length and end skip length. */ ! private LanguageEmbedding[][] cachedEmbeddings; ! ! private LanguageEmbedding[][] cachedJoinSectionsEmbeddings; ! ! private TokenValidator[] tokenValidators; private FlyItem[] flyItems; *************** *** 76,95 **** return language; } ! public synchronized TokenValidator tokenValidator(TokenId id) { if (tokenValidators == null) { ! tokenValidators = new Object[language.maxOrdinal() + 1]; } // Not synced intentionally (no problem to create dup instances) ! Object o = tokenValidators[id.ordinal()]; ! if (o == null) { ! o = LexerSpiPackageAccessor.get().createTokenValidator(languageHierarchy(), id); ! if (o == null) { ! o = NULL; } ! tokenValidators[id.ordinal()] = o; } ! return (o == NULL) ? null : (TokenValidator)o; } public synchronized TextToken getFlyweightToken(T id, String text) { --- 98,117 ---- return language; } ! public synchronized TokenValidator tokenValidator(T id) { if (tokenValidators == null) { ! tokenValidators = allocateTokenValidatorArray(language.maxOrdinal() + 1); } // Not synced intentionally (no problem to create dup instances) ! 
TokenValidator validator = tokenValidators[id.ordinal()]; ! if (validator == null) { ! validator = LexerSpiPackageAccessor.get().createTokenValidator(languageHierarchy(), id); ! if (validator == null) { ! validator = nullValidator(); } ! tokenValidators[id.ordinal()] = validator; } ! return (validator == nullValidator()) ? null : validator; } public synchronized TextToken getFlyweightToken(T id, String text) { *************** *** 127,132 **** --- 149,215 ---- } assert (token != null); // Should return non-null token return token; + } + + /** + * Get cached or create a new embedding with the language of this operation + * and the given start and end skip lengths. + * @return non-null embedding. + */ + public synchronized LanguageEmbedding getEmbedding( + int startSkipLength, int endSkipLength, boolean joinSections) { + LanguageEmbedding[][] ce = joinSections ? cachedJoinSectionsEmbeddings : cachedEmbeddings; + if (ce == null || startSkipLength >= ce.length) { + if (startSkipLength > MAX_START_SKIP_LENGTH_CACHED) + return createEmbedding(startSkipLength, endSkipLength, joinSections); + @SuppressWarnings("unchecked") + LanguageEmbedding[][] tmp = (LanguageEmbedding[][]) + new LanguageEmbedding[startSkipLength + 1][]; + if (ce != null) + System.arraycopy(ce, 0, tmp, 0, ce.length); + ce = tmp; + if (joinSections) + cachedJoinSectionsEmbeddings = ce; + else + cachedEmbeddings = ce; + } + LanguageEmbedding[] byESL = ce[startSkipLength]; + if (byESL == null || endSkipLength >= byESL.length) { // given endSkipLength not cached + if (endSkipLength > MAX_END_SKIP_LENGTH_CACHED) + return createEmbedding(startSkipLength, endSkipLength, joinSections); + @SuppressWarnings("unchecked") + LanguageEmbedding[] tmp = (LanguageEmbedding[]) + new LanguageEmbedding[endSkipLength + 1]; + if (byESL != null) + System.arraycopy(byESL, 0, tmp, 0, byESL.length); + byESL = tmp; + ce[startSkipLength] = byESL; + } + LanguageEmbedding e = byESL[endSkipLength]; + if (e == null) { + e = createEmbedding(startSkipLength, endSkipLength, joinSections); + byESL[endSkipLength] = e; + } + return e; + } + + private LanguageEmbedding createEmbedding(int startSkipLength, int endSkipLength, boolean joinSections) { + return LexerSpiPackageAccessor.get().createLanguageEmbedding( + language(), startSkipLength, endSkipLength, joinSections); + } + + public Set staticLanguagePaths() { + return null; // TBD + } + + @SuppressWarnings("unchecked") + private final TokenValidator nullValidator() { + return (TokenValidator)NULL_VALIDATOR; + } + + @SuppressWarnings("unchecked") + private final TokenValidator[] allocateTokenValidatorArray(int length) { + return (TokenValidator[]) new TokenValidator[length]; } private static final class FlyItem { Index: lexer/src/org/netbeans/lib/lexer/LexerApiPackageAccessor.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/LexerApiPackageAccessor.java,v retrieving revision 1.3 diff -c -r1.3 LexerApiPackageAccessor.java *** lexer/src/org/netbeans/lib/lexer/LexerApiPackageAccessor.java 26 Oct 2006 20:45:23 -0000 1.3 --- lexer/src/org/netbeans/lib/lexer/LexerApiPackageAccessor.java 28 Nov 2006 14:20:16 -0000 *************** *** 19,31 **** package org.netbeans.lib.lexer; - import java.util.Collection; - import java.util.Map; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenHierarchyEvent; import org.netbeans.api.lexer.TokenHierarchy; import org.netbeans.api.lexer.TokenId; ! 
import org.netbeans.lib.lexer.inc.TokenListChange; import org.netbeans.spi.lexer.LanguageHierarchy; --- 19,31 ---- package org.netbeans.lib.lexer; import org.netbeans.api.lexer.Language; + import org.netbeans.api.lexer.TokenChange; import org.netbeans.api.lexer.TokenHierarchyEvent; import org.netbeans.api.lexer.TokenHierarchy; import org.netbeans.api.lexer.TokenId; ! import org.netbeans.lib.lexer.inc.TokenChangeInfo; ! import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo; import org.netbeans.spi.lexer.LanguageHierarchy; *************** *** 60,75 **** public abstract Language createLanguage( LanguageHierarchy languageHierarchy); ! public abstract LanguageHierarchy languageHierarchy( ! Language language); public abstract TokenHierarchy createTokenHierarchy( ! TokenHierarchyOperation tokenHierarchyOperation); public abstract TokenHierarchyEvent createTokenChangeEvent( ! TokenHierarchy tokenHierarchy, TokenListChange change); ! public abstract TokenHierarchyOperation tokenHierarchyOperation( ! TokenHierarchy tokenHierarchy); } --- 60,81 ---- public abstract Language createLanguage( LanguageHierarchy languageHierarchy); ! public abstract LanguageHierarchy languageHierarchy( ! Language language); public abstract TokenHierarchy createTokenHierarchy( ! TokenHierarchyOperation tokenHierarchyOperation); public abstract TokenHierarchyEvent createTokenChangeEvent( ! TokenHierarchyEventInfo info); ! public abstract TokenChange createTokenChange( ! TokenChangeInfo info); ! ! public abstract TokenChangeInfo tokenChangeInfo( ! TokenChange tokenChange); ! ! public abstract TokenHierarchyOperation tokenHierarchyOperation( ! TokenHierarchy tokenHierarchy); } Index: lexer/src/org/netbeans/lib/lexer/LexerInputOperation.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/LexerInputOperation.java,v retrieving revision 1.3 diff -c -r1.3 LexerInputOperation.java *** lexer/src/org/netbeans/lib/lexer/LexerInputOperation.java 23 Oct 2006 19:26:40 -0000 1.3 --- lexer/src/org/netbeans/lib/lexer/LexerInputOperation.java 28 Nov 2006 14:20:16 -0000 *************** *** 41,47 **** * @version 1.00 */ ! public abstract class LexerInputOperation implements CharProvider { /** Flag for additional correctness checks (may degrade performance). */ private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test"); --- 41,47 ---- * @version 1.00 */ ! public abstract class LexerInputOperation implements CharProvider { /** Flag for additional correctness checks (may degrade performance). */ private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test"); *************** *** 71,81 **** */ private int tokenLength; ! private final TokenList tokenList; private final boolean mutableInput; ! private final Lexer lexer; /** * Start of the token being currently recognized. --- 71,81 ---- */ private int tokenLength; ! private final TokenList tokenList; private final boolean mutableInput; ! private final Lexer lexer; /** * Start of the token being currently recognized. *************** *** 99,105 **** private CharProvider.ExtraPreprocessedChars extraPreprocessedChars; ! public LexerInputOperation(TokenList tokenList, int tokenIndex, Object lexerRestartState) { this.tokenList = tokenList; this.mutableInput = (tokenList.modCount() != -1); // Determine flySequenceLength setting --- 99,105 ---- private CharProvider.ExtraPreprocessedChars extraPreprocessedChars; ! 
public LexerInputOperation(TokenList tokenList, int tokenIndex, Object lexerRestartState) { this.tokenList = tokenList; this.mutableInput = (tokenList.modCount() != -1); // Determine flySequenceLength setting *************** *** 110,117 **** } LanguagePath languagePath = tokenList.languagePath(); ! LanguageOperation languageOperation = LexerUtilsConstants.languageOperation(languagePath); ! TokenFactory tokenFactory = LexerSpiPackageAccessor.get().createTokenFactory(this); // Check whether character preprocessing is necessary CharPreprocessor p = LexerSpiPackageAccessor.get().createCharPreprocessor( --- 110,117 ---- } LanguagePath languagePath = tokenList.languagePath(); ! LanguageOperation languageOperation = LexerUtilsConstants.mostEmbeddedLanguageOperation(languagePath); ! TokenFactory tokenFactory = LexerSpiPackageAccessor.get().createTokenFactory(this); // Check whether character preprocessing is necessary CharPreprocessor p = LexerSpiPackageAccessor.get().createCharPreprocessor( *************** *** 130,137 **** LexerInput lexerInput = LexerSpiPackageAccessor.get().createLexerInput( (preprocessorOperation != null) ? preprocessorOperation : this); ! @SuppressWarnings("unchecked") ! LexerRestartInfo info = LexerSpiPackageAccessor.get().createLexerRestartInfo( lexerInput, tokenFactory, lexerRestartState, tokenList.languagePath(), inputAttributes()); lexer = LexerSpiPackageAccessor.get().createLexer( --- 130,136 ---- LexerInput lexerInput = LexerSpiPackageAccessor.get().createLexerInput( (preprocessorOperation != null) ? preprocessorOperation : this); ! LexerRestartInfo info = LexerSpiPackageAccessor.get().createLexerRestartInfo( lexerInput, tokenFactory, lexerRestartState, tokenList.languagePath(), inputAttributes()); lexer = LexerSpiPackageAccessor.get().createLexer( *************** *** 142,150 **** public abstract char readExisting(int index); ! public abstract void approveToken(AbstractToken token); ! public Set skipTokenIds() { return tokenList.skipTokenIds(); } --- 141,149 ---- public abstract char readExisting(int index); ! public abstract void approveToken(AbstractToken token); ! public Set skipTokenIds() { return tokenList.skipTokenIds(); } *************** *** 220,226 **** lookaheadIndex -= tokenLength; } ! protected final TokenList tokenList() { return tokenList; } --- 219,225 ---- lookaheadIndex -= tokenLength; } ! protected final TokenList tokenList() { return tokenList; } *************** *** 244,254 **** return isMutableInput() || testing; } ! public AbstractToken nextToken() { assert (!lexerFinished); ! AbstractToken token; ! do { ! token = (AbstractToken)lexer().nextToken(); if (token == null) { LexerUtilsConstants.checkLexerInputFinished( (preprocessorOperation != null) ? (CharProvider)preprocessorOperation : this, this); --- 243,253 ---- return isMutableInput() || testing; } ! public AbstractToken nextToken() { assert (!lexerFinished); ! while (true) { ! @SuppressWarnings("unchecked") ! AbstractToken token = (AbstractToken)lexer().nextToken(); if (token == null) { LexerUtilsConstants.checkLexerInputFinished( (preprocessorOperation != null) ? (CharProvider)preprocessorOperation : this, this); *************** *** 257,264 **** } else { approveToken(token); } ! } while (token == TokenFactory.SKIP_TOKEN); ! return token; } /** --- 256,265 ---- } else { approveToken(token); } ! if (token == TokenFactory.SKIP_TOKEN) ! continue; // Fetch next token ! return token; ! } } /** *************** *** 299,305 **** preprocessErrorList.add(error); } ! 
public final void initPreprocessedToken(AbstractToken token) { CharPreprocessorError error = null; if (preprocessErrorList != null && preprocessErrorList.size() > 0) { for (int i = preprocessErrorList.size() - 1; i >= 0; i--) { --- 300,306 ---- preprocessErrorList.add(error); } ! public final void initPreprocessedToken(AbstractToken token) { CharPreprocessorError error = null; if (preprocessErrorList != null && preprocessErrorList.size() > 0) { for (int i = preprocessErrorList.size() - 1; i >= 0; i--) { *************** *** 329,336 **** // No extra preprocessed characters } ! public final LanguageOperation languageOperation() { ! return LexerUtilsConstants.languageOperation(tokenList.languagePath()); } public final Object lexerState() { --- 330,337 ---- // No extra preprocessed characters } ! public final LanguageOperation languageOperation() { ! return LexerUtilsConstants.mostEmbeddedLanguageOperation(tokenList.languagePath()); } public final Object lexerState() { *************** *** 353,359 **** flySequenceLength = 0; } ! protected final boolean isSkipToken(AbstractToken token) { return (token == TokenFactory.SKIP_TOKEN); } --- 354,360 ---- flySequenceLength = 0; } ! protected final boolean isSkipToken(AbstractToken token) { return (token == TokenFactory.SKIP_TOKEN); } Index: lexer/src/org/netbeans/lib/lexer/LexerSpiPackageAccessor.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/LexerSpiPackageAccessor.java,v retrieving revision 1.5 diff -c -r1.5 LexerSpiPackageAccessor.java *** lexer/src/org/netbeans/lib/lexer/LexerSpiPackageAccessor.java 26 Oct 2006 20:45:23 -0000 1.5 --- lexer/src/org/netbeans/lib/lexer/LexerSpiPackageAccessor.java 28 Nov 2006 14:20:16 -0000 *************** *** 68,107 **** public abstract Map> createTokenCategories(LanguageHierarchy languageHierarchy); ! public abstract Lexer createLexer(LanguageHierarchy languageHierarchy, LexerRestartInfo info); public abstract LexerRestartInfo createLexerRestartInfo( LexerInput input, TokenFactory tokenFactory, Object state, LanguagePath languagePath, InputAttributes inputAttributes); ! public abstract String mimeType(LanguageHierarchy languageHierarchy); ! public abstract LanguageEmbedding embedding( ! LanguageHierarchy languageHierarchy, Token token, boolean tokenComplete, LanguagePath languagePath, InputAttributes inputAttributes); ! public abstract TokenValidator createTokenValidator(LanguageHierarchy languageHierarchy, TokenId id); ! public abstract CharPreprocessor createCharPreprocessor(LanguageHierarchy languageHierarchy); ! public abstract boolean isRetainTokenText(LanguageHierarchy languageHierarchy, TokenId id); ! public abstract LanguageOperation operation(LanguageHierarchy languageHierarchy); ! public abstract LexerInput createLexerInput(CharProvider charProvider); public abstract void init(CharPreprocessor preprocessor, CharPreprocessorOperation operation); public abstract void preprocessChar(CharPreprocessor preprocessor); public abstract Language language(MutableTextInput mti); ! public abstract CharSequence text(MutableTextInput mti); ! public abstract InputAttributes inputAttributes(MutableTextInput mti); ! public abstract Object inputSource(MutableTextInput mti); ! public abstract TokenFactory createTokenFactory(LexerInputOperation lexerInputOperation); } --- 68,110 ---- public abstract Map> createTokenCategories(LanguageHierarchy languageHierarchy); ! 
public abstract Lexer createLexer(LanguageHierarchy languageHierarchy, LexerRestartInfo info); public abstract LexerRestartInfo createLexerRestartInfo( LexerInput input, TokenFactory tokenFactory, Object state, LanguagePath languagePath, InputAttributes inputAttributes); ! public abstract String mimeType(LanguageHierarchy languageHierarchy); ! public abstract LanguageEmbedding embedding( ! LanguageHierarchy languageHierarchy, Token token, LanguagePath languagePath, InputAttributes inputAttributes); ! public abstract TokenValidator createTokenValidator(LanguageHierarchy languageHierarchy, T id); ! public abstract CharPreprocessor createCharPreprocessor(LanguageHierarchy languageHierarchy); ! public abstract boolean isRetainTokenText(LanguageHierarchy languageHierarchy, T id); ! public abstract LanguageOperation operation(LanguageHierarchy languageHierarchy); ! public abstract LexerInput createLexerInput(CharProvider charProvider); public abstract void init(CharPreprocessor preprocessor, CharPreprocessorOperation operation); public abstract void preprocessChar(CharPreprocessor preprocessor); public abstract Language language(MutableTextInput mti); + + public abstract LanguageEmbedding createLanguageEmbedding( + Language language, int startSkipLength, int endSkipLength, boolean joinSections); ! public abstract CharSequence text(MutableTextInput mti); ! public abstract InputAttributes inputAttributes(MutableTextInput mti); ! public abstract I inputSource(MutableTextInput mti); ! public abstract TokenFactory createTokenFactory(LexerInputOperation lexerInputOperation); } Index: lexer/src/org/netbeans/lib/lexer/LexerUtilsConstants.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/LexerUtilsConstants.java,v retrieving revision 1.4 diff -c -r1.4 LexerUtilsConstants.java *** lexer/src/org/netbeans/lib/lexer/LexerUtilsConstants.java 26 Oct 2006 20:45:23 -0000 1.4 --- lexer/src/org/netbeans/lib/lexer/LexerUtilsConstants.java 28 Nov 2006 14:20:16 -0000 *************** *** 26,32 **** import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.editor.util.ArrayUtilities; import org.netbeans.lib.lexer.inc.SnapshotTokenList; - import org.netbeans.spi.lexer.CharPreprocessor; import org.netbeans.spi.lexer.LanguageHierarchy; import org.netbeans.spi.lexer.LexerInput; import org.netbeans.lib.lexer.token.AbstractToken; --- 26,31 ---- *************** *** 128,166 **** } } ! public static LanguageHierarchy languageHierarchy(Language language) { return LexerApiPackageAccessor.get().languageHierarchy(language); } ! public static LanguageHierarchy languageHierarchy(LanguagePath languagePath) { ! return languageHierarchy(languagePath.innerLanguage()); ! } ! ! public static LanguageOperation languageOperation(Language language) { return LexerSpiPackageAccessor.get().operation(languageHierarchy(language)); } ! public static LanguageOperation languageOperation(LanguagePath languagePath) { ! return LexerSpiPackageAccessor.get().operation(languageHierarchy(languagePath)); } ! public static AbstractToken token(Object tokenOrBranch) { ! return (tokenOrBranch.getClass() == BranchTokenList.class) ! ? ((BranchTokenList)tokenOrBranch).branchToken() ! : (AbstractToken)tokenOrBranch; } ! public static AbstractToken token(TokenList tokenList, int index) { ! return token(tokenList.tokenOrBranch(index)); } ! public static StringBuilder appendTokenList(StringBuilder sb, TokenList tokenList, int currentIndex) { if (sb == null) { sb = new StringBuilder(); } ! 
TokenHierarchy tokenHierarchy; if (tokenList instanceof SnapshotTokenList) { ! tokenHierarchy = ((SnapshotTokenList)tokenList).snapshot().tokenHierarchy(); sb.append(tokenList).append('\n'); } else { tokenHierarchy = null; --- 127,201 ---- } } ! public static LanguageHierarchy languageHierarchy(Language language) { return LexerApiPackageAccessor.get().languageHierarchy(language); } ! public static LanguageOperation languageOperation(Language language) { return LexerSpiPackageAccessor.get().operation(languageHierarchy(language)); } ! /** ! * Returns the most embedded language in the given language path. ! *
! * The method casts the resulting language to the generic type requested by the caller. ! */ ! public static Language mostEmbeddedLanguage(LanguagePath languagePath) { ! @SuppressWarnings("unchecked") ! Language l = (Language)languagePath.innerLanguage(); ! return l; ! } ! ! /** ! * Returns language hierarchy of the most embedded language in the given language path. ! *
! * The method casts the resulting language hierarchy to the generic type requested by the caller. ! */ ! public static LanguageHierarchy mostEmbeddedLanguageHierarchy(LanguagePath languagePath) { ! @SuppressWarnings("unchecked") ! LanguageHierarchy lh = (LanguageHierarchy)languageHierarchy(languagePath.innerLanguage()); ! return lh; ! } ! ! /** ! * Returns language operation of the most embedded language in the given language path. ! *
! * The method casts the resulting language operation to the generic type requested by the caller. ! */ ! public static LanguageOperation mostEmbeddedLanguageOperation(LanguagePath languagePath) { ! @SuppressWarnings("unchecked") ! LanguageOperation lo = (LanguageOperation)LexerSpiPackageAccessor.get().operation( ! mostEmbeddedLanguageHierarchy(languagePath)); ! return lo; } ! /** ! * Returns token from the given object which is either the token ! * or an embedding container. ! *
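A hypothetical call site for the helpers above. The generic type parameters were lost in this copy of the diff, so the sketch assumes the <T extends TokenId> signatures implied by the javadoc; MyTokenId is purely illustrative and not part of the patch.

import org.netbeans.api.lexer.Language;
import org.netbeans.api.lexer.LanguagePath;
import org.netbeans.api.lexer.TokenId;
import org.netbeans.lib.lexer.LexerUtilsConstants;

public class MostEmbeddedExample {
    // Illustrative token ids; any concrete TokenId enum of the inner language would do.
    enum MyTokenId implements TokenId {
        TEXT;
        public String primaryCategory() { return "text"; }
    }

    static Language<MyTokenId> innerLanguage(LanguagePath path) {
        // The caller's target type drives the single unchecked cast inside the helper.
        Language<MyTokenId> language = LexerUtilsConstants.mostEmbeddedLanguage(path);
        return language;
    }
}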
! * The method casts the resulting token to the generic type requested by the caller. ! */ ! public static AbstractToken token(Object tokenOrEmbeddingContainer) { ! @SuppressWarnings("unchecked") ! AbstractToken token = (AbstractToken) ! ((tokenOrEmbeddingContainer.getClass() == EmbeddingContainer.class) ! ? ((EmbeddingContainer)tokenOrEmbeddingContainer).token() ! : (AbstractToken)tokenOrEmbeddingContainer); ! return token; } ! public static AbstractToken token(TokenList tokenList, int index) { ! return token(tokenList.tokenOrEmbeddingContainer(index)); } ! public static StringBuilder appendTokenList(StringBuilder sb, ! TokenList tokenList, int currentIndex) { if (sb == null) { sb = new StringBuilder(); } ! TokenHierarchy tokenHierarchy; if (tokenList instanceof SnapshotTokenList) { ! tokenHierarchy = ((SnapshotTokenList)tokenList).snapshot().tokenHierarchy(); sb.append(tokenList).append('\n'); } else { tokenHierarchy = null; *************** *** 171,183 **** for (int i = 0; i < tokenCount; i++) { sb.append((i == currentIndex) ? '*' : ' '); ArrayUtilities.appendBracketedIndex(sb, i, digitCount); ! Object tokenOrBranch = tokenList.tokenOrBranch(i); ! if (tokenOrBranch == null) { System.err.println("tokenList=" + tokenList + ", i=" + i); } ! sb.append((tokenOrBranch.getClass() == BranchTokenList.class) ? '<' : ' '); sb.append(": "); ! AbstractToken token = token(tokenOrBranch); sb.append(token.dumpInfo(tokenHierarchy)); sb.append('\n'); } --- 206,218 ---- for (int i = 0; i < tokenCount; i++) { sb.append((i == currentIndex) ? '*' : ' '); ArrayUtilities.appendBracketedIndex(sb, i, digitCount); ! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(i); ! if (tokenOrEmbeddingContainer == null) { System.err.println("tokenList=" + tokenList + ", i=" + i); } ! sb.append((tokenOrEmbeddingContainer.getClass() == EmbeddingContainer.class) ? '<' : ' '); sb.append(": "); ! AbstractToken token = token(tokenOrEmbeddingContainer); sb.append(token.dumpInfo(tokenHierarchy)); sb.append('\n'); } Index: lexer/src/org/netbeans/lib/lexer/PreprocessedTextLexerInputOperation.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/PreprocessedTextLexerInputOperation.java,v retrieving revision 1.2 diff -c -r1.2 PreprocessedTextLexerInputOperation.java *** lexer/src/org/netbeans/lib/lexer/PreprocessedTextLexerInputOperation.java 4 Oct 2006 17:03:17 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/PreprocessedTextLexerInputOperation.java 28 Nov 2006 14:20:16 -0000 *************** *** 19,28 **** package org.netbeans.lib.lexer; - import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; import org.netbeans.spi.lexer.LexerInput; - import org.netbeans.lib.lexer.token.AbstractToken; /** * Used when branching a token with preprocessed text. --- 19,26 ---- *************** *** 31,37 **** * @version 1.00 */ ! public final class PreprocessedTextLexerInputOperation extends TextLexerInputOperation { private final PreprocessedTextStorage preprocessedText; --- 29,35 ---- * @version 1.00 */ ! public final class PreprocessedTextLexerInputOperation extends TextLexerInputOperation { private final PreprocessedTextStorage preprocessedText; *************** *** 45,55 **** private int tokenEndRawLengthShift; ! public PreprocessedTextLexerInputOperation(TokenList tokenList, PreprocessedTextStorage prepText) { this(tokenList, 0, null, prepText, 0, 0, prepText.length()); } ! 
public PreprocessedTextLexerInputOperation(TokenList tokenList, int tokenIndex, Object lexerRestartState, PreprocessedTextStorage prepText, int prepTextStartOffset, int startOffset, int endOffset) { super(tokenList, tokenIndex, lexerRestartState, prepText, --- 43,53 ---- private int tokenEndRawLengthShift; ! public PreprocessedTextLexerInputOperation(TokenList tokenList, PreprocessedTextStorage prepText) { this(tokenList, 0, null, prepText, 0, 0, prepText.length()); } ! public PreprocessedTextLexerInputOperation(TokenList tokenList, int tokenIndex, Object lexerRestartState, PreprocessedTextStorage prepText, int prepTextStartOffset, int startOffset, int endOffset) { super(tokenList, tokenIndex, lexerRestartState, prepText, Index: lexer/src/org/netbeans/lib/lexer/SubSequenceTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/SubSequenceTokenList.java,v retrieving revision 1.5 diff -c -r1.5 SubSequenceTokenList.java *** lexer/src/org/netbeans/lib/lexer/SubSequenceTokenList.java 23 Oct 2006 12:41:47 -0000 1.5 --- lexer/src/org/netbeans/lib/lexer/SubSequenceTokenList.java 28 Nov 2006 14:20:16 -0000 *************** *** 39,55 **** * @version 1.00 */ ! public final class SubSequenceTokenList implements TokenList { /** * Token list to which this filtering token list delegates. */ ! private TokenList tokenList; /** * Last retrieved token's end offset. */ ! private AbstractToken lastToken; /** * Last retrieved token index. --- 39,55 ---- * @version 1.00 */ ! public final class SubSequenceTokenList implements TokenList { /** * Token list to which this filtering token list delegates. */ ! private TokenList tokenList; /** * Last retrieved token's end offset. */ ! private AbstractToken lastToken; /** * Last retrieved token index. *************** *** 83,89 **** */ private int limitEndIndex; ! public SubSequenceTokenList(TokenList tokenList, int limitStartOffset, int limitEndOffset) { this.tokenList = tokenList; this.limitStartOffset = limitStartOffset; this.limitEndOffset = limitEndOffset; --- 83,89 ---- */ private int limitEndIndex; ! public SubSequenceTokenList(TokenList tokenList, int limitStartOffset, int limitEndOffset) { this.tokenList = tokenList; this.limitStartOffset = limitStartOffset; this.limitEndOffset = limitEndOffset; *************** *** 94,104 **** if (diff != Integer.MAX_VALUE) { // some tokens exist if (diff >= lastToken.length()) { lastTokenIndex++; ! Object tokenOrBranch = tokenList.tokenOrBranch(lastTokenIndex); ! if (tokenOrBranch != null && (lastTokenOffset = tokenList.tokenOffset(lastTokenIndex)) < limitEndOffset ) { ! lastToken = LexerUtilsConstants.token(tokenOrBranch); limitStartIndex = lastTokenIndex; limitEndIndex = Integer.MAX_VALUE; // To be computed later } // Otherwise limitStartIndex and limitEndIndex remain zero => no tokens --- 94,104 ---- if (diff != Integer.MAX_VALUE) { // some tokens exist if (diff >= lastToken.length()) { lastTokenIndex++; ! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(lastTokenIndex); ! if (tokenOrEmbeddingContainer != null && (lastTokenOffset = tokenList.tokenOffset(lastTokenIndex)) < limitEndOffset ) { ! 
lastToken = LexerUtilsConstants.token(tokenOrEmbeddingContainer); limitStartIndex = lastTokenIndex; limitEndIndex = Integer.MAX_VALUE; // To be computed later } // Otherwise limitStartIndex and limitEndIndex remain zero => no tokens *************** *** 113,127 **** } else {// Lower bound is zero => limitStartIndex is zero // Check first token (done here for simpler tokenCount() etc.) ! Object tokenOrBranch = tokenList.tokenOrBranch(0); ! if (tokenOrBranch != null && (lastTokenOffset = tokenList.tokenOffset(0)) < limitEndOffset) { ! lastToken = LexerUtilsConstants.token(tokenOrBranch); // lastTokenIndex remains zero limitEndIndex = Integer.MAX_VALUE; } // Otherwise limitEndIndex remain zero => no tokens } } ! public TokenList delegate() { return tokenList; } --- 113,127 ---- } else {// Lower bound is zero => limitStartIndex is zero // Check first token (done here for simpler tokenCount() etc.) ! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(0); ! if (tokenOrEmbeddingContainer != null && (lastTokenOffset = tokenList.tokenOffset(0)) < limitEndOffset) { ! lastToken = LexerUtilsConstants.token(tokenOrEmbeddingContainer); // lastTokenIndex remains zero limitEndIndex = Integer.MAX_VALUE; } // Otherwise limitEndIndex remain zero => no tokens } } ! public TokenList delegate() { return tokenList; } *************** *** 133,139 **** return limitEndOffset; } ! public Object tokenOrBranch(int index) { if (limitStartIndex == -1) // No tokens return null; index += limitStartIndex; // Shift to underlying tokenList indices --- 133,139 ---- return limitEndOffset; } ! public Object tokenOrEmbeddingContainer(int index) { if (limitStartIndex == -1) // No tokens return null; index += limitStartIndex; // Shift to underlying tokenList indices *************** *** 142,149 **** case -1: // Prev to lastToken - must exist if (index < limitStartIndex) return null; ! Object tokenOrBranch = tokenList.tokenOrBranch(index); ! AbstractToken token = LexerUtilsConstants.token(tokenOrBranch); lastTokenIndex = index; // If the token list is continuous or the original token // is flyweight (there cannot be a gap before flyweight token) --- 142,149 ---- case -1: // Prev to lastToken - must exist if (index < limitStartIndex) return null; ! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index); ! AbstractToken token = LexerUtilsConstants.token(tokenOrEmbeddingContainer); lastTokenIndex = index; // If the token list is continuous or the original token // is flyweight (there cannot be a gap before flyweight token) *************** *** 154,168 **** else // Compute offset through tokenList lastTokenOffset = tokenList.tokenOffset(index); lastToken = token; ! return tokenOrBranch; case 0: // Last token return lastToken; case 1: // Next to lastToken ! tokenOrBranch = tokenList.tokenOrBranch(index); ! if (tokenOrBranch != null) { ! token = LexerUtilsConstants.token(tokenOrBranch); // If the token list is continuous or the fetched token // is flyweight (there cannot be a gap before flyweight token) // the original offset can be just increased --- 154,168 ---- else // Compute offset through tokenList lastTokenOffset = tokenList.tokenOffset(index); lastToken = token; ! return tokenOrEmbeddingContainer; case 0: // Last token return lastToken; case 1: // Next to lastToken ! tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index); ! if (tokenOrEmbeddingContainer != null) { ! 
token = LexerUtilsConstants.token(tokenOrEmbeddingContainer); // If the token list is continuous or the fetched token // is flyweight (there cannot be a gap before flyweight token) // the original offset can be just increased *************** *** 177,198 **** lastToken = token; lastTokenIndex = index; lastTokenOffset = tokenOffset; ! return tokenOrBranch; } // above upper bound } limitEndIndex = index; // lastToken at prev index was valid so may assign this return null; default: // Not related to lastToken ! tokenOrBranch = tokenList.tokenOrBranch(index); ! if (tokenOrBranch != null) { int tokenOffset = tokenList.tokenOffset(index); // Check the offset to be below upper bound if (tokenOffset < limitEndOffset) { // below upper offset bound ! lastToken = LexerUtilsConstants.token(tokenOrBranch); lastTokenIndex = index; lastTokenOffset = tokenOffset; ! return tokenOrBranch; } // >=limitEndOffset } // index too high // As the null gets returned all the tokens that could --- 177,198 ---- lastToken = token; lastTokenIndex = index; lastTokenOffset = tokenOffset; ! return tokenOrEmbeddingContainer; } // above upper bound } limitEndIndex = index; // lastToken at prev index was valid so may assign this return null; default: // Not related to lastToken ! tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index); ! if (tokenOrEmbeddingContainer != null) { int tokenOffset = tokenList.tokenOffset(index); // Check the offset to be below upper bound if (tokenOffset < limitEndOffset) { // below upper offset bound ! lastToken = LexerUtilsConstants.token(tokenOrEmbeddingContainer); lastTokenIndex = index; lastTokenOffset = tokenOffset; ! return tokenOrEmbeddingContainer; } // >=limitEndOffset } // index too high // As the null gets returned all the tokens that could *************** *** 208,214 **** // As limitEndIndex is inited it will no longer use lastToken caching // because TokenSequence will use its own similar caching for token offsets. return (index < limitEndIndex) ! ? tokenList.tokenOrBranch(index) : null; } } --- 208,214 ---- // As limitEndIndex is inited it will no longer use lastToken caching // because TokenSequence will use its own similar caching for token offsets. return (index < limitEndIndex) ! ? tokenList.tokenOrEmbeddingContainer(index) : null; } } *************** *** 240,247 **** return tcc - limitStartIndex; } ! public AbstractToken createNonFlyToken(int index, AbstractToken flyToken, int offset) { ! return tokenList.createNonFlyToken(index + limitStartIndex, flyToken, offset); } public int modCount() { --- 240,247 ---- return tcc - limitStartIndex; } ! public AbstractToken replaceFlyToken(int index, AbstractToken flyToken, int offset) { ! return tokenList.replaceFlyToken(index + limitStartIndex, flyToken, offset); } public int modCount() { *************** *** 260,273 **** throw new IllegalStateException("Unexpected call."); } ! public void wrapToken(int index, BranchTokenList wrapper) { ! tokenList.wrapToken(limitStartIndex + index, wrapper); } ! public TokenList root() { return tokenList.root(); } public InputAttributes inputAttributes() { return tokenList.inputAttributes(); } --- 260,277 ---- throw new IllegalStateException("Unexpected call."); } ! public void wrapToken(int index, EmbeddingContainer embeddingContainer) { ! tokenList.wrapToken(limitStartIndex + index, embeddingContainer); } ! 
public TokenList root() { return tokenList.root(); } + public TokenHierarchyOperation tokenHierarchyOperation() { + return tokenList.tokenHierarchyOperation(); + } + public InputAttributes inputAttributes() { return tokenList.inputAttributes(); } *************** *** 285,295 **** return tokenList.isContinuous(); } ! public Set skipTokenIds() { return tokenList.skipTokenIds(); } ! private AbstractToken token(int index) { return LexerUtilsConstants.token(tokenList, index); } --- 289,299 ---- return tokenList.isContinuous(); } ! public Set skipTokenIds() { return tokenList.skipTokenIds(); } ! private AbstractToken token(int index) { return LexerUtilsConstants.token(tokenList, index); } *************** *** 305,311 **** private int move(int offset) { int tokenCount = tokenList.tokenCountCurrent(); // presently created token count if (tokenCount == 0) { // no tokens yet -> attempt to create at least one ! if (tokenList.tokenOrBranch(0) == null) { // really no tokens at all // In this case the token sequence could not be positioned yet // so no need to reset "index" or other vars return Integer.MAX_VALUE; --- 309,315 ---- private int move(int offset) { int tokenCount = tokenList.tokenCountCurrent(); // presently created token count if (tokenCount == 0) { // no tokens yet -> attempt to create at least one ! if (tokenList.tokenOrEmbeddingContainer(0) == null) { // really no tokens at all // In this case the token sequence could not be positioned yet // so no need to reset "index" or other vars return Integer.MAX_VALUE; *************** *** 323,331 **** lastToken = token(tokenCount - 1); int tokenLength = lastToken.length(); while (offset >= lastTokenOffset + tokenLength) { // above present token ! Object tokenOrBranch = tokenList.tokenOrBranch(tokenCount); ! if (tokenOrBranch != null) { ! lastToken = LexerUtilsConstants.token(tokenOrBranch); if (lastToken.isFlyweight()) { // need to use previous tokenLength lastTokenOffset += tokenLength; } else { // non-flyweight token - retrieve offset --- 327,335 ---- lastToken = token(tokenCount - 1); int tokenLength = lastToken.length(); while (offset >= lastTokenOffset + tokenLength) { // above present token ! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(tokenCount); ! if (tokenOrEmbeddingContainer != null) { ! lastToken = LexerUtilsConstants.token(tokenOrEmbeddingContainer); if (lastToken.isFlyweight()) { // need to use previous tokenLength lastTokenOffset += tokenLength; } else { // non-flyweight token - retrieve offset Index: lexer/src/org/netbeans/lib/lexer/TextLexerInputOperation.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/TextLexerInputOperation.java,v retrieving revision 1.2 diff -c -r1.2 TextLexerInputOperation.java *** lexer/src/org/netbeans/lib/lexer/TextLexerInputOperation.java 4 Oct 2006 17:03:17 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/TextLexerInputOperation.java 28 Nov 2006 14:20:16 -0000 *************** *** 19,25 **** package org.netbeans.lib.lexer; - import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; import org.netbeans.spi.lexer.LexerInput; import org.netbeans.lib.lexer.token.AbstractToken; --- 19,24 ---- *************** *** 31,37 **** * @version 1.00 */ ! public class TextLexerInputOperation extends LexerInputOperation { /** * Input text from which the reading of characters is done. --- 30,36 ---- * @version 1.00 */ ! 
public class TextLexerInputOperation extends LexerInputOperation { /** * Input text from which the reading of characters is done. *************** *** 46,56 **** private int readEndIndex; ! public TextLexerInputOperation(TokenList tokenList, CharSequence inputText) { this(tokenList, 0, null, inputText, 0, 0, inputText.length()); } ! public TextLexerInputOperation(TokenList tokenList, int tokenIndex, Object lexerRestartState, CharSequence inputText, int inputTextStartOffset, int startOffset, int endOffset) { super(tokenList, tokenIndex, lexerRestartState); --- 45,55 ---- private int readEndIndex; ! public TextLexerInputOperation(TokenList tokenList, CharSequence inputText) { this(tokenList, 0, null, inputText, 0, 0, inputText.length()); } ! public TextLexerInputOperation(TokenList tokenList, int tokenIndex, Object lexerRestartState, CharSequence inputText, int inputTextStartOffset, int startOffset, int endOffset) { super(tokenList, tokenIndex, lexerRestartState); *************** *** 79,85 **** return inputText.charAt(tokenStartIndex() + index); } ! public void approveToken(AbstractToken token) { int tokenLength = tokenLength(); if (isSkipToken(token)) { preventFlyToken(); --- 78,84 ---- return inputText.charAt(tokenStartIndex() + index); } ! public void approveToken(AbstractToken token) { int tokenLength = tokenLength(); if (isSkipToken(token)) { preventFlyToken(); Index: lexer/src/org/netbeans/lib/lexer/TokenHierarchyOperation.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/TokenHierarchyOperation.java,v retrieving revision 1.4 diff -c -r1.4 TokenHierarchyOperation.java *** lexer/src/org/netbeans/lib/lexer/TokenHierarchyOperation.java 26 Oct 2006 20:45:24 -0000 1.4 --- lexer/src/org/netbeans/lib/lexer/TokenHierarchyOperation.java 28 Nov 2006 14:20:16 -0000 *************** *** 33,41 **** --- 33,44 ---- import org.netbeans.lib.lexer.batch.CopyTextTokenList; import org.netbeans.lib.lexer.batch.TextTokenList; import org.netbeans.lib.lexer.inc.IncTokenList; + import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo; import org.netbeans.lib.lexer.inc.TokenListUpdater; import org.netbeans.spi.lexer.MutableTextInput; import org.netbeans.api.lexer.InputAttributes; + import org.netbeans.api.lexer.LanguagePath; + import org.netbeans.api.lexer.TokenHierarchyEventType; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.lexer.inc.SnapshotTokenList; import org.netbeans.lib.lexer.inc.TokenListChange; *************** *** 52,58 **** * @version 1.00 */ ! public final class TokenHierarchyOperation { // "I" stands for input /** * The token hierarchy delegating to this operation. --- 55,61 ---- * @version 1.00 */ ! public final class TokenHierarchyOperation { // "I" stands for input /** * The token hierarchy delegating to this operation. *************** *** 66,72 **** */ private MutableTextInput mutableTextInput; ! private TokenList tokenList; /** * The hierarchy can be made inactive to release the tokens --- 69,75 ---- */ private MutableTextInput mutableTextInput; ! private TokenList tokenList; /** * The hierarchy can be made inactive to release the tokens *************** *** 80,86 **** /** * Primary token hierarchy for snapshot. */ ! private TokenHierarchyOperation liveTokenHierarchyOperation; /** * References to active snapshots. --- 83,89 ---- /** * Primary token hierarchy for snapshot. */ ! private TokenHierarchyOperation liveTokenHierarchyOperation; /** * References to active snapshots. 
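The listener-list and event-info changes below deliver TokenHierarchyEvents for both text modifications and newly created custom embeddings. A minimal client sketch follows, assuming that TokenHierarchy exposes addTokenHierarchyListener() delegating to the operation shown here, that the listener interface has the single tokenHierarchyChanged() method, and that the event exposes affectedStartOffset()/affectedEndOffset() accessors matching the event-info setters used in this patch.

import org.netbeans.api.lexer.TokenHierarchy;
import org.netbeans.api.lexer.TokenHierarchyEvent;
import org.netbeans.api.lexer.TokenHierarchyListener;

public class AffectedRangeListener implements TokenHierarchyListener {
    public void tokenHierarchyChanged(TokenHierarchyEvent evt) {
        // React only to the text range whose tokens were affected
        // (modifications as well as newly created embeddings end up here).
        int start = evt.affectedStartOffset();
        int end = evt.affectedEndOffset();
        System.out.println("tokens changed in <" + start + "," + end + ">");
    }

    public static void install(TokenHierarchy hierarchy) {
        hierarchy.addTokenHierarchyListener(new AffectedRangeListener());
    }
}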
*************** *** 93,105 **** private EventListenerList listenerList; private boolean snapshotReleased; /** * Constructor for reader as input. */ public TokenHierarchyOperation(Reader inputReader, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! this.tokenList = new CopyTextTokenList(inputReader, language, skipTokenIds, inputAttributes); init(); } --- 96,110 ---- private EventListenerList listenerList; private boolean snapshotReleased; + + private Set languagePaths; /** * Constructor for reader as input. */ public TokenHierarchyOperation(Reader inputReader, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! this.tokenList = new CopyTextTokenList(this, inputReader, language, skipTokenIds, inputAttributes); init(); } *************** *** 108,119 **** * Constructor for character sequence as input. */ public TokenHierarchyOperation(CharSequence inputText, boolean copyInputText, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { this.tokenList = copyInputText ! ? (TokenList)new CopyTextTokenList(inputText, language, skipTokenIds, inputAttributes) ! : (TokenList)new TextTokenList(inputText, language, skipTokenIds, inputAttributes); init(); } --- 113,124 ---- * Constructor for character sequence as input. */ public TokenHierarchyOperation(CharSequence inputText, boolean copyInputText, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { this.tokenList = copyInputText ! ? new CopyTextTokenList(this, inputText, language, skipTokenIds, inputAttributes) ! : new TextTokenList(this, inputText, language, skipTokenIds, inputAttributes); init(); } *************** *** 122,144 **** * Constructor for mutable input. */ public TokenHierarchyOperation(MutableTextInput mutableTextInput, ! Language language) { this.mutableTextInput = mutableTextInput; ! this.tokenList = new IncTokenList(mutableTextInput); init(); } ! public TokenHierarchyOperation(TokenHierarchyOperation liveTokenHierarchy) { this.liveTokenHierarchyOperation = liveTokenHierarchy; ! this.tokenList = new SnapshotTokenList(this); init(); } private void init() { assert (tokenHierarchy == null); tokenHierarchy = LexerApiPackageAccessor.get().createTokenHierarchy(this); if (isMutable()) { - listenerList = new EventListenerList(); snapshotRefs = new ArrayList(1); } } --- 127,151 ---- * Constructor for mutable input. */ public TokenHierarchyOperation(MutableTextInput mutableTextInput, ! Language language) { this.mutableTextInput = mutableTextInput; ! this.tokenList = new IncTokenList(this, mutableTextInput); init(); } ! public TokenHierarchyOperation(TokenHierarchyOperation liveTokenHierarchy) { this.liveTokenHierarchyOperation = liveTokenHierarchy; ! this.tokenList = new SnapshotTokenList(this); init(); } private void init() { assert (tokenHierarchy == null); tokenHierarchy = LexerApiPackageAccessor.get().createTokenHierarchy(this); + // Create listener list even for non-mutable hierarchies as there may be + // custom embeddings created that need to be notified + listenerList = new EventListenerList(); if (isMutable()) { snapshotRefs = new ArrayList(1); } } *************** *** 147,157 **** return tokenHierarchy; } ! public TokenList tokenList() { return tokenList; } ! public TokenList checkedTokenList() { checkSnapshotNotReleased(); return tokenList(); } --- 154,164 ---- return tokenHierarchy; } ! public TokenList tokenList() { return tokenList; } ! 
public TokenList checkedTokenList() { checkSnapshotNotReleased(); return tokenList(); } *************** *** 164,170 **** return mutableTextInput; } ! public Object mutableInputSource() { return isMutable() ? LexerSpiPackageAccessor.get().inputSource(mutableTextInput) : null; --- 171,177 ---- return mutableTextInput; } ! public I mutableInputSource() { return isMutable() ? LexerSpiPackageAccessor.get().inputSource(mutableTextInput) : null; *************** *** 194,235 **** } public void addTokenHierarchyListener(TokenHierarchyListener listener) { ! if (isMutable()) { ! listenerList.add(TokenHierarchyListener.class, listener); ! } } public void removeTokenHierarchyListener(TokenHierarchyListener listener) { ! if (isMutable()) { ! listenerList.remove(TokenHierarchyListener.class, listener); ! } } public void textModified(int offset, int removedLength, CharSequence removedText, int insertedLength) { ! TokenListChange change = new TokenListChange(this, TokenHierarchyEvent.Type.TEXT_MODIFY, offset, removedLength, removedText, insertedLength); if (active) { ! IncTokenList incTokenList = (IncTokenList)tokenList; incTokenList.incrementModCount(); ! TokenListUpdater.update(incTokenList, change); if (!incTokenList.isFullyLexed()) incTokenList.refreshLexerInputOperation(); synchronized (snapshotRefs) { for (int i = snapshotRefs.size() - 1; i >= 0; i--) { ! TokenHierarchyOperation op = snapshotRefs.get(i).get(); if (op != null) { ! ((SnapshotTokenList) op.tokenList()).update(change); } } } ! } else { // not active - no changes to hierarchy ! change.noChange(tokenList); ! } ! fireTokenHierarchyChanged(LexerApiPackageAccessor.get().createTokenChangeEvent(tokenHierarchy, change)); } public boolean isSnapshot() { return (liveTokenHierarchyOperation != null); } --- 201,246 ---- } public void addTokenHierarchyListener(TokenHierarchyListener listener) { ! listenerList.add(TokenHierarchyListener.class, listener); } public void removeTokenHierarchyListener(TokenHierarchyListener listener) { ! listenerList.remove(TokenHierarchyListener.class, listener); } public void textModified(int offset, int removedLength, CharSequence removedText, int insertedLength) { ! TokenHierarchyEventInfo eventInfo = new TokenHierarchyEventInfo( ! this, TokenHierarchyEventType.MODIFICATION, offset, removedLength, removedText, insertedLength); if (active) { ! IncTokenList incTokenList = (IncTokenList)tokenList; incTokenList.incrementModCount(); ! TokenListChange change = new TokenListChange(incTokenList); ! TokenListUpdater.update(incTokenList, eventInfo, change); if (!incTokenList.isFullyLexed()) incTokenList.refreshLexerInputOperation(); synchronized (snapshotRefs) { for (int i = snapshotRefs.size() - 1; i >= 0; i--) { ! TokenHierarchyOperation op = snapshotRefs.get(i).get(); if (op != null) { ! ((SnapshotTokenList)op.tokenList()).update(eventInfo, change); } } } ! eventInfo.setTokenChangeInfo(change.tokenChangeInfo()); ! eventInfo.setAffectedStartOffset(change.offset()); ! eventInfo.setAffectedEndOffset(change.addedEndOffset()); ! fireTokenHierarchyChanged( ! LexerApiPackageAccessor.get().createTokenChangeEvent(eventInfo)); ! } // not active - no changes fired } + public Set languagePaths() { + return languagePaths; // TBD + } + public boolean isSnapshot() { return (liveTokenHierarchyOperation != null); } *************** *** 252,258 **** public TokenHierarchy createSnapshot() { if (isMutable()) { ! 
TokenHierarchyOperation snapshot = new TokenHierarchyOperation(this); snapshotRefs.add(new SnapshotRef(snapshot)); return snapshot.tokenHierarchy(); } --- 263,269 ---- public TokenHierarchy createSnapshot() { if (isMutable()) { ! TokenHierarchyOperation snapshot = new TokenHierarchyOperation(this); snapshotRefs.add(new SnapshotRef(snapshot)); return snapshot.tokenHierarchy(); } *************** *** 280,286 **** } } ! void removeSnapshot(TokenHierarchyOperation snapshot) { synchronized (snapshotRefs) { for (int i = snapshotRefs.size() - 1; i >= 0; i--) { Reference ref = (Reference)snapshotRefs.get(i); --- 291,297 ---- } } ! void removeSnapshot(TokenHierarchyOperation snapshot) { synchronized (snapshotRefs) { for (int i = snapshotRefs.size() - 1; i >= 0; i--) { Reference ref = (Reference)snapshotRefs.get(i); *************** *** 311,325 **** return true; } ! public TokenHierarchyOperation liveTokenHierarchyOperation() { return liveTokenHierarchyOperation; } ! public int tokenOffset(AbstractToken token, TokenList tokenList, int rawOffset) { if (this.tokenList.getClass() == SnapshotTokenList.class) { if (tokenList != null) { ! return ((SnapshotTokenList)this.tokenList).tokenOffset(token, tokenList, rawOffset); ! } else { // passed tokenList is null => token removed from BranchTokenList return rawOffset; } } else { // not a snapshot - regular situation --- 322,338 ---- return true; } ! public TokenHierarchyOperation liveTokenHierarchyOperation() { return liveTokenHierarchyOperation; } ! public int tokenOffset(AbstractToken token, TokenList tokenList, int rawOffset) { if (this.tokenList.getClass() == SnapshotTokenList.class) { if (tokenList != null) { ! @SuppressWarnings("unchecked") ! SnapshotTokenList tlUC = (SnapshotTokenList)this.tokenList; ! return tlUC.tokenOffset(token, tokenList, rawOffset); ! } else { // passed tokenList is null => token removed from EmbeddedTokenList return rawOffset; } } else { // not a snapshot - regular situation *************** *** 338,346 **** } ! private final class SnapshotRef extends WeakReference> implements Runnable { ! SnapshotRef(TokenHierarchyOperation snapshot) { super(snapshot, org.openide.util.Utilities.activeReferenceQueue()); } --- 351,359 ---- } ! private final class SnapshotRef extends WeakReference> implements Runnable { ! SnapshotRef(TokenHierarchyOperation snapshot) { super(snapshot, org.openide.util.Utilities.activeReferenceQueue()); } Index: lexer/src/org/netbeans/lib/lexer/TokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/TokenList.java,v retrieving revision 1.2 diff -c -r1.2 TokenList.java *** lexer/src/org/netbeans/lib/lexer/TokenList.java 4 Oct 2006 17:03:18 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/TokenList.java 28 Nov 2006 14:20:16 -0000 *************** *** 43,49 **** * to store the token characters in multiple arrays * and to correctly compute the tokens' starting offsets. *

 * <li>IncTokenList - token list for mutable-input environment.
! * <li>BranchTokenList - token list for language embedding
 * suitable for both batch and incremental environments. * * --- 43,49 ---- * to store the token characters in multiple arrays * and to correctly compute the tokens' starting offsets. *
 * <li>IncTokenList - token list for mutable-input environment.
! * <li>EmbeddedTokenList - token list for a single language embedding
 * suitable for both batch and incremental environments. * * *************** *** 51,57 **** * @version 1.00 */ ! public interface TokenList { /** * Language path of this token list. --- 51,57 ---- * @version 1.00 */ ! public interface TokenList { /** * Language path of this token list. *************** *** 59,65 **** LanguagePath languagePath(); /** ! * Get token or {@link BranchTokenList} at given index in this list. *
    * The method's implementation may need to be synchronized as multiple * threads can access it at the same time. --- 59,65 ---- LanguagePath languagePath(); /** ! * Get token or {@link EmbeddingContainer} at given index in this list. *
    * The method's implementation may need to be synchronized as multiple * threads can access it at the same time. *************** *** 70,76 **** * @param >=0 index of the token in this list. * @return valid token or null if the index is too high. */ ! Object tokenOrBranch(int index); /** * Replace flyweight token at the given index with its non-flyweight copy. --- 70,76 ---- * @param >=0 index of the token in this list. * @return valid token or null if the index is too high. */ ! Object tokenOrEmbeddingContainer(int index); /** * Replace flyweight token at the given index with its non-flyweight copy. *************** *** 82,95 **** * @param offset >=0 absolute offset where the flyweight token resides. * @return non-flyweight token instance. */ ! AbstractToken createNonFlyToken(int index, ! AbstractToken flyToken, int offset); /** * Get absolute offset of the token at the given index in the token list. *
    * This method can only be called if the token at the given index ! * was already fetched by {@link tokenOrBranch(int)}. *
    * For branch token lists this method is only expected to be called * after {@link #updateStartOffsetShift()} was called so it does not perform --- 82,104 ---- * @param offset >=0 absolute offset where the flyweight token resides. * @return non-flyweight token instance. */ ! AbstractToken replaceFlyToken(int index, AbstractToken flyToken, int offset); ! ! /** ! * Wrap the token by a branch token list due to language embedding ! * that exists for the token. ! * ! * @param index existing index in this token list at which the token ! * should be wrapped with the embedding info. ! * @param embeddingContainer embedding info that should wrap the token. ! */ ! void wrapToken(int index, EmbeddingContainer embeddingContainer); /** * Get absolute offset of the token at the given index in the token list. *
    * This method can only be called if the token at the given index ! * was already fetched by {@link tokenOrEmbeddingContainer(int)}. *
    * For branch token lists this method is only expected to be called * after {@link #updateStartOffsetShift()} was called so it does not perform *************** *** 164,178 **** char childTokenCharAt(int rawOffset, int index); /** ! * Wrap the token by a branch token list due to language embedding ! * that exists for the token. */ ! void wrapToken(int index, BranchTokenList wrapper); /** ! * Get the root token list of the token list hierarchy. */ ! TokenList root(); /** * Extra attributes related to the input being lexed. --- 173,187 ---- char childTokenCharAt(int rawOffset, int index); /** ! * Get the root token list of the token list hierarchy. */ ! TokenList root(); /** ! * Get token hierarchy operation for this token list or null ! * if this token list does not have any token hierarchy. */ ! TokenHierarchyOperation tokenHierarchyOperation(); /** * Extra attributes related to the input being lexed. *************** *** 236,241 **** /** * Get set of token ids to be skipped during token creation. */ ! Set skipTokenIds(); } --- 245,250 ---- /** * Get set of token ids to be skipped during token creation. */ ! Set skipTokenIds(); } Index: lexer/src/org/netbeans/lib/lexer/batch/BatchTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/batch/BatchTokenList.java,v retrieving revision 1.5 diff -c -r1.5 BatchTokenList.java *** lexer/src/org/netbeans/lib/lexer/batch/BatchTokenList.java 7 Nov 2006 16:16:23 -0000 1.5 --- lexer/src/org/netbeans/lib/lexer/batch/BatchTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 23,29 **** import java.util.Set; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.LanguagePath; ! import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.LAState; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.LexerInputOperation; --- 23,29 ---- import java.util.Set; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.LanguagePath; ! import org.netbeans.lib.lexer.EmbeddingContainer; import org.netbeans.lib.lexer.LAState; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.LexerInputOperation; *************** *** 31,36 **** --- 31,37 ---- import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.TokenHierarchyOperation; import org.netbeans.lib.lexer.token.AbstractToken; import org.netbeans.lib.lexer.token.TextToken; *************** *** 42,48 **** * @version 1.00 */ ! public abstract class BatchTokenList extends ArrayList implements TokenList { /** Flag for additional correctness checks (may degrade performance). */ private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test"); --- 43,50 ---- * @version 1.00 */ ! public abstract class BatchTokenList ! extends ArrayList implements TokenList { /** Flag for additional correctness checks (may degrade performance). */ private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test"); *************** *** 60,83 **** BatchTokenList.maintainLAState = maintainLAState; } private final LanguagePath languagePath; ! private final Set skipTokenIds; private final InputAttributes inputAttributes; /** * Lexer input used for lexing of the input. */ ! private LexerInputOperation lexerInputOperation; private LAState laState; private boolean inited; ! public BatchTokenList(Language language, ! 
Set skipTokenIds, InputAttributes inputAttributes) { this.languagePath = LanguagePath.get(language); this.skipTokenIds = skipTokenIds; this.inputAttributes = inputAttributes; --- 62,88 ---- BatchTokenList.maintainLAState = maintainLAState; } + private final TokenHierarchyOperation tokenHierarchyOperation; + private final LanguagePath languagePath; ! private final Set skipTokenIds; private final InputAttributes inputAttributes; /** * Lexer input used for lexing of the input. */ ! private LexerInputOperation lexerInputOperation; private LAState laState; private boolean inited; ! public BatchTokenList(TokenHierarchyOperation tokenHierarchyOperation, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! this.tokenHierarchyOperation = tokenHierarchyOperation; this.languagePath = LanguagePath.get(language); this.skipTokenIds = skipTokenIds; this.inputAttributes = inputAttributes; *************** *** 88,103 **** public abstract char childTokenCharAt(int rawOffset, int index); ! protected abstract LexerInputOperation createLexerInputOperation(); protected void init() { lexerInputOperation = createLexerInputOperation(); } ! public TokenList root() { return this; // this list should always be the root list of the token hierarchy } ! public LanguagePath languagePath() { return languagePath; } --- 93,112 ---- public abstract char childTokenCharAt(int rawOffset, int index); ! protected abstract LexerInputOperation createLexerInputOperation(); protected void init() { lexerInputOperation = createLexerInputOperation(); } ! public TokenList root() { return this; // this list should always be the root list of the token hierarchy } ! ! public TokenHierarchyOperation tokenHierarchyOperation() { ! return tokenHierarchyOperation; ! } ! public LanguagePath languagePath() { return languagePath; } *************** *** 108,114 **** inited = true; } if (lexerInputOperation != null) { // still lexing ! tokenOrBranchImpl(Integer.MAX_VALUE); } return size(); } --- 117,123 ---- inited = true; } if (lexerInputOperation != null) { // still lexing ! tokenOrEmbeddingContainerImpl(Integer.MAX_VALUE); } return size(); } *************** *** 123,129 **** } public int tokenOffset(int index) { ! Token token = existingToken(index); int offset; if (token.isFlyweight()) { offset = 0; --- 132,138 ---- } public int tokenOffset(int index) { ! Token token = existingToken(index); int offset; if (token.isFlyweight()) { offset = 0; *************** *** 141,157 **** return offset; } ! public synchronized Object tokenOrBranch(int index) { ! return tokenOrBranchImpl(index); } ! private Object tokenOrBranchImpl(int index) { if (!inited) { init(); inited = true; } while (lexerInputOperation != null && index >= size()) { ! Token token = lexerInputOperation.nextToken(); if (token != null) { // lexer returned valid token add(token); if (laState != null) { // maintaining lookaheads and states --- 150,166 ---- return offset; } ! public synchronized Object tokenOrEmbeddingContainer(int index) { ! return tokenOrEmbeddingContainerImpl(index); } ! private Object tokenOrEmbeddingContainerImpl(int index) { if (!inited) { init(); inited = true; } while (lexerInputOperation != null && index >= size()) { ! Token token = lexerInputOperation.nextToken(); if (token != null) { // lexer returned valid token add(token); if (laState != null) { // maintaining lookaheads and states *************** *** 166,182 **** return (index < size()) ? get(index) : null; } ! 
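The renamed tokenOrEmbeddingContainerImpl() above keeps the batch list lazy: tokens are pulled from the LexerInputOperation only until the requested index exists or the lexer reports end of input. A minimal self-contained sketch of that on-demand pattern follows; LazyTokenList and TokenSource are illustrative names, not types from this module.

    import java.util.ArrayList;
    import java.util.List;

    /** Minimal sketch: grow the token list only as far as the requested index. */
    final class LazyTokenList {

        /** Illustrative stand-in for LexerInputOperation.nextToken(); null means end of input. */
        interface TokenSource {
            String nextToken();
        }

        private final List<String> tokens = new ArrayList<String>();
        private TokenSource source; // set to null once the input is fully lexed

        LazyTokenList(TokenSource source) {
            this.source = source;
        }

        /** Lex forward on demand; returns null if the index lies past the end of input. */
        public synchronized String tokenAt(int index) {
            while (source != null && index >= tokens.size()) {
                String token = source.nextToken();
                if (token != null) {
                    tokens.add(token);
                } else { // input exhausted, stop lexing for good
                    source = null;
                }
            }
            return (index < tokens.size()) ? tokens.get(index) : null;
        }
    }

Requesting tokenAt(Integer.MAX_VALUE) forces the whole input to be lexed, which is exactly how tokenCount() in the patch uses tokenOrEmbeddingContainerImpl(Integer.MAX_VALUE).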
private Token existingToken(int index) { return LexerUtilsConstants.token(get(index)); } - public synchronized AbstractToken createNonFlyToken( - int index, AbstractToken flyToken, int offset) { - TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset); - set(index, nonFlyToken); - return nonFlyToken; - } - public int lookahead(int index) { return (laState != null) ? laState.lookahead(index) : -1; } --- 175,184 ---- return (index < size()) ? get(index) : null; } ! private Token existingToken(int index) { return LexerUtilsConstants.token(get(index)); } public int lookahead(int index) { return (laState != null) ? laState.lookahead(index) : -1; } *************** *** 189,196 **** return -1; // immutable input } ! public void wrapToken(int index, BranchTokenList wrapper) { ! set(index, wrapper); } public InputAttributes inputAttributes() { --- 191,205 ---- return -1; // immutable input } ! public synchronized AbstractToken replaceFlyToken( ! int index, AbstractToken flyToken, int offset) { ! TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset); ! set(index, nonFlyToken); ! return nonFlyToken; ! } ! ! public void wrapToken(int index, EmbeddingContainer embeddingContainer) { ! set(index, embeddingContainer); } public InputAttributes inputAttributes() { *************** *** 201,207 **** return (skipTokenIds == null); } ! public Set skipTokenIds() { return skipTokenIds; } --- 210,216 ---- return (skipTokenIds == null); } ! public Set skipTokenIds() { return skipTokenIds; } Index: lexer/src/org/netbeans/lib/lexer/batch/CopyTextTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/batch/CopyTextTokenList.java,v retrieving revision 1.4 diff -c -r1.4 CopyTextTokenList.java *** lexer/src/org/netbeans/lib/lexer/batch/CopyTextTokenList.java 26 Oct 2006 20:45:24 -0000 1.4 --- lexer/src/org/netbeans/lib/lexer/batch/CopyTextTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 25,30 **** --- 25,31 ---- import org.netbeans.lib.lexer.LexerInputOperation; import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.TokenHierarchyOperation; /** *************** *** 36,55 **** * @version 1.00 */ ! public final class CopyTextTokenList extends BatchTokenList { /** Either reader or char sequence */ private final Object input; ! public CopyTextTokenList(Reader inputReader, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! super(language, skipTokenIds, inputAttributes); this.input = inputReader; } ! public CopyTextTokenList(CharSequence inputText, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! super(language, skipTokenIds, inputAttributes); this.input = inputText; } --- 37,56 ---- * @version 1.00 */ ! public final class CopyTextTokenList extends BatchTokenList { /** Either reader or char sequence */ private final Object input; ! public CopyTextTokenList(TokenHierarchyOperation tokenHierarchyOperation, Reader inputReader, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! super(tokenHierarchyOperation, language, skipTokenIds, inputAttributes); this.input = inputReader; } ! public CopyTextTokenList(TokenHierarchyOperation tokenHierarchyOperation, CharSequence inputText, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! 
super(tokenHierarchyOperation, language, skipTokenIds, inputAttributes); this.input = inputText; } *************** *** 69,78 **** throw new IllegalStateException("Should never be called"); // NOI18N } ! protected LexerInputOperation createLexerInputOperation() { return (input instanceof Reader) ! ? new SkimLexerInputOperation(this, (Reader)input) ! : new SkimLexerInputOperation(this, (CharSequence)input); } } --- 70,79 ---- throw new IllegalStateException("Should never be called"); // NOI18N } ! protected LexerInputOperation createLexerInputOperation() { return (input instanceof Reader) ! ? new SkimLexerInputOperation(this, (Reader)input) ! : new SkimLexerInputOperation(this, (CharSequence)input); } } Index: lexer/src/org/netbeans/lib/lexer/batch/SkimLexerInputOperation.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/batch/SkimLexerInputOperation.java,v retrieving revision 1.2 diff -c -r1.2 SkimLexerInputOperation.java *** lexer/src/org/netbeans/lib/lexer/batch/SkimLexerInputOperation.java 4 Oct 2006 17:03:19 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/batch/SkimLexerInputOperation.java 28 Nov 2006 14:20:17 -0000 *************** *** 21,30 **** import java.io.IOException; import java.io.Reader; - import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.lexer.LexerInputOperation; - import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.token.AbstractToken; import org.netbeans.spi.lexer.LexerInput; --- 21,28 ---- *************** *** 36,42 **** * @version 1.00 */ ! public final class SkimLexerInputOperation extends LexerInputOperation { private static final char[] EMPTY_CHAR_ARRAY = new char[0]; --- 34,40 ---- * @version 1.00 */ ! public final class SkimLexerInputOperation extends LexerInputOperation { private static final char[] EMPTY_CHAR_ARRAY = new char[0]; *************** *** 99,105 **** /** * Actual token cluster where the tokens are being placed. */ ! private SkimTokenList cluster; private int clusterTextEndIndex; --- 97,103 ---- /** * Actual token cluster where the tokens are being placed. */ ! private SkimTokenList cluster; private int clusterTextEndIndex; *************** *** 118,130 **** */ private int offsetShift; ! public SkimLexerInputOperation(TokenList tokenList, Reader reader) { super(tokenList, 0, null); this.reader = reader; this.readCharArray = new char[DEFAULT_READ_CHAR_ARRAY_SIZE]; } ! public SkimLexerInputOperation(TokenList tokenList, CharSequence readCharSequence) { super(tokenList, 0, null); this.readCharSequence = readCharSequence; this.readEndIndex = readCharSequence.length(); --- 116,128 ---- */ private int offsetShift; ! public SkimLexerInputOperation(TokenList tokenList, Reader reader) { super(tokenList, 0, null); this.reader = reader; this.readCharArray = new char[DEFAULT_READ_CHAR_ARRAY_SIZE]; } ! public SkimLexerInputOperation(TokenList tokenList, CharSequence readCharSequence) { super(tokenList, 0, null); this.readCharSequence = readCharSequence; this.readEndIndex = readCharSequence.length(); *************** *** 157,163 **** : readCharSequence.charAt(index); } ! public void approveToken(AbstractToken token) { int tokenLength = token.length(); if (isSkipToken(token)) { preventFlyToken(); --- 155,161 ---- : readCharSequence.charAt(index); } ! 
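CopyTextTokenList above keeps its input in a single Object field documented as "Either reader or char sequence", and createLexerInputOperation() branches once on the concrete type to pick the matching SkimLexerInputOperation constructor. As a hedged aside, the same duality could be hidden behind one character-supplying interface; CharSource and the adapter classes below are illustrative names, not something this patch introduces.

    import java.io.IOException;
    import java.io.Reader;

    /** Sketch: one character-supplying interface over either a Reader or a CharSequence. */
    interface CharSource {
        /** Next character, or -1 at end of input. */
        int read() throws IOException;
    }

    /** Adapter for character streams. */
    final class ReaderCharSource implements CharSource {
        private final Reader reader;
        ReaderCharSource(Reader reader) { this.reader = reader; }
        public int read() throws IOException { return reader.read(); }
    }

    /** Adapter for in-memory text. */
    final class TextCharSource implements CharSource {
        private final CharSequence text;
        private int index;
        TextCharSource(CharSequence text) { this.text = text; }
        public int read() { return (index < text.length()) ? text.charAt(index++) : -1; }
    }

The patch keeps the instanceof dispatch instead, which avoids an extra adapter object per lexed input.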
public void approveToken(AbstractToken token) { int tokenLength = token.length(); if (isSkipToken(token)) { preventFlyToken(); *************** *** 183,189 **** clusterSize = tokenLength; } defaultClusterSize = clusterSize; ! cluster = new SkimTokenList((CopyTextTokenList)tokenList(), clusterStartOffset, new char[clusterSize]); } --- 181,187 ---- clusterSize = tokenLength; } defaultClusterSize = clusterSize; ! cluster = new SkimTokenList((CopyTextTokenList)tokenList(), clusterStartOffset, new char[clusterSize]); } Index: lexer/src/org/netbeans/lib/lexer/batch/SkimTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/batch/SkimTokenList.java,v retrieving revision 1.2 diff -c -r1.2 SkimTokenList.java *** lexer/src/org/netbeans/lib/lexer/batch/SkimTokenList.java 4 Oct 2006 17:03:19 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/batch/SkimTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 21,30 **** import java.util.Set; import org.netbeans.api.lexer.LanguagePath; ! import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.TokenList; import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.lexer.token.AbstractToken; /** --- 21,31 ---- import java.util.Set; import org.netbeans.api.lexer.LanguagePath; ! import org.netbeans.lib.lexer.EmbeddingContainer; import org.netbeans.lib.lexer.TokenList; import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.TokenHierarchyOperation; import org.netbeans.lib.lexer.token.AbstractToken; /** *************** *** 42,63 **** * @version 1.00 */ ! public final class SkimTokenList implements TokenList { ! private CopyTextTokenList tokenList; private int startOffset; private char[] text; ! public SkimTokenList(CopyTextTokenList tokenList, int startOffset, char[] text) { this.tokenList = tokenList; this.startOffset = startOffset; this.text = text; } ! public CopyTextTokenList getTokenList() { return tokenList; } --- 43,64 ---- * @version 1.00 */ ! public final class SkimTokenList implements TokenList { ! private CopyTextTokenList tokenList; private int startOffset; private char[] text; ! public SkimTokenList(CopyTextTokenList tokenList, int startOffset, char[] text) { this.tokenList = tokenList; this.startOffset = startOffset; this.text = text; } ! public CopyTextTokenList getTokenList() { return tokenList; } *************** *** 86,98 **** return 0; } ! public Object tokenOrBranch(int index) { ! return tokenList.tokenOrBranch(index); } ! public AbstractToken createNonFlyToken( int index, AbstractToken flyToken, int offset) { ! return tokenList.createNonFlyToken(index, flyToken, offset); } --- 87,99 ---- return 0; } ! public Object tokenOrEmbeddingContainer(int index) { ! return tokenList.tokenOrEmbeddingContainer(index); } ! public AbstractToken replaceFlyToken( int index, AbstractToken flyToken, int offset) { ! return tokenList.replaceFlyToken(index, flyToken, offset); } *************** *** 116,131 **** return tokenList.tokenCountCurrent(); } ! public TokenList root() { return tokenList.root(); } public LanguagePath languagePath() { return tokenList.languagePath(); } ! public void wrapToken(int index, BranchTokenList wrapper) { ! tokenList.wrapToken(index, wrapper); } public InputAttributes inputAttributes() { --- 117,136 ---- return tokenList.tokenCountCurrent(); } ! 
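approveToken() above is where the skimming pass copies each accepted token's text into char[] clusters wrapped by SkimTokenList, so CopyTextTokenList can later serve token text without holding on to the whole input. A rough, self-contained sketch of that clustering idea follows; TextClusters and Cluster are illustrative names, not the module's types.

    import java.util.ArrayList;
    import java.util.List;

    /** Sketch: pack the text of approved tokens into successive char[] clusters. */
    final class TextClusters {

        /** One cluster: absolute start offset of its first character plus the packed text. */
        static final class Cluster {
            final int startOffset;
            final char[] text;
            Cluster(int startOffset, char[] text) {
                this.startOffset = startOffset;
                this.text = text;
            }
        }

        private final List<Cluster> clusters = new ArrayList<Cluster>();
        private final StringBuilder pending = new StringBuilder();
        private final int preferredClusterSize;
        private int pendingStartOffset;

        TextClusters(int preferredClusterSize) {
            this.preferredClusterSize = preferredClusterSize;
        }

        /** Append one approved token's text; flush once the cluster reaches the preferred size. */
        void approveToken(int tokenOffset, CharSequence tokenText) {
            if (pending.length() == 0) {
                pendingStartOffset = tokenOffset;
            }
            pending.append(tokenText);
            if (pending.length() >= preferredClusterSize) {
                flush();
            }
        }

        /** Finish clustering and return every cluster created so far. */
        List<Cluster> finish() {
            flush();
            return clusters;
        }

        private void flush() {
            if (pending.length() > 0) {
                char[] text = new char[pending.length()];
                pending.getChars(0, pending.length(), text, 0);
                clusters.add(new Cluster(pendingStartOffset, text));
                pending.setLength(0);
            }
        }
    }

Unlike the real SkimLexerInputOperation, this sketch assumes the approved tokens arrive in offset order with no holes between them.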
public TokenList root() { return tokenList.root(); } + public TokenHierarchyOperation tokenHierarchyOperation() { + return tokenList.tokenHierarchyOperation(); + } + public LanguagePath languagePath() { return tokenList.languagePath(); } ! public void wrapToken(int index, EmbeddingContainer embeddingContainer) { ! tokenList.wrapToken(index, embeddingContainer); } public InputAttributes inputAttributes() { *************** *** 136,142 **** return tokenList.isContinuous(); } ! public Set skipTokenIds() { return tokenList.skipTokenIds(); } --- 141,147 ---- return tokenList.isContinuous(); } ! public Set skipTokenIds() { return tokenList.skipTokenIds(); } Index: lexer/src/org/netbeans/lib/lexer/batch/TextTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/batch/TextTokenList.java,v retrieving revision 1.4 diff -c -r1.4 TextTokenList.java *** lexer/src/org/netbeans/lib/lexer/batch/TextTokenList.java 26 Oct 2006 20:45:24 -0000 1.4 --- lexer/src/org/netbeans/lib/lexer/batch/TextTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 25,30 **** --- 25,31 ---- import org.netbeans.lib.lexer.TextLexerInputOperation; import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.TokenHierarchyOperation; /** *************** *** 34,46 **** * @version 1.00 */ ! public final class TextTokenList extends BatchTokenList { private CharSequence inputText; ! public TextTokenList(CharSequence inputText, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! super(language, skipTokenIds, inputAttributes); this.inputText = inputText; } --- 35,47 ---- * @version 1.00 */ ! public final class TextTokenList extends BatchTokenList { private CharSequence inputText; ! public TextTokenList(TokenHierarchyOperation tokenHierarchyOperation, CharSequence inputText, ! Language language, Set skipTokenIds, InputAttributes inputAttributes) { ! super(tokenHierarchyOperation, language, skipTokenIds, inputAttributes); this.inputText = inputText; } *************** *** 48,55 **** return inputText.charAt(rawOffset + index); // rawOffset is absolute } ! protected LexerInputOperation createLexerInputOperation() { ! return new TextLexerInputOperation(this, inputText); } } --- 49,56 ---- return inputText.charAt(rawOffset + index); // rawOffset is absolute } ! protected LexerInputOperation createLexerInputOperation() { ! return new TextLexerInputOperation(this, inputText); } } Index: lexer/src/org/netbeans/lib/lexer/inc/FilterSnapshotTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/inc/FilterSnapshotTokenList.java,v retrieving revision 1.2 diff -c -r1.2 FilterSnapshotTokenList.java *** lexer/src/org/netbeans/lib/lexer/inc/FilterSnapshotTokenList.java 4 Oct 2006 17:03:19 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/inc/FilterSnapshotTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 23,30 **** import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.LanguagePath; import org.netbeans.api.lexer.TokenId; ! import org.netbeans.lib.lexer.BranchTokenList; ! import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.token.AbstractToken; --- 23,30 ---- import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.LanguagePath; import org.netbeans.api.lexer.TokenId; ! 
import org.netbeans.lib.lexer.EmbeddingContainer; ! import org.netbeans.lib.lexer.TokenHierarchyOperation; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.token.AbstractToken; *************** *** 53,62 **** * @version 1.00 */ ! public final class FilterSnapshotTokenList implements TokenList { /** Original token list. */ ! private TokenList tokenList; /** * Difference of the offsets retrieved from tokenList.offset(index) --- 53,62 ---- * @version 1.00 */ ! public final class FilterSnapshotTokenList implements TokenList { /** Original token list. */ ! private TokenList tokenList; /** * Difference of the offsets retrieved from tokenList.offset(index) *************** *** 64,70 **** */ private int tokenOffsetDiff; ! public FilterSnapshotTokenList(TokenList tokenList, int tokenOffsetDiff) { this.tokenList = tokenList; this.tokenOffsetDiff = tokenOffsetDiff; } --- 64,70 ---- */ private int tokenOffsetDiff; ! public FilterSnapshotTokenList(TokenList tokenList, int tokenOffsetDiff) { this.tokenList = tokenList; this.tokenOffsetDiff = tokenOffsetDiff; } *************** *** 77,88 **** return tokenOffsetDiff; } ! public Object tokenOrBranch(int index) { ! return tokenList.tokenOrBranch(index); } ! public AbstractToken createNonFlyToken(int index, AbstractToken flyToken, int offset) { ! return tokenList.createNonFlyToken(index, flyToken, offset); } public int tokenOffset(int index) { --- 77,88 ---- return tokenOffsetDiff; } ! public Object tokenOrEmbeddingContainer(int index) { ! return tokenList.tokenOrEmbeddingContainer(index); } ! public AbstractToken replaceFlyToken(int index, AbstractToken flyToken, int offset) { ! return tokenList.replaceFlyToken(index, flyToken, offset); } public int tokenOffset(int index) { *************** *** 113,126 **** throw new IllegalStateException("Unexpected call."); } ! public void wrapToken(int index, BranchTokenList wrapper) { ! tokenList.wrapToken(index, wrapper); } ! public TokenList root() { ! throw new IllegalStateException("Unexpected call."); } ! public InputAttributes inputAttributes() { return tokenList.inputAttributes(); } --- 113,130 ---- throw new IllegalStateException("Unexpected call."); } ! public void wrapToken(int index, EmbeddingContainer embeddingContainer) { ! tokenList.wrapToken(index, embeddingContainer); } ! public TokenList root() { ! return tokenList.root(); } ! ! public TokenHierarchyOperation tokenHierarchyOperation() { ! return tokenList.tokenHierarchyOperation(); ! } ! public InputAttributes inputAttributes() { return tokenList.inputAttributes(); } *************** *** 138,144 **** return tokenList.isContinuous(); } ! public Set skipTokenIds() { return tokenList.skipTokenIds(); } --- 142,148 ---- return tokenList.isContinuous(); } ! public Set skipTokenIds() { return tokenList.skipTokenIds(); } Index: lexer/src/org/netbeans/lib/lexer/inc/IncTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/inc/IncTokenList.java,v retrieving revision 1.2 diff -c -r1.2 IncTokenList.java *** lexer/src/org/netbeans/lib/lexer/inc/IncTokenList.java 4 Oct 2006 17:03:19 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/inc/IncTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 27,38 **** import org.netbeans.lib.lexer.TextLexerInputOperation; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.editor.util.FlyOffsetGapList; ! 
import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.LexerInputOperation; import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; import org.netbeans.spi.lexer.MutableTextInput; import org.netbeans.lib.lexer.token.AbstractToken; import org.netbeans.lib.lexer.token.TextToken; --- 27,39 ---- import org.netbeans.lib.lexer.TextLexerInputOperation; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.editor.util.FlyOffsetGapList; ! import org.netbeans.lib.lexer.EmbeddingContainer; import org.netbeans.lib.lexer.LexerInputOperation; import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.TokenHierarchyOperation; import org.netbeans.spi.lexer.MutableTextInput; import org.netbeans.lib.lexer.token.AbstractToken; import org.netbeans.lib.lexer.token.TextToken; *************** *** 55,63 **** * @version 1.00 */ ! public final class IncTokenList extends FlyOffsetGapList implements MutableTokenList { ! private final MutableTextInput mutableTextInput; private final LanguagePath languagePath; --- 56,67 ---- * @version 1.00 */ ! public final class IncTokenList ! extends FlyOffsetGapList implements MutableTokenList { ! ! private final TokenHierarchyOperation tokenHierarchyOperation; ! private final MutableTextInput mutableTextInput; private final LanguagePath languagePath; *************** *** 68,74 **** /** * Lexer input operation used for lexing of the input. */ ! private LexerInputOperation lexerInputOperation; private boolean inited; --- 72,78 ---- /** * Lexer input operation used for lexing of the input. */ ! private LexerInputOperation lexerInputOperation; private boolean inited; *************** *** 77,83 **** private LAState laState; ! public IncTokenList(MutableTextInput mutableTextInput) { this.mutableTextInput = mutableTextInput; this.languagePath = LanguagePath.get( LexerSpiPackageAccessor.get().language(mutableTextInput)); --- 81,89 ---- private LAState laState; ! public IncTokenList(TokenHierarchyOperation tokenHierarchyOperation, ! MutableTextInput mutableTextInput) { ! this.tokenHierarchyOperation = tokenHierarchyOperation; this.mutableTextInput = mutableTextInput; this.languagePath = LanguagePath.get( LexerSpiPackageAccessor.get().language(mutableTextInput)); *************** *** 87,93 **** } private void init() { ! lexerInputOperation = new TextLexerInputOperation(this, text); } public LanguagePath languagePath() { --- 93,99 ---- } private void init() { ! lexerInputOperation = new TextLexerInputOperation(this, text); } public LanguagePath languagePath() { *************** *** 100,106 **** inited = true; } if (lexerInputOperation != null) { // still lexing ! tokenOrBranchImpl(Integer.MAX_VALUE); } return size(); } --- 106,112 ---- inited = true; } if (lexerInputOperation != null) { // still lexing ! tokenOrEmbeddingContainerImpl(Integer.MAX_VALUE); } return size(); } *************** *** 131,141 **** rootModCount++; } ! public synchronized Object tokenOrBranch(int index) { ! return tokenOrBranchImpl(index); } ! private Object tokenOrBranchImpl(int index) { if (!inited) { init(); inited = true; --- 137,147 ---- rootModCount++; } ! public synchronized Object tokenOrEmbeddingContainer(int index) { ! return tokenOrEmbeddingContainerImpl(index); } ! 
private Object tokenOrEmbeddingContainerImpl(int index) { if (!inited) { init(); inited = true; *************** *** 156,170 **** return (index < size()) ? get(index) : null; } ! public synchronized AbstractToken createNonFlyToken( int index, AbstractToken flyToken, int offset) { TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset2Raw(offset)); set(index, nonFlyToken); return nonFlyToken; } ! public synchronized void wrapToken(int index, BranchTokenList wrapper) { ! set(index, wrapper); } public InputAttributes inputAttributes() { --- 162,176 ---- return (index < size()) ? get(index) : null; } ! public synchronized AbstractToken replaceFlyToken( int index, AbstractToken flyToken, int offset) { TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset2Raw(offset)); set(index, nonFlyToken); return nonFlyToken; } ! public synchronized void wrapToken(int index, EmbeddingContainer embeddingContainer) { ! set(index, embeddingContainer); } public InputAttributes inputAttributes() { *************** *** 181,187 **** protected boolean isElementFlyweight(Object elem) { // token wrapper always contains non-flyweight token ! return (elem.getClass() != BranchTokenList.class) && ((AbstractToken)elem).isFlyweight(); } --- 187,193 ---- protected boolean isElementFlyweight(Object elem) { // token wrapper always contains non-flyweight token ! return (elem.getClass() != EmbeddingContainer.class) && ((AbstractToken)elem).isFlyweight(); } *************** *** 189,201 **** return LexerUtilsConstants.token(elem).length(); } ! private AbstractToken existingToken(int index) { ! // Must use synced tokenOrBranch() because of possible change // of the underlying list impl when adding lazily requested tokens ! return LexerUtilsConstants.token(tokenOrBranch(index)); } ! public Object tokenOrBranchUnsync(int index) { // Solely for token list updater or token hierarchy snapshots // having single-threaded exclusive write access return get(index); --- 195,207 ---- return LexerUtilsConstants.token(elem).length(); } ! private AbstractToken existingToken(int index) { ! // Must use synced tokenOrEmbeddingContainer() because of possible change // of the underlying list impl when adding lazily requested tokens ! return LexerUtilsConstants.token(tokenOrEmbeddingContainer(index)); } ! public Object tokenOrEmbeddingContainerUnsync(int index) { // Solely for token list updater or token hierarchy snapshots // having single-threaded exclusive write access return get(index); *************** *** 213,226 **** return size(); } ! public TokenList root() { return this; } ! public LexerInputOperation createLexerInputOperation( int tokenIndex, int relexOffset, Object relexState) { // Used for mutable lists only so maintain LA and state ! return new TextLexerInputOperation(this, tokenIndex, relexState, text, 0, relexOffset, text.length()); } --- 219,236 ---- return size(); } ! public TokenList root() { return this; } ! public TokenHierarchyOperation tokenHierarchyOperation() { ! return tokenHierarchyOperation; ! } ! ! public LexerInputOperation createLexerInputOperation( int tokenIndex, int relexOffset, Object relexState) { // Used for mutable lists only so maintain LA and state ! return new TextLexerInputOperation(this, tokenIndex, relexState, text, 0, relexOffset, text.length()); } *************** *** 228,242 **** return inited && (lexerInputOperation == null); } ! public void replaceTokens(TokenListChange change, int removeTokenCount) { ! 
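The wrapToken() methods throughout this patch overwrite a token's slot with an EmbeddingContainer once a language embedding is created for it, which is why readers go through tokenOrEmbeddingContainer() and unwrap the result, for example via LexerUtilsConstants.token(). A condensed sketch of that slot convention follows, using stand-in types rather than the module's real Token or EmbeddingContainer classes.

    /** Sketch: one Object slot per position holds either a token or a container wrapping it. */
    final class TokenOrEmbeddingSlots {

        static final class Token {
            final String text;
            Token(String text) { this.text = text; }
        }

        /** Created once a language embedding exists for a token; keeps the wrapped token. */
        static final class EmbeddingContainer {
            final Token token;
            EmbeddingContainer(Token token) { this.token = token; }
        }

        private final Object[] slots;

        TokenOrEmbeddingSlots(Token[] tokens) {
            // copy into an Object[] so containers can be stored later without ArrayStoreException
            slots = new Object[tokens.length];
            System.arraycopy(tokens, 0, slots, 0, tokens.length);
        }

        /** Analogue of wrapToken(index, embeddingContainer): swap the plain token for its wrapper. */
        void wrapToken(int index, EmbeddingContainer container) {
            slots[index] = container;
        }

        /** Analogue of LexerUtilsConstants.token(...): unwrap whatever the slot currently holds. */
        Token tokenAt(int index) {
            Object o = slots[index];
            return (o instanceof EmbeddingContainer) ? ((EmbeddingContainer) o).token : (Token) o;
        }
    }

Storing the container in place of the token keeps index lookups cheap and explains the elem.getClass() != EmbeddingContainer.class test in isElementFlyweight() above.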
int index = change.tokenIndex(); // Remove obsolete tokens (original offsets are retained) Object[] removedTokensOrBranches = new Object[removeTokenCount]; copyElements(index, index + removeTokenCount, removedTokensOrBranches, 0); ! int offset = change.modifiedTokensStartOffset(); for (int i = 0; i < removeTokenCount; i++) { ! Object tokenOrBranch = removedTokensOrBranches[i]; ! AbstractToken token = LexerUtilsConstants.token(tokenOrBranch); if (!token.isFlyweight()) { updateElementOffsetRemove(token); token.setTokenList(null); --- 238,253 ---- return inited && (lexerInputOperation == null); } ! public void replaceTokens(TokenHierarchyEventInfo eventInfo, ! TokenListChange change, int removeTokenCount) { ! int index = change.index(); // Remove obsolete tokens (original offsets are retained) Object[] removedTokensOrBranches = new Object[removeTokenCount]; copyElements(index, index + removeTokenCount, removedTokensOrBranches, 0); ! int offset = change.offset(); for (int i = 0; i < removeTokenCount; i++) { ! Object tokenOrEmbeddingContainer = removedTokensOrBranches[i]; ! AbstractToken token = LexerUtilsConstants.token(tokenOrEmbeddingContainer); if (!token.isFlyweight()) { updateElementOffsetRemove(token); token.setTokenList(null); *************** *** 245,255 **** } remove(index, removeTokenCount); // Retain original offsets laState.remove(index, removeTokenCount); // Remove lookaheads and states ! change.initRemovedTokenList(removedTokensOrBranches); ! change.setRemovedTokensEndOffset(offset); // Move and fix the gap according to the performed modification. ! int diffLength = change.insertedLength() - change.removedLength(); if (offsetGapStart() != change.offset()) { // Minimum of the index of the first removed index and original computed index moveOffsetGap(change.offset(), Math.min(index, change.offsetGapIndex())); --- 256,266 ---- } remove(index, removeTokenCount); // Retain original offsets laState.remove(index, removeTokenCount); // Remove lookaheads and states ! change.setRemovedTokens(removedTokensOrBranches); ! change.setRemovedEndOffset(offset); // Move and fix the gap according to the performed modification. ! int diffLength = eventInfo.insertedLength() - eventInfo.removedLength(); if (offsetGapStart() != change.offset()) { // Minimum of the index of the first removed index and original computed index moveOffsetGap(change.offset(), Math.min(index, change.offsetGapIndex())); *************** *** 257,272 **** updateOffsetGapLength(-diffLength); // Add created tokens. ! List addedTokens = change.addedTokens(); if (addedTokens != null) { for (int i = 0; i < addedTokens.size(); i++) { ! AbstractToken token = (AbstractToken)addedTokens.get(i); updateElementOffsetAdd(token); } addAll(index, addedTokens); laState = laState.addAll(index, change.laState()); ! change.setAddedTokenCount(addedTokens.size()); ! change.clearAddedTokens(); } } --- 268,282 ---- updateOffsetGapLength(-diffLength); // Add created tokens. ! List> addedTokens = change.addedTokens(); if (addedTokens != null) { for (int i = 0; i < addedTokens.size(); i++) { ! AbstractToken token = addedTokens.get(i); updateElementOffsetAdd(token); } addAll(index, addedTokens); laState = laState.addAll(index, change.laState()); ! change.syncAddedTokenCount(); } } *************** *** 286,292 **** return true; } ! public Set skipTokenIds() { return null; } --- 296,302 ---- return true; } ! 
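replaceTokens() above relies on the offset gap inherited from FlyOffsetGapList: the gap is moved to the modification offset and its length is adjusted by the removed-minus-inserted difference, so every token past the edit shifts without being touched individually. A toy, self-contained illustration of that offset-gap bookkeeping follows; OffsetGapList here is an illustrative simplification, not the real FlyOffsetGapList.

    import java.util.ArrayList;
    import java.util.List;

    /**
     * Toy offset-gap list: offsets of elements at or past the gap are stored shifted by
     * gapLength, so a single edit adjusts two ints instead of every stored offset.
     */
    final class OffsetGapList {

        private final List<Integer> rawOffsets = new ArrayList<Integer>();
        private int gapStart;            // real offset where the gap currently sits
        private int gapLength = 1 << 20; // arbitrary large initial gap

        /** Record an element located at the given real offset. */
        void add(int realOffset) {
            rawOffsets.add(Integer.valueOf(toRaw(realOffset)));
        }

        /** Real offset of the i-th element. */
        int offset(int i) {
            int raw = rawOffsets.get(i).intValue();
            return (raw < gapStart) ? raw : raw - gapLength;
        }

        /** Apply an edit at modOffset that changed the text length by diff (inserted minus removed). */
        void textModified(int modOffset, int diff) {
            moveGap(modOffset);
            gapLength -= diff; // all later offsets now read shifted by diff
        }

        private int toRaw(int realOffset) {
            return (realOffset < gapStart) ? realOffset : realOffset + gapLength;
        }

        private void moveGap(int newGapStart) {
            if (newGapStart < gapStart) {
                // elements in [newGapStart, gapStart) move behind the gap: store them shifted
                for (int i = 0; i < rawOffsets.size(); i++) {
                    int raw = rawOffsets.get(i).intValue();
                    if (raw >= newGapStart && raw < gapStart) {
                        rawOffsets.set(i, Integer.valueOf(raw + gapLength));
                    }
                }
            } else if (newGapStart > gapStart) {
                // elements in [gapStart, newGapStart) move before the gap: store them unshifted
                for (int i = 0; i < rawOffsets.size(); i++) {
                    int raw = rawOffsets.get(i).intValue();
                    if (raw >= gapStart + gapLength && raw - gapLength < newGapStart) {
                        rawOffsets.set(i, Integer.valueOf(raw - gapLength));
                    }
                }
            }
            gapStart = newGapStart;
        }
    }

Only elements the gap is moved across get their stored values rewritten; adjusting gapLength is the whole update for everything behind the gap.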
public Set skipTokenIds() { return null; } Index: lexer/src/org/netbeans/lib/lexer/inc/MutableTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/inc/MutableTokenList.java,v retrieving revision 1.2 diff -c -r1.2 MutableTokenList.java *** lexer/src/org/netbeans/lib/lexer/inc/MutableTokenList.java 4 Oct 2006 17:03:20 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/inc/MutableTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 19,24 **** --- 19,25 ---- package org.netbeans.lib.lexer.inc; + import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.lexer.LexerInputOperation; import org.netbeans.lib.lexer.TokenList; *************** *** 29,35 **** * @version 1.00 */ ! public interface MutableTokenList extends TokenList { /** * Return token or branch token list at the requested index --- 30,36 ---- * @version 1.00 */ ! public interface MutableTokenList extends TokenList { /** * Return token or branch token list at the requested index *************** *** 38,49 **** * Also do not perform any checks regarding index validity * - only items below {@link #tokenCountCurrent()} will be requested. */ ! Object tokenOrBranchUnsync(int index); /** * Create lexer input operation used for relexing of the input. */ ! LexerInputOperation createLexerInputOperation( int tokenIndex, int relexOffset, Object relexState); /** --- 39,50 ---- * Also do not perform any checks regarding index validity * - only items below {@link #tokenCountCurrent()} will be requested. */ ! Object tokenOrEmbeddingContainerUnsync(int index); /** * Create lexer input operation used for relexing of the input. */ ! LexerInputOperation createLexerInputOperation( int tokenIndex, int relexOffset, Object relexState); /** *************** *** 57,62 **** /** * Update the token list by replacing tokens according to the given change. */ ! void replaceTokens(TokenListChange change, int removeTokenCount); } --- 58,64 ---- /** * Update the token list by replacing tokens according to the given change. */ ! void replaceTokens(TokenHierarchyEventInfo eventInfo, ! TokenListChange change, int removeTokenCount); } Index: lexer/src/org/netbeans/lib/lexer/inc/RemovedTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/inc/RemovedTokenList.java,v retrieving revision 1.3 diff -c -r1.3 RemovedTokenList.java *** lexer/src/org/netbeans/lib/lexer/inc/RemovedTokenList.java 11 Oct 2006 14:30:12 -0000 1.3 --- lexer/src/org/netbeans/lib/lexer/inc/RemovedTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 24,31 **** import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; ! import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.token.AbstractToken; import org.netbeans.lib.lexer.token.TextToken; --- 24,32 ---- import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; ! import org.netbeans.lib.lexer.EmbeddingContainer; import org.netbeans.lib.lexer.LexerUtilsConstants; + import org.netbeans.lib.lexer.TokenHierarchyOperation; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.token.AbstractToken; import org.netbeans.lib.lexer.token.TextToken; *************** *** 37,60 **** * @version 1.00 */ ! 
public final class RemovedTokenList implements TokenList { ! private final TokenListChange change; private Object[] tokensOrBranches; private int removedTokensStartOffset; ! public RemovedTokenList(TokenListChange change, Object[] tokensOrBranches) { ! this.change = change; this.tokensOrBranches = tokensOrBranches; } public LanguagePath languagePath() { ! return change.languagePath(); } ! public Object tokenOrBranch(int index) { return (index < tokensOrBranches.length) ? tokensOrBranches[index] : null; } --- 38,61 ---- * @version 1.00 */ ! public final class RemovedTokenList implements TokenList { ! private final LanguagePath languagePath; private Object[] tokensOrBranches; private int removedTokensStartOffset; ! public RemovedTokenList(LanguagePath languagePath, Object[] tokensOrBranches) { ! this.languagePath = languagePath; this.tokensOrBranches = tokensOrBranches; } public LanguagePath languagePath() { ! return languagePath; } ! public Object tokenOrEmbeddingContainer(int index) { return (index < tokensOrBranches.length) ? tokensOrBranches[index] : null; } *************** *** 86,96 **** } } ! private Token existingToken(int index) { return LexerUtilsConstants.token(tokensOrBranches[index]); } ! public synchronized AbstractToken createNonFlyToken( int index, AbstractToken flyToken, int offset) { TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset); tokensOrBranches[index] = nonFlyToken; --- 87,97 ---- } } ! private Token existingToken(int index) { return LexerUtilsConstants.token(tokensOrBranches[index]); } ! public synchronized AbstractToken replaceFlyToken( int index, AbstractToken flyToken, int offset) { TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset); tokensOrBranches[index] = nonFlyToken; *************** *** 118,131 **** throw new IllegalStateException("Querying of text for removed tokens not supported"); // NOI18N } ! public void wrapToken(int index, BranchTokenList wrapper) { throw new IllegalStateException("Branching of removed tokens not supported"); // NOI18N } ! public TokenList root() { return this; } public InputAttributes inputAttributes() { return null; } --- 119,136 ---- throw new IllegalStateException("Querying of text for removed tokens not supported"); // NOI18N } ! public void wrapToken(int index, EmbeddingContainer embeddingContainer) { throw new IllegalStateException("Branching of removed tokens not supported"); // NOI18N } ! public TokenList root() { return this; } + public TokenHierarchyOperation tokenHierarchyOperation() { + return null; + } + public InputAttributes inputAttributes() { return null; } *************** *** 134,140 **** return true; } ! public Set skipTokenIds() { return null; } --- 139,145 ---- return true; } ! public Set skipTokenIds() { return null; } Index: lexer/src/org/netbeans/lib/lexer/inc/SnapshotTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/inc/SnapshotTokenList.java,v retrieving revision 1.3 diff -c -r1.3 SnapshotTokenList.java *** lexer/src/org/netbeans/lib/lexer/inc/SnapshotTokenList.java 11 Oct 2006 14:30:12 -0000 1.3 --- lexer/src/org/netbeans/lib/lexer/inc/SnapshotTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 25,31 **** import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.editor.util.CompactMap; ! 
import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.lib.lexer.TokenHierarchyOperation; import org.netbeans.lib.lexer.TokenList; --- 25,32 ---- import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.editor.util.CompactMap; ! import org.netbeans.lib.lexer.EmbeddedTokenList; ! import org.netbeans.lib.lexer.EmbeddingContainer; import org.netbeans.lib.lexer.LexerUtilsConstants; import org.netbeans.lib.lexer.TokenHierarchyOperation; import org.netbeans.lib.lexer.TokenList; *************** *** 40,51 **** * @version 1.00 */ ! public final class SnapshotTokenList implements TokenList { /** Due to debugging purposes - dumpInfo() use. */ ! private TokenHierarchyOperation snapshot; ! private IncTokenList liveTokenList; private int liveTokenGapStart = -1; --- 41,52 ---- * @version 1.00 */ ! public final class SnapshotTokenList implements TokenList { /** Due to debugging purposes - dumpInfo() use. */ ! private TokenHierarchyOperation snapshot; ! private IncTokenList liveTokenList; private int liveTokenGapStart = -1; *************** *** 70,76 **** private int origTokenCount; /** Overrides of tokens' offset. */ ! private CompactMap token2offset; public int liveTokenGapStart() { return liveTokenGapStart; --- 71,77 ---- private int origTokenCount; /** Overrides of tokens' offset. */ ! private CompactMap, Token2OffsetEntry> token2offset; public int liveTokenGapStart() { return liveTokenGapStart; *************** *** 80,93 **** return liveTokenGapEnd; } ! public SnapshotTokenList(TokenHierarchyOperation snapshot) { this.snapshot = snapshot; ! this.liveTokenList = (IncTokenList)snapshot. liveTokenHierarchyOperation().tokenList(); ! token2offset = new CompactMap(); } ! public TokenHierarchyOperation snapshot() { return snapshot; } --- 81,94 ---- return liveTokenGapEnd; } ! public SnapshotTokenList(TokenHierarchyOperation snapshot) { this.snapshot = snapshot; ! this.liveTokenList = (IncTokenList)snapshot. liveTokenHierarchyOperation().tokenList(); ! token2offset = new CompactMap,Token2OffsetEntry>(); } ! public TokenHierarchyOperation snapshot() { return snapshot; } *************** *** 95,109 **** return liveTokenList.languagePath(); } ! public Object tokenOrBranch(int index) { if (liveTokenGapStart == -1 || index < liveTokenGapStart) { ! return liveTokenList.tokenOrBranch(index); } index -= liveTokenGapStart; if (index < origTokenCount) { return origTokensOrBranches[origTokenStartIndex + index]; } ! return liveTokenList.tokenOrBranch(liveTokenGapEnd + index - origTokenCount); } public int lookahead(int index) { --- 96,110 ---- return liveTokenList.languagePath(); } ! public Object tokenOrEmbeddingContainer(int index) { if (liveTokenGapStart == -1 || index < liveTokenGapStart) { ! return liveTokenList.tokenOrEmbeddingContainer(index); } index -= liveTokenGapStart; if (index < origTokenCount) { return origTokensOrBranches[origTokenStartIndex + index]; } ! return liveTokenList.tokenOrEmbeddingContainer(liveTokenGapEnd + index - origTokenCount); } public int lookahead(int index) { *************** *** 128,141 **** } index -= origTokenCount; ! AbstractToken token = LexerUtilsConstants.token(liveTokenList. ! tokenOrBranchUnsync(liveTokenGapEnd + index)); int offset; if (token.isFlyweight()) { offset = token.length(); while (--index >= 0) { token = LexerUtilsConstants.token(liveTokenList. ! 
tokenOrBranchUnsync(liveTokenGapEnd + index)); if (token.isFlyweight()) { offset += token.length(); } else { // non-flyweight element --- 129,142 ---- } index -= origTokenCount; ! AbstractToken token = LexerUtilsConstants.token(liveTokenList. ! tokenOrEmbeddingContainerUnsync(liveTokenGapEnd + index)); int offset; if (token.isFlyweight()) { offset = token.length(); while (--index >= 0) { token = LexerUtilsConstants.token(liveTokenList. ! tokenOrEmbeddingContainerUnsync(liveTokenGapEnd + index)); if (token.isFlyweight()) { offset += token.length(); } else { // non-flyweight element *************** *** 157,168 **** } /** ! * @param token non-null token for whicht the offset is being computed. * @param tokenList non-null token list to which the token belongs. * @param rawOffset raw offset of the token. * @return offset for the particular token. */ ! public int tokenOffset(AbstractToken token, TokenList tokenList, int rawOffset) { // The following situations can happen: // 1. Token instance is contained in token2offset map so the token's // offset is overriden by the information in the map. --- 158,170 ---- } /** ! * @param token non-null token for which the offset is being computed. * @param tokenList non-null token list to which the token belongs. * @param rawOffset raw offset of the token. * @return offset for the particular token. */ ! public int tokenOffset( ! AbstractToken token, TokenList tokenList, int rawOffset) { // The following situations can happen: // 1. Token instance is contained in token2offset map so the token's // offset is overriden by the information in the map. *************** *** 179,193 **** // 4. Token from branch token list is passed. // In this case the offset of the corresponding rootBranchToken // needs to be corrected if necessary. ! if (tokenList.getClass() == BranchTokenList.class) { ! BranchTokenList branchTokenList = (BranchTokenList)tokenList; ! AbstractToken rootBranchToken = branchTokenList.rootBranchToken(); ! Token2OffsetEntry entry = (Token2OffsetEntry)token2offset.get(rootBranchToken); if (entry != null) { ! return entry.offset() + branchTokenList.childTokenOffsetShift(rawOffset); } else { // no special entry => check whether the regular offset is below liveTokenGapStartOffset ! int offset = branchTokenList.childTokenOffset(rawOffset); ! TokenList rootTokenList = branchTokenList.root(); if (rootTokenList != null && rootTokenList.getClass() == IncTokenList.class) { if (offset >= liveTokenGapStartOffset) { offset += liveTokenOffsetDiff; --- 181,195 ---- // 4. Token from branch token list is passed. // In this case the offset of the corresponding rootBranchToken // needs to be corrected if necessary. ! if (tokenList.getClass() == EmbeddedTokenList.class) { ! EmbeddedTokenList etl = (EmbeddedTokenList)tokenList; ! AbstractToken rootBranchToken = etl.rootToken(); ! Token2OffsetEntry entry = token2offset.get(rootBranchToken); if (entry != null) { ! return entry.offset() + etl.childTokenOffsetShift(rawOffset); } else { // no special entry => check whether the regular offset is below liveTokenGapStartOffset ! int offset = etl.childTokenOffset(rawOffset); ! TokenList rootTokenList = etl.root(); if (rootTokenList != null && rootTokenList.getClass() == IncTokenList.class) { if (offset >= liveTokenGapStartOffset) { offset += liveTokenOffsetDiff; *************** *** 196,203 **** return offset; } ! } else { ! Token2OffsetEntry entry = (Token2OffsetEntry)token2offset.get(token); if (entry != null) { return entry.offset(); } else { --- 198,206 ---- return offset; } ! 
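The tokenOffset() logic above reduces to a lookup order: when token2offset holds an entry for the token, the snapshot answers with that recorded offset; otherwise it uses the live offset, shifted by liveTokenOffsetDiff once the offset falls at or past liveTokenGapStartOffset. A compact sketch of that override-first lookup follows, using a plain IdentityHashMap where the patch uses its CompactMap; the names are illustrative.

    import java.util.IdentityHashMap;
    import java.util.Map;

    /** Sketch: a snapshot answers offsets from overrides first, then from corrected live offsets. */
    final class SnapshotOffsets<T> {

        private final Map<T, Integer> overrides = new IdentityHashMap<T, Integer>();
        private final int gapStartOffset; // live offsets at or past this point need correcting
        private final int liveOffsetDiff; // accumulated removed-minus-inserted length

        SnapshotOffsets(int gapStartOffset, int liveOffsetDiff) {
            this.gapStartOffset = gapStartOffset;
            this.liveOffsetDiff = liveOffsetDiff;
        }

        /** Record that this token started at the given offset when the snapshot was taken. */
        void putOverride(T token, int snapshotOffset) {
            overrides.put(token, Integer.valueOf(snapshotOffset));
        }

        /** Snapshot offset of a token: its override if present, otherwise the corrected live offset. */
        int offset(T token, int liveOffset) {
            Integer recorded = overrides.get(token);
            if (recorded != null) {
                return recorded.intValue();
            }
            return (liveOffset >= gapStartOffset) ? liveOffset + liveOffsetDiff : liveOffset;
        }
    }

An identity-based map matches the intent here: overrides are recorded for specific token instances, not for tokens that merely compare equal.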
} else { // queried token list is the root list genericsed by ! @SuppressWarnings("unchecked") ! Token2OffsetEntry entry = token2offset.get((AbstractToken)token); if (entry != null) { return entry.offset(); } else { *************** *** 241,271 **** throw new IllegalStateException("Not expected to be called"); // NOI18N } ! public void wrapToken(int index, BranchTokenList wrapper) { // Allow branching if (liveTokenGapStart == -1 || index < liveTokenGapStart) { ! liveTokenList.wrapToken(index, wrapper); } else { index -= liveTokenGapStart; if (index < origTokenCount) { ! origTokensOrBranches[origTokenStartIndex + index] = wrapper; } else { ! liveTokenList.wrapToken(liveTokenGapEnd + index - origTokenCount, wrapper); } } } ! public AbstractToken createNonFlyToken(int index, AbstractToken flyToken, int offset) { AbstractToken nonFlyToken; if (liveTokenGapStart == -1 || index < liveTokenGapStart) { ! nonFlyToken = liveTokenList.createNonFlyToken(index, flyToken, offset); } else { index -= liveTokenGapStart; if (index < origTokenCount) { nonFlyToken = ((TextToken)flyToken).createCopy(this, offset); origTokensOrBranches[origTokenStartIndex + index] = nonFlyToken; } else { ! nonFlyToken = liveTokenList.createNonFlyToken( liveTokenGapEnd + index - origTokenCount, flyToken, offset - liveTokenOffsetDiff); } --- 244,274 ---- throw new IllegalStateException("Not expected to be called"); // NOI18N } ! public void wrapToken(int index, EmbeddingContainer embeddingContainer) { // Allow branching if (liveTokenGapStart == -1 || index < liveTokenGapStart) { ! liveTokenList.wrapToken(index, embeddingContainer); } else { index -= liveTokenGapStart; if (index < origTokenCount) { ! origTokensOrBranches[origTokenStartIndex + index] = embeddingContainer; } else { ! liveTokenList.wrapToken(liveTokenGapEnd + index - origTokenCount, embeddingContainer); } } } ! public AbstractToken replaceFlyToken(int index, AbstractToken flyToken, int offset) { AbstractToken nonFlyToken; if (liveTokenGapStart == -1 || index < liveTokenGapStart) { ! nonFlyToken = liveTokenList.replaceFlyToken(index, flyToken, offset); } else { index -= liveTokenGapStart; if (index < origTokenCount) { nonFlyToken = ((TextToken)flyToken).createCopy(this, offset); origTokensOrBranches[origTokenStartIndex + index] = nonFlyToken; } else { ! nonFlyToken = liveTokenList.replaceFlyToken( liveTokenGapEnd + index - origTokenCount, flyToken, offset - liveTokenOffsetDiff); } *************** *** 273,282 **** return nonFlyToken; } ! public TokenList root() { return this; } public InputAttributes inputAttributes() { return liveTokenList.inputAttributes(); } --- 276,289 ---- return nonFlyToken; } ! public TokenList root() { return this; } + public TokenHierarchyOperation tokenHierarchyOperation() { + return snapshot; + } + public InputAttributes inputAttributes() { return liveTokenList.inputAttributes(); } *************** *** 285,291 **** return true; } ! public Set skipTokenIds() { return null; } --- 292,298 ---- return true; } ! public Set skipTokenIds() { return null; } *************** *** 296,314 **** && !token2offset.containsKey(token); } ! public void update(TokenListChange change) { ! TokenList removedTokenList = change.removedTokenList(); ! int startRemovedIndex = change.tokenIndex(); int endRemovedIndex = startRemovedIndex + removedTokenList.tokenCount(); if (liveTokenGapStart == -1) { // no modifications yet liveTokenGapStart = startRemovedIndex; liveTokenGapEnd = startRemovedIndex; ! 
liveTokenGapStartOffset = change.modifiedTokensStartOffset(); origTokensOrBranches = new Object[removedTokenList.tokenCount()]; origOffsets = new int[origTokensOrBranches.length]; } ! int liveTokenIndexDiff = change.addedTokenCount() - change.removedTokenCount(); if (startRemovedIndex < liveTokenGapStart) { // will affect initial shared tokens int extraOrigTokenCount = liveTokenGapStart - startRemovedIndex; ensureOrigTokensStartCapacity(extraOrigTokenCount); --- 303,322 ---- && !token2offset.containsKey(token); } ! public void update(TokenHierarchyEventInfo eventInfo, TokenListChange change) { ! TokenList removedTokenList = change.tokenChangeInfo().removedTokenList(); ! int startRemovedIndex = change.index(); int endRemovedIndex = startRemovedIndex + removedTokenList.tokenCount(); if (liveTokenGapStart == -1) { // no modifications yet liveTokenGapStart = startRemovedIndex; liveTokenGapEnd = startRemovedIndex; ! liveTokenGapStartOffset = change.offset(); origTokensOrBranches = new Object[removedTokenList.tokenCount()]; origOffsets = new int[origTokensOrBranches.length]; } ! int liveTokenIndexDiff = change.tokenChangeInfo().addedTokenCount() ! - removedTokenList.tokenCount(); if (startRemovedIndex < liveTokenGapStart) { // will affect initial shared tokens int extraOrigTokenCount = liveTokenGapStart - startRemovedIndex; ensureOrigTokensStartCapacity(extraOrigTokenCount); *************** *** 317,349 **** int bound = Math.min(endRemovedIndex, liveTokenGapStart); int index; ! int offset = change.modifiedTokensStartOffset(); liveTokenGapStartOffset = offset; for (index = startRemovedIndex; index < bound; index++) { ! Object tokenOrBranch = removedTokenList.tokenOrBranch(index - startRemovedIndex); ! Token t = LexerUtilsConstants.token(tokenOrBranch); ! if (!t.isFlyweight()) { ! AbstractToken token = (AbstractToken)t; ! TokenList tokenList = token.tokenList(); if (tokenList == null) { ! tokenList = new StandaloneTokenList(change.languagePath(), ! change.originalText().toCharArray(offset, offset + token.length())); token.setTokenList(tokenList); } } origOffsets[origTokenStartIndex] = offset; ! origTokensOrBranches[origTokenStartIndex++] = tokenOrBranch; ! offset += t.length(); } while (index < liveTokenGapStart) { ! Object tokenOrBranch = liveTokenList.tokenOrBranchUnsync(index + liveTokenIndexDiff); ! AbstractToken t = LexerUtilsConstants.token(tokenOrBranch); if (!t.isFlyweight()) { ! token2offset.putEntry(new Token2OffsetEntry(t, offset)); } origOffsets[origTokenStartIndex] = offset; ! origTokensOrBranches[origTokenStartIndex++] = tokenOrBranch; offset += t.length(); index++; } --- 325,356 ---- int bound = Math.min(endRemovedIndex, liveTokenGapStart); int index; ! int offset = change.offset(); liveTokenGapStartOffset = offset; for (index = startRemovedIndex; index < bound; index++) { ! Object tokenOrEmbeddingContainer = removedTokenList.tokenOrEmbeddingContainer(index - startRemovedIndex); ! AbstractToken token = LexerUtilsConstants.token(tokenOrEmbeddingContainer); ! if (!token.isFlyweight()) { ! TokenList tokenList = token.tokenList(); if (tokenList == null) { ! tokenList = new StandaloneTokenList(change.languagePath(), ! eventInfo.originalText().toCharArray(offset, offset + token.length())); token.setTokenList(tokenList); } } origOffsets[origTokenStartIndex] = offset; ! origTokensOrBranches[origTokenStartIndex++] = tokenOrEmbeddingContainer; ! offset += token.length(); } while (index < liveTokenGapStart) { ! 
Object tokenOrEmbeddingContainer = liveTokenList.tokenOrEmbeddingContainerUnsync(index + liveTokenIndexDiff); ! AbstractToken t = LexerUtilsConstants.token(tokenOrEmbeddingContainer); if (!t.isFlyweight()) { ! token2offset.putEntry(new Token2OffsetEntry(t, offset)); } origOffsets[origTokenStartIndex] = offset; ! origTokensOrBranches[origTokenStartIndex++] = tokenOrEmbeddingContainer; offset += t.length(); index++; } *************** *** 358,401 **** int bound = Math.max(startRemovedIndex, liveTokenGapEnd); int index = endRemovedIndex; ! int offset = change.removedTokensEndOffset(); for (index = endRemovedIndex - 1; index >= bound; index--) { ! Object tokenOrBranch = removedTokenList.tokenOrBranch(index - startRemovedIndex); ! AbstractToken t = LexerUtilsConstants.token(tokenOrBranch); ! offset -= t.length(); ! if (!t.isFlyweight()) { ! AbstractToken token = (AbstractToken)t; ! TokenList tokenList = token.tokenList(); if (tokenList == null) { ! tokenList = new StandaloneTokenList(change.languagePath(), ! change.originalText().toCharArray(offset, offset + token.length())); token.setTokenList(tokenList); } } origOffsets[origTokenIndex] = offset + liveTokenOffsetDiff; // If the token's offset had to be diff-ed already then a map entry is necessary if (liveTokenOffsetDiff != 0) { ! token2offset.putEntry(new Token2OffsetEntry(t, origOffsets[origTokenIndex])); } ! origTokensOrBranches[origTokenIndex--] = tokenOrBranch; } while (index >= liveTokenGapEnd) { ! Object tokenOrBranch = liveTokenList.tokenOrBranchUnsync(index + liveTokenIndexDiff); ! AbstractToken t = LexerUtilsConstants.token(tokenOrBranch); ! offset -= t.length(); ! if (!t.isFlyweight()) { ! token2offset.putEntry(new Token2OffsetEntry(t, offset)); } origOffsets[origTokenIndex] = offset + liveTokenOffsetDiff; ! token2offset.putEntry(new Token2OffsetEntry(t, origOffsets[origTokenIndex])); ! origTokensOrBranches[origTokenIndex--] = tokenOrBranch; index--; } liveTokenGapEnd = endRemovedIndex; } ! liveTokenOffsetDiff += change.removedLength() - change.insertedLength(); liveTokenGapEnd += liveTokenIndexDiff; } --- 365,407 ---- int bound = Math.max(startRemovedIndex, liveTokenGapEnd); int index = endRemovedIndex; ! int offset = change.removedEndOffset(); for (index = endRemovedIndex - 1; index >= bound; index--) { ! Object tokenOrEmbeddingContainer = removedTokenList.tokenOrEmbeddingContainer(index - startRemovedIndex); ! AbstractToken token = LexerUtilsConstants.token(tokenOrEmbeddingContainer); ! offset -= token.length(); ! if (!token.isFlyweight()) { ! TokenList tokenList = token.tokenList(); if (tokenList == null) { ! tokenList = new StandaloneTokenList(change.languagePath(), ! eventInfo.originalText().toCharArray(offset, offset + token.length())); token.setTokenList(tokenList); } } origOffsets[origTokenIndex] = offset + liveTokenOffsetDiff; // If the token's offset had to be diff-ed already then a map entry is necessary if (liveTokenOffsetDiff != 0) { ! token2offset.putEntry(new Token2OffsetEntry(token, origOffsets[origTokenIndex])); } ! origTokensOrBranches[origTokenIndex--] = tokenOrEmbeddingContainer; } while (index >= liveTokenGapEnd) { ! Object tokenOrEmbeddingContainer = liveTokenList.tokenOrEmbeddingContainerUnsync(index + liveTokenIndexDiff); ! AbstractToken token = LexerUtilsConstants.token(tokenOrEmbeddingContainer); ! offset -= token.length(); ! if (!token.isFlyweight()) { ! token2offset.putEntry(new Token2OffsetEntry(token, offset)); } origOffsets[origTokenIndex] = offset + liveTokenOffsetDiff; ! 
token2offset.putEntry(new Token2OffsetEntry(token, origOffsets[origTokenIndex])); ! origTokensOrBranches[origTokenIndex--] = tokenOrEmbeddingContainer; index--; } liveTokenGapEnd = endRemovedIndex; } ! liveTokenOffsetDiff += eventInfo.removedLength() - eventInfo.insertedLength(); liveTokenGapEnd += liveTokenIndexDiff; } *************** *** 471,492 **** return liveTokenGapStartOffset + liveTokenOffsetDiff; } ! private static final class Token2OffsetEntry extends CompactMap.MapEntry { ! private final AbstractToken token; // 20 bytes (16-super + 4) private final int offset; // 24 bytes ! Token2OffsetEntry(AbstractToken token, int offset) { this.token = token; this.offset = offset; } ! public AbstractToken getKey() { return token; } ! public Token2OffsetEntry getValue() { return this; } --- 477,499 ---- return liveTokenGapStartOffset + liveTokenOffsetDiff; } ! private static final class Token2OffsetEntry ! extends CompactMap.MapEntry,Token2OffsetEntry> { ! private final AbstractToken token; // 20 bytes (16-super + 4) private final int offset; // 24 bytes ! Token2OffsetEntry(AbstractToken token, int offset) { this.token = token; this.offset = offset; } ! public AbstractToken getKey() { return token; } ! public Token2OffsetEntry getValue() { return this; } *************** *** 495,508 **** } protected boolean valueEquals(Object value2) { ! return (value2 instanceof Token2OffsetEntry && ((Token2OffsetEntry)value2).offset() == offset()); } public int offset() { return offset; } ! public Token2OffsetEntry setValue(Token2OffsetEntry value) { throw new IllegalStateException("Prohibited"); // NOI18N } --- 502,518 ---- } protected boolean valueEquals(Object value2) { ! // In fact the second entry would have to be of because ! // the tokens (as keys) must be the same objects to be equal ! return (value2 instanceof Token2OffsetEntry ! && ((Token2OffsetEntry)value2).offset() == offset()); } public int offset() { return offset; } ! public Token2OffsetEntry setValue(Token2OffsetEntry value) { throw new IllegalStateException("Prohibited"); // NOI18N } Index: lexer/src/org/netbeans/lib/lexer/inc/StandaloneTokenList.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/inc/StandaloneTokenList.java,v retrieving revision 1.2 diff -c -r1.2 StandaloneTokenList.java *** lexer/src/org/netbeans/lib/lexer/inc/StandaloneTokenList.java 4 Oct 2006 17:03:20 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/inc/StandaloneTokenList.java 28 Nov 2006 14:20:17 -0000 *************** *** 23,29 **** import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.LanguagePath; import org.netbeans.api.lexer.TokenId; ! import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.token.AbstractToken; --- 23,30 ---- import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.LanguagePath; import org.netbeans.api.lexer.TokenId; ! import org.netbeans.lib.lexer.EmbeddingContainer; ! import org.netbeans.lib.lexer.TokenHierarchyOperation; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.token.AbstractToken; *************** *** 35,41 **** * @version 1.00 */ ! public final class StandaloneTokenList implements TokenList { private char[] tokenText; --- 36,42 ---- * @version 1.00 */ ! public final class StandaloneTokenList implements TokenList { private char[] tokenText; *************** *** 50,60 **** return languagePath; } ! 
public Object tokenOrBranch(int index) { throw new IllegalStateException("Not expected to be called"); // NOI18N } ! public AbstractToken createNonFlyToken( int index, AbstractToken flyToken, int offset) { throw new IllegalStateException("Not expected to be called"); // NOI18N } --- 51,61 ---- return languagePath; } ! public Object tokenOrEmbeddingContainer(int index) { throw new IllegalStateException("Not expected to be called"); // NOI18N } ! public AbstractToken replaceFlyToken( int index, AbstractToken flyToken, int offset) { throw new IllegalStateException("Not expected to be called"); // NOI18N } *************** *** 92,105 **** return tokenText[index]; } ! public void wrapToken(int index, BranchTokenList wrapper) { throw new IllegalStateException("Branching of standalone tokens not supported"); // NOI18N } ! public TokenList root() { return this; } public InputAttributes inputAttributes() { throw new IllegalStateException("Not expected to be called"); // NOI18N } --- 93,110 ---- return tokenText[index]; } ! public void wrapToken(int index, EmbeddingContainer embeddingContainer) { throw new IllegalStateException("Branching of standalone tokens not supported"); // NOI18N } ! public TokenList root() { return this; } + public TokenHierarchyOperation tokenHierarchyOperation() { + return null; + } + public InputAttributes inputAttributes() { throw new IllegalStateException("Not expected to be called"); // NOI18N } *************** *** 108,114 **** return true; } ! public Set skipTokenIds() { return null; } --- 113,119 ---- return true; } ! public Set skipTokenIds() { return null; } Index: lexer/src/org/netbeans/lib/lexer/inc/TokenChangeInfo.java =================================================================== RCS file: lexer/src/org/netbeans/lib/lexer/inc/TokenChangeInfo.java diff -N lexer/src/org/netbeans/lib/lexer/inc/TokenChangeInfo.java *** /dev/null 1 Jan 1970 00:00:00 -0000 --- lexer/src/org/netbeans/lib/lexer/inc/TokenChangeInfo.java 28 Nov 2006 14:20:17 -0000 *************** *** 0 **** --- 1,123 ---- + /* + * The contents of this file are subject to the terms of the Common Development + * and Distribution License (the License). You may not use this file except in + * compliance with the License. + * + * You can obtain a copy of the License at http://www.netbeans.org/cddl.html + * or http://www.netbeans.org/cddl.txt. + * + * When distributing Covered Code, include this CDDL Header Notice in each file + * and include the License file at http://www.netbeans.org/cddl.txt. + * If applicable, add the following below the CDDL Header, with the fields + * enclosed by brackets [] replaced by your own identifying information: + * "Portions Copyrighted [year] [name of copyright owner]" + * + * The Original Software is NetBeans. The Initial Developer of the Original + * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun + * Microsystems, Inc. All Rights Reserved. + */ + + package org.netbeans.lib.lexer.inc; + + import java.util.List; + import org.netbeans.api.lexer.TokenChange; + import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.LAState; + import org.netbeans.lib.lexer.LexerApiPackageAccessor; + import org.netbeans.lib.lexer.TokenList; + import org.netbeans.lib.lexer.token.AbstractToken; + + /** + * Description of the change in a token list. + *
    + * The change is expressed as a list of removed tokens + * plus the current list and index and number of the tokens + * added to the current list. + * + * @author Miloslav Metelka + * @version 1.00 + */ + + public final class TokenChangeInfo { + + private static final TokenChange[] EMPTY_EMBEDDED_CHANGES + = (TokenChange[])new TokenChange[0]; + + private TokenChange[] embeddedChanges = EMPTY_EMBEDDED_CHANGES; + + private final TokenList currentTokenList; + + private RemovedTokenList removedTokenList; + + private List> addedTokens; + + private LAState laState; + + private int addedTokenCount; + + private int index; + + private int offset; + + private int removedTokensEndOffset; + + private int addedTokensEndOffset; + + private int offsetGapIndex; + + private TokenListChange child; + + + public TokenChangeInfo(TokenList currentTokenList) { + this.currentTokenList = currentTokenList; + } + + public TokenChange[] embeddedChanges() { + return embeddedChanges; + } + + public void addEmbeddedChange(TokenChangeInfo change) { + TokenChange[] tmp = (TokenChange[]) + new TokenChange[embeddedChanges.length + 1]; + System.arraycopy(embeddedChanges, 0, tmp, 0, embeddedChanges.length); + tmp[embeddedChanges.length] = LexerApiPackageAccessor.get().createTokenChange(change); + embeddedChanges = tmp; + } + + public int index() { + return index; + } + + public void setIndex(int index) { + this.index = index; + } + + public int offset() { + return offset; + } + + public void setOffset(int offset) { + this.offset = offset; + } + + public RemovedTokenList removedTokenList() { + return removedTokenList; + } + + public void setRemovedTokenList(RemovedTokenList removedTokenList) { + this.removedTokenList = removedTokenList; + } + + public int addedTokenCount() { + return addedTokenCount; + } + + public void setAddedTokenCount(int addedTokenCount) { + this.addedTokenCount = addedTokenCount; + } + + public TokenList currentTokenList() { + return currentTokenList; + } + + } \ No newline at end of file Index: lexer/src/org/netbeans/lib/lexer/inc/TokenHierarchyEventInfo.java =================================================================== RCS file: lexer/src/org/netbeans/lib/lexer/inc/TokenHierarchyEventInfo.java diff -N lexer/src/org/netbeans/lib/lexer/inc/TokenHierarchyEventInfo.java *** /dev/null 1 Jan 1970 00:00:00 -0000 --- lexer/src/org/netbeans/lib/lexer/inc/TokenHierarchyEventInfo.java 28 Nov 2006 14:20:17 -0000 *************** *** 0 **** --- 1,147 ---- + /* + * The contents of this file are subject to the terms of the Common Development + * and Distribution License (the License). You may not use this file except in + * compliance with the License. + * + * You can obtain a copy of the License at http://www.netbeans.org/cddl.html + * or http://www.netbeans.org/cddl.txt. + * + * When distributing Covered Code, include this CDDL Header Notice in each file + * and include the License file at http://www.netbeans.org/cddl.txt. + * If applicable, add the following below the CDDL Header, with the fields + * enclosed by brackets [] replaced by your own identifying information: + * "Portions Copyrighted [year] [name of copyright owner]" + * + * The Original Software is NetBeans. The Initial Developer of the Original + * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun + * Microsystems, Inc. All Rights Reserved. 
+ */ + + package org.netbeans.lib.lexer.inc; + + import org.netbeans.api.lexer.TokenChange; + import org.netbeans.api.lexer.TokenHierarchyEventType; + import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.LexerApiPackageAccessor; + import org.netbeans.lib.lexer.LexerSpiPackageAccessor; + import org.netbeans.lib.lexer.TokenHierarchyOperation; + + /** + * Shared information for all the token list changes + * for a single token hierarchy event. + * + * @author Miloslav Metelka + * @version 1.00 + */ + + public final class TokenHierarchyEventInfo { + + private final TokenHierarchyOperation tokenHierarchyOperation; + + private final TokenHierarchyEventType type; + + private TokenChange tokenChange; + + private final int modificationOffset; + + private final int removedLength; + + private final CharSequence removedText; + + private final int insertedLength; + + private OriginalText originalText; + + private int affectedStartOffset; + + private int affectedEndOffset; + + public TokenHierarchyEventInfo(TokenHierarchyOperation tokenHierarchyOperation, + TokenHierarchyEventType type, int modificationOffset, int removedLength, CharSequence removedText, int insertedLength) { + // Initial checks + if (modificationOffset < 0) { + throw new IllegalArgumentException("modificationOffset=" + modificationOffset + " < 0"); // NOI18N + } + if (removedLength < 0) { + throw new IllegalArgumentException("removedLength=" + removedLength + " < 0"); // NOI18N + } + if (insertedLength < 0) { + throw new IllegalArgumentException("insertedLength=" + insertedLength + " < 0"); // NOI18N + } + + this.tokenHierarchyOperation = tokenHierarchyOperation; + this.type = type; + this.modificationOffset = modificationOffset; + this.removedLength = removedLength; + this.removedText = removedText; + this.insertedLength = insertedLength; + } + + public TokenHierarchyOperation tokenHierarchyOperation() { + return tokenHierarchyOperation; + } + + public TokenHierarchyEventType type() { + return type; + } + + public TokenChange tokenChange() { + return tokenChange; + } + + public void setTokenChangeInfo(TokenChangeInfo info) { + this.tokenChange = LexerApiPackageAccessor.get().createTokenChange(info); + } + + public int affectedStartOffset() { + return affectedStartOffset; + } + + public void setAffectedStartOffset(int affectedStartOffset) { + this.affectedStartOffset = affectedStartOffset; + } + + public int affectedEndOffset() { + return affectedEndOffset; + } + + public void setAffectedEndOffset(int affectedEndOffset) { + this.affectedEndOffset = affectedEndOffset; + } + + public int modificationOffset() { + return modificationOffset; + } + + public int removedLength() { + return removedLength; + } + + public CharSequence removedText() { + return removedText; + } + + public int insertedLength() { + return insertedLength; + } + + public OriginalText originalText() { + if (originalText == null) { + if (removedLength != 0 && removedText == null) { + throw new IllegalStateException("Cannot obtain removed text for " // NOI18N + + tokenHierarchyOperation.mutableInputSource() + + " which breaks token snapshots operation and" // NOI18N + + " token text retaining after token's removal." // NOI18N + + " Valid removedText in TokenHierarchyControl.textModified()" // NOI18N + + " should be provided." 
// NOI18N + ); + } + originalText = new OriginalText( + LexerSpiPackageAccessor.get().text(tokenHierarchyOperation.mutableTextInput()), + modificationOffset, removedText, insertedLength + ); + } + return originalText; + } + + } Index: lexer/src/org/netbeans/lib/lexer/inc/TokenListChange.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/inc/TokenListChange.java,v retrieving revision 1.2 diff -c -r1.2 TokenListChange.java *** lexer/src/org/netbeans/lib/lexer/inc/TokenListChange.java 4 Oct 2006 17:03:20 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/inc/TokenListChange.java 28 Nov 2006 14:20:17 -0000 *************** *** 22,32 **** import java.util.ArrayList; import java.util.List; import org.netbeans.api.lexer.LanguagePath; ! import org.netbeans.api.lexer.Token; ! import org.netbeans.api.lexer.TokenHierarchyEvent; import org.netbeans.lib.lexer.LAState; - import org.netbeans.lib.lexer.LexerSpiPackageAccessor; - import org.netbeans.lib.lexer.TokenHierarchyOperation; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.token.AbstractToken; --- 22,29 ---- import java.util.ArrayList; import java.util.List; import org.netbeans.api.lexer.LanguagePath; ! import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.lexer.LAState; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.token.AbstractToken; *************** *** 41,323 **** * @version 1.00 */ ! public final class TokenListChange { ! private static final AbstractToken[] EMPTY_TOKEN_ARRAY = new AbstractToken[0]; ! ! private SharedInfo sharedInfo; ! ! private LanguagePath languagePath; ! ! private RemovedTokenList removedTokenList; ! private TokenList currentTokenList; ! ! private List addedTokens; private LAState laState; - /** - * Offset of the first removed token. - */ - private int removedTokensOffset; - - private int addedTokenCount; - - private int tokenIndex; - - private int modifiedTokensStartOffset; - - private int removedTokensEndOffset; - - private int addedTokensEndOffset; - private int offsetGapIndex; - - private TokenListChange child; - - - public TokenListChange(TokenHierarchyOperation tokenHierarchyOperation, TokenHierarchyEvent.Type type, - int offset, int removedLength, CharSequence removedText, int insertedLength) { - - // Initial checks - if (offset < 0) { - throw new IllegalArgumentException("offset=" + offset + " < 0"); // NOI18N - } - if (removedLength < 0) { - throw new IllegalArgumentException("removedLength=" + removedLength + " < 0"); // NOI18N - } - if (insertedLength < 0) { - throw new IllegalArgumentException("insertedLength=" + insertedLength + " < 0"); // NOI18N - } - - sharedInfo = new SharedInfo(tokenHierarchyOperation, type, offset, removedLength, removedText, insertedLength); - } - - public TokenListChange(TokenListChange parentChange) { - this.sharedInfo = parentChange.sharedInfo(); - parentChange.setChild(this); - } - - public TokenHierarchyOperation tokenHierarchyOperation() { - return sharedInfo.tokenHierarchyOperation(); - } - - public TokenHierarchyEvent.Type type() { - return sharedInfo.type(); - } ! public int offset() { ! return sharedInfo.offset(); ! } ! ! public int removedLength() { ! return sharedInfo.removedLength(); ! } ! public CharSequence removedText() { ! return sharedInfo.removedText(); ! } ! ! public OriginalText originalText() { ! return sharedInfo.originalText(); ! } ! public int insertedLength() { ! return sharedInfo.insertedLength(); } ! public CharSequence insertedText() { ! 
return null; } public LanguagePath languagePath() { ! return languagePath; } ! ! public void setLanguagePath(LanguagePath languagePath) { ! this.languagePath = languagePath; } ! ! public int removedTokenCount() { ! return removedTokenList.tokenCount(); } ! public int addedTokenCount() { ! return addedTokenCount; } ! public void setAddedTokenCount(int addedTokenCount) { ! this.addedTokenCount = addedTokenCount; } ! public TokenList currentTokenList() { ! return currentTokenList; ! } ! ! public void setCurrentTokenList(TokenList tokenList) { ! this.currentTokenList = tokenList; } ! public int tokenIndex() { ! return tokenIndex; } ! public void setTokenIndex(int tokenIndex) { ! this.tokenIndex = tokenIndex; ! } ! ! public RemovedTokenList removedTokenList() { ! return removedTokenList; ! } ! ! public void initRemovedTokenList(Object[] removedTokensOrBranches) { ! removedTokenList = new RemovedTokenList(this, removedTokensOrBranches); ! } ! ! public void addToken(AbstractToken token, int lookahead, Object state) { if (addedTokens == null) { ! addedTokens = new ArrayList(2); laState = LAState.empty(); } addedTokens.add(token); laState = laState.add(lookahead, state); } ! public List addedTokens() { return addedTokens; } ! ! public LAState laState() { ! return laState; ! } ! ! public void clearAddedTokens() { ! addedTokens = null; ! laState = null; ! } ! ! public void noChange(TokenList tokenList) { ! this.currentTokenList = tokenList; ! this.removedTokenList = new RemovedTokenList(this, EMPTY_TOKEN_ARRAY); ! setTokenIndex(-1); ! } ! ! public int modifiedTokensStartOffset() { ! return modifiedTokensStartOffset; ! } ! ! public void setModifiedTokensStartOffset(int modifiedTokensStartOffset) { ! this.modifiedTokensStartOffset = modifiedTokensStartOffset; ! } ! ! public int removedTokensEndOffset() { ! return removedTokensEndOffset; ! } ! ! public void setRemovedTokensEndOffset(int removedTokensEndOffset) { ! this.removedTokensEndOffset = removedTokensEndOffset; ! } ! ! public int addedTokensEndOffset() { ! return addedTokensEndOffset; } ! public void setAddedTokensEndOffset(int addedTokensEndOffset) { ! this.addedTokensEndOffset = addedTokensEndOffset; } ! ! public int offsetGapIndex() { ! return offsetGapIndex; } ! ! public void setOffsetGapIndex(int offsetGapIndex) { ! this.offsetGapIndex = offsetGapIndex; } ! ! public TokenListChange child() { ! return child; } ! ! void setChild(TokenListChange child) { ! assert (this.child == null); ! this.child = child; } ! ! SharedInfo sharedInfo() { ! 
return sharedInfo; } - - - - private static final class SharedInfo { - - private final TokenHierarchyOperation tokenHierarchyOperation; - - private final TokenHierarchyEvent.Type type; - - private final int offset; - - private final int removedLength; - - private final CharSequence removedText; - - private final int insertedLength; - - private OriginalText originalText; - - SharedInfo(TokenHierarchyOperation tokenHierarchyOperation, TokenHierarchyEvent.Type type, - int offset, int removedLength, CharSequence removedText, int insertedLength) { - this.tokenHierarchyOperation = tokenHierarchyOperation; - this.type = type; - this.offset = offset; - this.removedLength = removedLength; - this.removedText = removedText; - this.insertedLength = insertedLength; - } - - public TokenHierarchyOperation tokenHierarchyOperation() { - return tokenHierarchyOperation; - } - - public TokenHierarchyEvent.Type type() { - return type; - } - - public int offset() { - return offset; - } - - public int removedLength() { - return removedLength; - } - - public CharSequence removedText() { - return removedText; - } - - public int insertedLength() { - return insertedLength; - } - - public OriginalText originalText() { - if (originalText == null) { - if (removedLength != 0 && removedText == null) { - throw new IllegalStateException("Cannot obtain removed text for " // NOI18N - + tokenHierarchyOperation.mutableInputSource() - + " which breaks token snapshots operation and" // NOI18N - + " token text retaining after token's removal." // NOI18N - + " Valid removedText in TokenHierarchyControl.textModified()" // NOI18N - + " should be provided." // NOI18N - ); - } - originalText = new OriginalText( - LexerSpiPackageAccessor.get().text(tokenHierarchyOperation.mutableTextInput()), - offset, removedText, insertedLength - ); - } - return originalText; - } - } } --- 38,133 ---- * @version 1.00 */ ! public final class TokenListChange { ! private final TokenChangeInfo tokenChangeInfo; ! private List> addedTokens; private LAState laState; private int offsetGapIndex; ! private int removedEndOffset; ! private int addedEndOffset; ! public TokenListChange(TokenList tokenList) { ! tokenChangeInfo = new TokenChangeInfo(tokenList); } ! public TokenChangeInfo tokenChangeInfo() { ! return tokenChangeInfo; } public LanguagePath languagePath() { ! return tokenChangeInfo.currentTokenList().languagePath(); } ! ! public int index() { ! return tokenChangeInfo.index(); } ! ! public void setIndex(int tokenIndex) { ! tokenChangeInfo.setIndex(tokenIndex); } ! public int offset() { ! return tokenChangeInfo.offset(); } ! public void setOffset(int offset) { ! tokenChangeInfo.setOffset(offset); } ! public int offsetGapIndex() { ! return offsetGapIndex; } ! public void setOffsetGapIndex(int offsetGapIndex) { ! this.offsetGapIndex = offsetGapIndex; } ! public void addToken(AbstractToken token, int lookahead, Object state) { if (addedTokens == null) { ! addedTokens = new ArrayList>(2); laState = LAState.empty(); } addedTokens.add(token); laState = laState.add(lookahead, state); } ! public List> addedTokens() { return addedTokens; } ! ! public void syncAddedTokenCount() { ! tokenChangeInfo.setAddedTokenCount(addedTokens.size()); } ! public void setRemovedTokens(Object[] removedTokensOrBranches) { ! tokenChangeInfo.setRemovedTokenList(new RemovedTokenList( ! languagePath(), removedTokensOrBranches)); } ! ! public int removedEndOffset() { ! return removedEndOffset; } ! ! public void setRemovedEndOffset(int removedEndOffset) { ! 
this.removedEndOffset = removedEndOffset; } ! ! public int addedEndOffset() { ! return addedEndOffset; } ! ! public void setAddedEndOffset(int addedEndOffset) { ! this.addedEndOffset = addedEndOffset; } ! ! public LAState laState() { ! return laState; } } Index: lexer/src/org/netbeans/lib/lexer/inc/TokenListUpdater.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/inc/TokenListUpdater.java,v retrieving revision 1.3 diff -c -r1.3 TokenListUpdater.java *** lexer/src/org/netbeans/lib/lexer/inc/TokenListUpdater.java 12 Oct 2006 12:56:31 -0000 1.3 --- lexer/src/org/netbeans/lib/lexer/inc/TokenListUpdater.java 28 Nov 2006 14:20:17 -0000 *************** *** 19,25 **** package org.netbeans.lib.lexer.inc; ! import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.LanguageOperation; import org.netbeans.lib.lexer.LexerInputOperation; import org.netbeans.lib.lexer.LexerUtilsConstants; --- 19,26 ---- package org.netbeans.lib.lexer.inc; ! import org.netbeans.api.lexer.TokenId; ! import org.netbeans.lib.lexer.EmbeddedTokenList; import org.netbeans.lib.lexer.LanguageOperation; import org.netbeans.lib.lexer.LexerInputOperation; import org.netbeans.lib.lexer.LexerUtilsConstants; *************** *** 88,102 **** * Use incremental algorithm to update the list of tokens * after a modification done in the underlying storage. */ ! public static void update(MutableTokenList tokenList, TokenListChange change) { // Ensure the offsets in token list are up-to-date ! if (tokenList.getClass() == BranchTokenList.class) { ! ((BranchTokenList)tokenList).updateStartOffset(); } // Fetch offset where the modification occurred ! int modOffset = change.offset(); ! LanguageOperation languageOperation = LexerUtilsConstants.languageOperation( tokenList.languagePath()); int tokenCount = tokenList.tokenCountCurrent(); // presently created token count --- 89,104 ---- * Use incremental algorithm to update the list of tokens * after a modification done in the underlying storage. */ ! public static void update(MutableTokenList tokenList, ! TokenHierarchyEventInfo eventInfo, TokenListChange change) { // Ensure the offsets in token list are up-to-date ! if (tokenList.getClass() == EmbeddedTokenList.class) { ! ((EmbeddedTokenList)tokenList).updateStartOffset(); } // Fetch offset where the modification occurred ! int modOffset = eventInfo.modificationOffset(); ! LanguageOperation languageOperation = LexerUtilsConstants.mostEmbeddedLanguageOperation( tokenList.languagePath()); int tokenCount = tokenList.tokenCountCurrent(); // presently created token count *************** *** 105,111 **** // (for modification right at the begining of modified token) // then the token will be attempted to be validated (without running // a lexer). ! AbstractToken modToken; // modTokenOffset holds begining of the token in which the modification occurred. int modTokenOffset; // index points to the modified token --- 107,113 ---- // (for modification right at the begining of modified token) // then the token will be attempted to be validated (without running // a lexer). ! AbstractToken modToken; // modTokenOffset holds begining of the token in which the modification occurred. int modTokenOffset; // index points to the modified token *************** *** 190,202 **** relexIndex = index; relexOffset = modTokenOffset; // Can validate modToken if removal does not span whole token ! 
if (modToken != null && change.removedLength() < modToken.length()) { attemptValidation = true; } } else { // Previous token exists // Check for insert-only right at the end of the previous token ! if (modOffset == modTokenOffset && change.removedLength() == 0) { index--; // move to previous token modToken = token(tokenList, index); modTokenOffset -= modToken.length(); --- 192,204 ---- relexIndex = index; relexOffset = modTokenOffset; // Can validate modToken if removal does not span whole token ! if (modToken != null && eventInfo.removedLength() < modToken.length()) { attemptValidation = true; } } else { // Previous token exists // Check for insert-only right at the end of the previous token ! if (modOffset == modTokenOffset && eventInfo.removedLength() == 0) { index--; // move to previous token modToken = token(tokenList, index); modTokenOffset -= modToken.length(); *************** *** 208,214 **** relexIndex = index; relexOffset = modTokenOffset; // Check whether modification was localized to modToken only ! if (modOffset + change.removedLength() < modTokenOffset + modToken.length()) { attemptValidation = true; } --- 210,216 ---- relexIndex = index; relexOffset = modTokenOffset; // Check whether modification was localized to modToken only ! if (modOffset + eventInfo.removedLength() < modTokenOffset + modToken.length()) { attemptValidation = true; } *************** *** 218,224 **** // Go back and mark all affected tokens for removals while (relexIndex >= 0) { ! AbstractToken token = token(tokenList, relexIndex); // Check if token was not affected by modification if (relexOffset + tokenList.lookahead(relexIndex) <= modOffset) { break; --- 220,226 ---- // Go back and mark all affected tokens for removals while (relexIndex >= 0) { ! AbstractToken token = token(tokenList, relexIndex); // Check if token was not affected by modification if (relexOffset + tokenList.lookahead(relexIndex) <= modOffset) { break; *************** *** 244,250 **** TokenValidator tokenValidator = languageOperation.tokenValidator(modToken.id()); if (tokenValidator != null && (tokenList.getClass() != IncTokenList.class ! || change.tokenHierarchyOperation().canModifyToken(index, modToken)) ) { // if (tokenValidator.validateToken(modToken, modOffset - modTokenOffset, modRelOffset, --- 246,252 ---- TokenValidator tokenValidator = languageOperation.tokenValidator(modToken.id()); if (tokenValidator != null && (tokenList.getClass() != IncTokenList.class ! || eventInfo.tokenHierarchyOperation().canModifyToken(index, modToken)) ) { // if (tokenValidator.validateToken(modToken, modOffset - modTokenOffset, modRelOffset, *************** *** 262,268 **** // by iterating forward if (index < tokenCount) { matchOffset = modTokenOffset + modToken.length(); ! int removeEndOffset = modOffset + change.removedLength(); while (matchOffset < removeEndOffset && index + 1 < tokenCount) { index++; matchOffset += token(tokenList, index).length(); --- 264,270 ---- // by iterating forward if (index < tokenCount) { matchOffset = modTokenOffset + modToken.length(); ! int removeEndOffset = modOffset + eventInfo.removedLength(); while (matchOffset < removeEndOffset && index + 1 < tokenCount) { index++; matchOffset += token(tokenList, index).length(); *************** *** 276,283 **** Object relexState = (relexIndex > 0) ? tokenList.state(relexIndex - 1) : null; // Update the matchOffset so that it corresponds to the state // after the modification ! matchOffset += change.insertedLength() - change.removedLength(); ! 
change.setModifiedTokensStartOffset(relexOffset); // Variables' values: // 'index' - points to modified token. Or index == tokenCount for modification --- 278,285 ---- Object relexState = (relexIndex > 0) ? tokenList.state(relexIndex - 1) : null; // Update the matchOffset so that it corresponds to the state // after the modification ! matchOffset += eventInfo.insertedLength() - eventInfo.removedLength(); ! change.setOffset(relexOffset); // Variables' values: // 'index' - points to modified token. Or index == tokenCount for modification *************** *** 312,322 **** } if (relex) { // Start relexing ! LexerInputOperation lexerInputOperation = tokenList.createLexerInputOperation(relexIndex, relexOffset, relexState); do { // Fetch new tokens from lexer as necessary ! AbstractToken token = lexerInputOperation.nextToken(); if (token == null) { attemptValidation = false; break; --- 314,324 ---- } if (relex) { // Start relexing ! LexerInputOperation lexerInputOperation = tokenList.createLexerInputOperation(relexIndex, relexOffset, relexState); do { // Fetch new tokens from lexer as necessary ! AbstractToken token = lexerInputOperation.nextToken(); if (token == null) { attemptValidation = false; break; *************** *** 424,441 **** } // Now ensure that the original tokens will be replaced by the relexed ones. ! change.setTokenIndex(relexIndex); ! change.setCurrentTokenList(tokenList); ! change.setAddedTokensEndOffset(relexOffset); ! tokenList.replaceTokens(change, (modToken != null) ? (index - relexIndex + 1) : (index - relexIndex)); } ! private static AbstractToken token(MutableTokenList tokenList, int index) { ! Object tokenOrBranch = tokenList.tokenOrBranchUnsync(index); // Unsync impl suffices ! return (tokenOrBranch.getClass() == BranchTokenList.class) ! ? (AbstractToken)((BranchTokenList)tokenOrBranch).branchToken() ! : (AbstractToken)tokenOrBranch; } } --- 426,440 ---- } // Now ensure that the original tokens will be replaced by the relexed ones. ! change.setIndex(relexIndex); ! change.setAddedEndOffset(relexOffset); ! tokenList.replaceTokens(eventInfo, change, (modToken != null) ? (index - relexIndex + 1) : (index - relexIndex)); } ! private static AbstractToken token(MutableTokenList tokenList, int index) { ! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainerUnsync(index); // Unsync impl suffices ! return LexerUtilsConstants.token(tokenOrEmbeddingContainer); } } Index: lexer/src/org/netbeans/lib/lexer/token/AbstractToken.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/token/AbstractToken.java,v retrieving revision 1.2 diff -c -r1.2 AbstractToken.java *** lexer/src/org/netbeans/lib/lexer/token/AbstractToken.java 4 Oct 2006 17:03:21 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/token/AbstractToken.java 28 Nov 2006 14:20:17 -0000 *************** *** 23,32 **** import org.netbeans.api.lexer.TokenHierarchy; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.editor.util.CharSequenceUtilities; ! import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.LexerApiPackageAccessor; import org.netbeans.lib.lexer.TokenList; - import org.netbeans.lib.lexer.inc.IncTokenList; /** * Abstract token is base class of all token implementations used in the lexer module. --- 23,31 ---- import org.netbeans.api.lexer.TokenHierarchy; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.editor.util.CharSequenceUtilities; ! 
import org.netbeans.lib.lexer.EmbeddedTokenList; import org.netbeans.lib.lexer.LexerApiPackageAccessor; import org.netbeans.lib.lexer.TokenList; /** * Abstract token is base class of all token implementations used in the lexer module. *************** *** 39,45 **** private final T id; // 12 bytes (8-super + 4) ! private TokenList tokenList; // 16 bytes private int rawOffset; // 20 bytes --- 38,44 ---- private final T id; // 12 bytes (8-super + 4) ! private TokenList tokenList; // 16 bytes private int rawOffset; // 20 bytes *************** *** 51,57 **** this.id = id; } ! AbstractToken(T id, TokenList tokenList, int rawOffset) { this.id = id; this.tokenList = tokenList; this.rawOffset = rawOffset; --- 50,56 ---- this.id = id; } ! AbstractToken(T id, TokenList tokenList, int rawOffset) { this.id = id; this.tokenList = tokenList; this.rawOffset = rawOffset; *************** *** 73,80 **** */ public CharSequence text() { if (tokenList != null) { ! if (tokenList.getClass() == BranchTokenList.class) { ! ((BranchTokenList)tokenList).updateStartOffset(); } return this; } else { --- 72,79 ---- */ public CharSequence text() { if (tokenList != null) { ! if (tokenList.getClass() == EmbeddedTokenList.class) { ! ((EmbeddedTokenList)tokenList).updateStartOffset(); } return this; } else { *************** *** 85,98 **** /** * Get token list to which this token delegates its operation. */ ! public final TokenList tokenList() { return tokenList; } /** * Release this token from being attached to its parent token list. */ ! public final void setTokenList(TokenList tokenList) { this.tokenList = tokenList; } --- 84,97 ---- /** * Get token list to which this token delegates its operation. */ ! public final TokenList tokenList() { return tokenList; } /** * Release this token from being attached to its parent token list. */ ! public final void setTokenList(TokenList tokenList) { this.tokenList = tokenList; } *************** *** 126,132 **** return false; } ! public final int offset(TokenHierarchy tokenHierarchy) { if (rawOffset == -1) { // flyweight token return -1; } --- 125,131 ---- return false; } ! public final int offset(TokenHierarchy tokenHierarchy) { if (rawOffset == -1) { // flyweight token return -1; } Index: lexer/src/org/netbeans/lib/lexer/token/DefaultToken.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/token/DefaultToken.java,v retrieving revision 1.3 diff -c -r1.3 DefaultToken.java *** lexer/src/org/netbeans/lib/lexer/token/DefaultToken.java 7 Nov 2006 16:16:22 -0000 1.3 --- lexer/src/org/netbeans/lib/lexer/token/DefaultToken.java 28 Nov 2006 14:20:17 -0000 *************** *** 23,29 **** import org.netbeans.api.lexer.TokenHierarchy; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.editor.util.CharSequenceUtilities; ! import org.netbeans.lib.lexer.BranchTokenList; import org.netbeans.lib.lexer.LexerApiPackageAccessor; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.inc.IncTokenList; --- 23,29 ---- import org.netbeans.api.lexer.TokenHierarchy; import org.netbeans.api.lexer.TokenId; import org.netbeans.lib.editor.util.CharSequenceUtilities; ! 
import org.netbeans.lib.lexer.EmbeddingContainer; import org.netbeans.lib.lexer.LexerApiPackageAccessor; import org.netbeans.lib.lexer.TokenList; import org.netbeans.lib.lexer.inc.IncTokenList; Index: lexer/src/org/netbeans/lib/lexer/token/TextToken.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/token/TextToken.java,v retrieving revision 1.2 diff -c -r1.2 TextToken.java *** lexer/src/org/netbeans/lib/lexer/token/TextToken.java 4 Oct 2006 17:03:22 -0000 1.2 --- lexer/src/org/netbeans/lib/lexer/token/TextToken.java 28 Nov 2006 14:20:17 -0000 *************** *** 63,69 **** this.text = text; } ! private TextToken(T id, TokenList tokenList, int rawOffset, CharSequence text) { super(id, tokenList, rawOffset); assert (text != null); this.text = text; --- 63,69 ---- this.text = text; } ! private TextToken(T id, TokenList tokenList, int rawOffset, CharSequence text) { super(id, tokenList, rawOffset); assert (text != null); this.text = text; *************** *** 77,83 **** return text; } ! public final TextToken createCopy(TokenList tokenList, int rawOffset) { return new TextToken(id(), tokenList, rawOffset, text()); } --- 77,83 ---- return text; } ! public final TextToken createCopy(TokenList tokenList, int rawOffset) { return new TextToken(id(), tokenList, rawOffset, text()); } Index: lexer/src/org/netbeans/spi/lexer/LanguageEmbedding.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/spi/lexer/LanguageEmbedding.java,v retrieving revision 1.5 diff -c -r1.5 LanguageEmbedding.java *** lexer/src/org/netbeans/spi/lexer/LanguageEmbedding.java 27 Oct 2006 12:59:07 -0000 1.5 --- lexer/src/org/netbeans/spi/lexer/LanguageEmbedding.java 28 Nov 2006 14:20:18 -0000 *************** *** 21,72 **** import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenId; /** * Description of a particular language embedding including ! * starting and ending skipped regions of a branch token ! * containing this embedding * and a definition of an embedded language hierarchy. * - *

    - * Depending on the language the embedding may be made flyweight - * and service more than one branch token. - *
    - * Or it may be constructed to service just a single branch token. - * * @author Miloslav Metelka * @version 1.00 */ ! public abstract class LanguageEmbedding { /** * Get the embedded language. * * @return non-null embedded language. */ ! public abstract Language language(); /** ! * Get length of the initial part of the branch token that should be skipped * so it will be excluded from lexing and no tokens will be created for it. * ! * @return >=0 number of characters in an initial part of the branch token * (for which the language embedding is defined) that should be excluded * from the embedded section. The excluded characters will not be lexed * and there will be no tokens created for them. */ ! public abstract int startSkipLength(); /** ! * Get length of the ending part of the branch token that should be skipped * so it will be excluded from lexing and no tokens will be created for it. * ! * @return >=0 number of characters at the end of the branch token * (for which the language embedding is defined) that should be excluded * from the embedded section. The excluded characters will not be lexed * and there will be no tokens created for them. */ ! public abstract int endSkipLength(); } --- 21,163 ---- import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenId; + import org.netbeans.lib.lexer.LanguageOperation; + import org.netbeans.lib.lexer.LexerUtilsConstants; /** * Description of a particular language embedding including ! * starting and ending skipped regions of a token containing this embedding * and a definition of an embedded language hierarchy. * * @author Miloslav Metelka * @version 1.00 */ ! public final class LanguageEmbedding { ! ! /** ! * Create language embedding that does not join embedded sections. ! * ! * @see #create(Language, int, int, boolean) ! */ ! public static LanguageEmbedding create( ! Language language, int startSkipLength, int endSkipLength) { ! return create(language, startSkipLength, endSkipLength, false); ! } ! ! /** ! * Construct new language embedding for the given parameters ! * or get an existing cached one. ! * ! * @param language non-null language. ! * @param startSkipLength >=0 number of characters in an initial part of the token ! * for which the language embedding is defined that should be excluded ! * from the embedded section. The excluded characters will not be lexed ! * and there will be no tokens created for them. ! * @param endSkipLength >=0 number of characters at the end of the token ! * for which the language embedding is defined that should be excluded ! * from the embedded section. The excluded characters will not be lexed ! * and there will be no tokens created for them. ! * @param joinSections whether sections with this embedding should be joined ! * across the input source or whether they should stay separate. ! *
! * For example, for HTML sections embedded in JSP this flag should be true: ! *

    !      *   <!-- HTML comment start
    !      *       <% System.out.println("Hello"); %>
!      *       still in HTML comment -->
    !      *  
    ! *
    ! * Only the embedded sections with the same language path can be joined. ! */ ! public static LanguageEmbedding create( ! Language language, int startSkipLength, int endSkipLength, boolean joinSections) { ! if (language == null) { ! throw new IllegalArgumentException("language may not be null"); // NOI18N ! } ! if (startSkipLength < 0) { ! throw new IllegalArgumentException("startSkipLength=" + startSkipLength + " < 0"); ! } ! if (endSkipLength < 0) { ! throw new IllegalArgumentException("endSkipLength=" + endSkipLength + " < 0"); ! } ! ! LanguageOperation op = LexerUtilsConstants.languageOperation(language); ! return op.getEmbedding(startSkipLength, endSkipLength, joinSections); ! } ! ! private final Language language; ! ! private final int startSkipLength; ! ! private final int endSkipLength; ! ! private final boolean joinSections; ! ! /** ! * Package-private constructor used by lexer spi package accessor. ! */ ! LanguageEmbedding(Language language, ! int startSkipLength, int endSkipLength, boolean joinSections) { ! assert (language != null) : "Embedded language may not be null."; // NOI18N ! assert (startSkipLength >= 0 && endSkipLength >= 0); ! this.language = language; ! this.startSkipLength = startSkipLength; ! this.endSkipLength = endSkipLength; ! this.joinSections = joinSections; ! } /** * Get the embedded language. * * @return non-null embedded language. */ ! public Language language() { ! return language; ! } /** ! * Get length of the initial part of the token (for which the embedding ! * is being created) that should be skipped * so it will be excluded from lexing and no tokens will be created for it. * ! * @return >=0 number of characters in an initial part of the token * (for which the language embedding is defined) that should be excluded * from the embedded section. The excluded characters will not be lexed * and there will be no tokens created for them. */ ! public int startSkipLength() { ! return startSkipLength; ! } /** ! * Get length of the ending part of the token (for which the embedding ! * is being created) that should be skipped * so it will be excluded from lexing and no tokens will be created for it. * ! * @return >=0 number of characters at the end of the token * (for which the language embedding is defined) that should be excluded * from the embedded section. The excluded characters will not be lexed * and there will be no tokens created for them. */ ! public int endSkipLength() { ! return endSkipLength; ! } ! ! /** ! * Whether sections with this embedding should be joined with the other ! * sections with this embedding at the same level. ! * ! * @return joinSections whether sections with this embedding should be joined ! * across the input source or whether they should stay separate. ! */ ! public boolean joinSections() { ! return joinSections; ! } ! ! public String toString() { ! return "language: " + language() + ", skip[" + startSkipLength() // NOI18N ! + ", " + endSkipLength + "]"; // NOI18N ! } } Index: lexer/src/org/netbeans/spi/lexer/LanguageHierarchy.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/spi/lexer/LanguageHierarchy.java,v retrieving revision 1.7 diff -c -r1.7 LanguageHierarchy.java *** lexer/src/org/netbeans/spi/lexer/LanguageHierarchy.java 27 Oct 2006 12:59:07 -0000 1.7 --- lexer/src/org/netbeans/spi/lexer/LanguageHierarchy.java 28 Nov 2006 14:20:18 -0000 *************** *** 200,208 **** * list which would not be possible if the token would be flyweight. 
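The reworked embedding API above can be exercised roughly as follows. This is only an illustrative sketch written against the signatures introduced by this patch, not code taken from it: JspTokenId and HTMLTokenId are hypothetical token id types standing in for a real top-level language and its embedded language, and the enclosing LanguageHierarchy subclass with its imports is omitted.

    // Inside a hypothetical LanguageHierarchy<JspTokenId> subclass:
    @Override
    protected LanguageEmbedding embedding(Token<JspTokenId> token,
            LanguagePath languagePath, InputAttributes inputAttributes) {
        if (token.id() == JspTokenId.TEXT) {
            // Embed HTML into template text with nothing skipped at either end;
            // joinSections=true asks that HTML sections interrupted by scriptlets
            // be lexed as one continuous input, as described in the javadoc above.
            return LanguageEmbedding.create(HTMLTokenId.language(), 0, 0, true);
        }
        return null; // no hardcoded embedding for other token ids
    }

Returning null keeps the default of no hardcoded embedding; a registered LanguageProvider (see the LanguageProvider changes further below) may still supply one.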
* * @param token non-null token for which the language embedding will be resolved. ! * @param tokenComplete true if the given token was completely lexed. False ! * in case when just the initial token part is known and the embedded lexer ! * will influence where the token will physically end. * * @param languagePath non-null language path at which the language embedding * is being created. It may be used for obtaining appropriate information --- 200,209 ---- * list which would not be possible if the token would be flyweight. * * @param token non-null token for which the language embedding will be resolved. ! *
    ! * The token may have a zero length ({@link Token#length()} == 0) ! * in case the language infrastructure performs a poll for all embedded ! * languages for the * * @param languagePath non-null language path at which the language embedding * is being created. It may be used for obtaining appropriate information *************** *** 214,220 **** * @return language embedding instance or null if there is no language embedding * for this token. */ ! protected LanguageEmbedding embedding(Token token, boolean tokenComplete, LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No extra hardcoded embedding by default } --- 215,221 ---- * @return language embedding instance or null if there is no language embedding * for this token. */ ! protected LanguageEmbedding embedding(Token token, LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No extra hardcoded embedding by default } *************** *** 309,331 **** return languageHierarchy.createTokenCategories(); } ! public String mimeType(LanguageHierarchy languageHierarchy) { return languageHierarchy.mimeType(); } ! public LanguageOperation operation(LanguageHierarchy languageHierarchy) { return languageHierarchy.operation; } ! @SuppressWarnings("unchecked") ! public LanguageEmbedding embedding(LanguageHierarchy languageHierarchy, ! Token token, boolean tokenComplete, ! LanguagePath languagePath, InputAttributes inputAttributes) { ! return languageHierarchy.embedding(token, tokenComplete, languagePath, inputAttributes); } ! @SuppressWarnings("unchecked") ! public Lexer createLexer(LanguageHierarchy languageHierarchy, LexerRestartInfo info) { return languageHierarchy.createLexer(info); } --- 310,330 ---- return languageHierarchy.createTokenCategories(); } ! public String mimeType(LanguageHierarchy languageHierarchy) { return languageHierarchy.mimeType(); } ! public LanguageOperation operation(LanguageHierarchy languageHierarchy) { return languageHierarchy.operation; } ! public LanguageEmbedding embedding(LanguageHierarchy languageHierarchy, ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { ! return languageHierarchy.embedding(token, languagePath, inputAttributes); } ! public Lexer createLexer( ! LanguageHierarchy languageHierarchy, LexerRestartInfo info) { return languageHierarchy.createLexer(info); } *************** *** 335,347 **** return new LexerRestartInfo(input, tokenFactory, state, languagePath, inputAttributes); } ! @SuppressWarnings("unchecked") ! public TokenValidator createTokenValidator(LanguageHierarchy languageHierarchy, TokenId id) { return languageHierarchy.createTokenValidator(id); } ! @SuppressWarnings("unchecked") ! public boolean isRetainTokenText(LanguageHierarchy languageHierarchy, TokenId id) { return languageHierarchy.isRetainTokenText(id); } --- 334,346 ---- return new LexerRestartInfo(input, tokenFactory, state, languagePath, inputAttributes); } ! public TokenValidator createTokenValidator( ! LanguageHierarchy languageHierarchy, T id) { return languageHierarchy.createTokenValidator(id); } ! public boolean isRetainTokenText( ! LanguageHierarchy languageHierarchy, T id) { return languageHierarchy.isRetainTokenText(id); } *************** *** 365,385 **** return mti.language(); } ! public CharSequence text(MutableTextInput mti) { return mti.text(); } ! public InputAttributes inputAttributes(MutableTextInput mti) { return mti.inputAttributes(); } ! public Object inputSource(MutableTextInput mti) { return mti.inputSource(); } ! 
@SuppressWarnings("unchecked") ! public TokenFactory createTokenFactory(LexerInputOperation lexerInputOperation) { ! return new TokenFactory(lexerInputOperation); } } --- 364,389 ---- return mti.language(); } ! public LanguageEmbedding createLanguageEmbedding( ! Language language, int startSkipLength, int endSkipLength, boolean joinSections) { ! return new LanguageEmbedding(language, startSkipLength, endSkipLength, joinSections); ! } ! ! public CharSequence text(MutableTextInput mti) { return mti.text(); } ! public InputAttributes inputAttributes(MutableTextInput mti) { return mti.inputAttributes(); } ! public I inputSource(MutableTextInput mti) { return mti.inputSource(); } ! public TokenFactory createTokenFactory( ! LexerInputOperation lexerInputOperation) { ! return new TokenFactory(lexerInputOperation); } } Index: lexer/src/org/netbeans/spi/lexer/LanguageProvider.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/spi/lexer/LanguageProvider.java,v retrieving revision 1.5 diff -c -r1.5 LanguageProvider.java *** lexer/src/org/netbeans/spi/lexer/LanguageProvider.java 26 Oct 2006 20:45:25 -0000 1.5 --- lexer/src/org/netbeans/spi/lexer/LanguageProvider.java 28 Nov 2006 14:20:18 -0000 *************** *** 80,86 **** * embedded in a given token. * *

    If a Token contains text in a different language that could ! * further be used for lexing this Token the framework will try * to find out the Language of that language by asking * the Token's own Language first and then * by consulting registered LanguageProviders. The LanguageProviders --- 80,86 ---- * embedded in a given token. * *

    If a Token contains text in a different language that could ! * further be used for lexing of this Token the framework will try * to find out the Language of that language by asking * the Token's own Language first and then * by consulting registered LanguageProviders. The LanguageProviders *************** *** 88,98 **** * care about and null for the rest. The first non-null * LanguageEmbedding found will be used. * - * - * @param tokenLanguage The LanguagePath of the token, which - * embedded language should be returned. * @param token The Token to get the Language * for. * @param inputAttributes The attributes that could affect the creation of * the embedded Language. It may be null * if there are no extra attributes. --- 88,97 ---- * care about and null for the rest. The first non-null * LanguageEmbedding found will be used. * * @param token The Token to get the Language * for. + * @param languagePath The LanguagePath of the token, which + * embedded language should be returned. * @param inputAttributes The attributes that could affect the creation of * the embedded Language. It may be null * if there are no extra attributes. *************** *** 101,108 **** * or null if the token can't embedd any language * or the token is unknown to this LanguageProvider. */ ! public abstract LanguageEmbedding findLanguageEmbedding( ! LanguagePath tokenLanguage, Token token, InputAttributes inputAttributes); /** * Add a listener for change notifications. --- 100,107 ---- * or null if the token can't embedd any language * or the token is unknown to this LanguageProvider. */ ! public abstract LanguageEmbedding findLanguageEmbedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes); /** * Add a listener for change notifications. Index: lexer/src/org/netbeans/spi/lexer/TokenFactory.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/spi/lexer/TokenFactory.java,v retrieving revision 1.3 diff -c -r1.3 TokenFactory.java *** lexer/src/org/netbeans/spi/lexer/TokenFactory.java 11 Oct 2006 14:30:12 -0000 1.3 --- lexer/src/org/netbeans/spi/lexer/TokenFactory.java 28 Nov 2006 14:20:18 -0000 *************** *** 59,67 **** "" // empty skip token text NOI18N ); ! private final LexerInputOperation operation; ! TokenFactory(LexerInputOperation operation) { this.operation = operation; } --- 59,67 ---- "" // empty skip token text NOI18N ); ! private final LexerInputOperation operation; ! TokenFactory(LexerInputOperation operation) { this.operation = operation; } *************** *** 150,157 **** if (operation.tokenRecognized(text.length(), false)) { // Create preprocessed token return new PreprocessedTextToken(id, operation.tokenLength()); } else if (operation.isFlyTokenAllowed()) { ! @SuppressWarnings("unchecked") // NOI18N ! LanguageOperation langOp = (LanguageOperation)operation.languageOperation(); return langOp.getFlyweightToken(id, text); } else { // return non-flyweight token return new DefaultToken(id, operation.tokenLength()); --- 150,156 ---- if (operation.tokenRecognized(text.length(), false)) { // Create preprocessed token return new PreprocessedTextToken(id, operation.tokenLength()); } else if (operation.isFlyTokenAllowed()) { ! 
LanguageOperation langOp = operation.languageOperation(); return langOp.getFlyweightToken(id, text); } else { // return non-flyweight token return new DefaultToken(id, operation.tokenLength()); Index: lexer/src/org/netbeans/spi/lexer/TokenHierarchyControl.java =================================================================== RCS file: /cvs/lexer/src/org/netbeans/spi/lexer/TokenHierarchyControl.java,v retrieving revision 1.4 diff -c -r1.4 TokenHierarchyControl.java *** lexer/src/org/netbeans/spi/lexer/TokenHierarchyControl.java 26 Oct 2006 20:45:25 -0000 1.4 --- lexer/src/org/netbeans/spi/lexer/TokenHierarchyControl.java 28 Nov 2006 14:20:18 -0000 *************** *** 35,51 **** private MutableTextInput input; ! private TokenHierarchyOperation operation; TokenHierarchyControl(MutableTextInput input) { this.input = input; } ! private void init() { Language language = input.language(); if (language != null) { ! this.operation = new TokenHierarchyOperation(input, language); } } public synchronized TokenHierarchy tokenHierarchy() { --- 35,55 ---- private MutableTextInput input; ! private TokenHierarchyOperation operation; TokenHierarchyControl(MutableTextInput input) { this.input = input; } ! private void init() { Language language = input.language(); if (language != null) { ! this.operation = createOperation(language); } + } + + private TokenHierarchyOperation createOperation(Language language) { + return new TokenHierarchyOperation(input, language); } public synchronized TokenHierarchy tokenHierarchy() { Index: lexer/test/unit/src/org/netbeans/api/lexer/InputAttributesTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/api/lexer/InputAttributesTest.java,v retrieving revision 1.3 diff -c -r1.3 InputAttributesTest.java *** lexer/test/unit/src/org/netbeans/api/lexer/InputAttributesTest.java 18 Oct 2006 16:19:28 -0000 1.3 --- lexer/test/unit/src/org/netbeans/api/lexer/InputAttributesTest.java 28 Nov 2006 14:20:18 -0000 *************** *** 63,70 **** String text = "public static private"; // Default version recognizes "static" keyword ! TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.PUBLIC, "public", 0); assertTrue(ts.moveNext()); --- 63,70 ---- String text = "public static private"; // Default version recognizes "static" keyword ! TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.PUBLIC, "public", 0); assertTrue(ts.moveNext()); Index: lexer/test/unit/src/org/netbeans/api/lexer/TokenSequenceTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/api/lexer/TokenSequenceTest.java,v retrieving revision 1.4 diff -c -r1.4 TokenSequenceTest.java *** lexer/test/unit/src/org/netbeans/api/lexer/TokenSequenceTest.java 18 Oct 2006 16:19:28 -0000 1.4 --- lexer/test/unit/src/org/netbeans/api/lexer/TokenSequenceTest.java 28 Nov 2006 14:20:18 -0000 *************** *** 45,52 **** public void testMove() { String text = "abc+defg"; ! TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); // Fail if no "move*" method called yet try { --- 45,52 ---- public void testMove() { String text = "abc+defg"; ! 
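Note (illustrative, not part of the patch): the TokenFactory.getFlyweightToken() body changed earlier in this hunk is driven by lexer implementations roughly as sketched below. The lexer class and its token choices are assumptions; the LexerRestartInfo/LexerInput/TokenFactory calls follow the SPI shapes used elsewhere in this patch, and SimpleTokenId comes from the test sources.

    import org.netbeans.api.lexer.Token;
    import org.netbeans.lib.lexer.test.simple.SimpleTokenId;
    import org.netbeans.spi.lexer.Lexer;
    import org.netbeans.spi.lexer.LexerInput;
    import org.netbeans.spi.lexer.LexerRestartInfo;
    import org.netbeans.spi.lexer.TokenFactory;

    final class MinusWordLexer implements Lexer {

        private final LexerInput input;
        private final TokenFactory tokenFactory;

        MinusWordLexer(LexerRestartInfo info) {
            this.input = info.input();
            this.tokenFactory = info.tokenFactory();
        }

        public Token nextToken() {
            int c = input.read();
            if (c == LexerInput.EOF) {
                return null; // no more tokens
            }
            if (c == '-') {
                // Fixed token text: the factory may hand out a flyweight token here.
                return tokenFactory.getFlyweightToken(SimpleTokenId.MINUS, "-");
            }
            // Anything else: read up to the next '-' or EOF and create an ordinary token.
            while ((c = input.read()) != LexerInput.EOF && c != '-') {
            }
            if (c != LexerInput.EOF) {
                input.backup(1); // leave the '-' for the next token
            }
            return tokenFactory.createToken(SimpleTokenId.IDENTIFIER);
        }

        public Object state() {
            return null; // this sketch keeps no lexer state
        }

        public void release() {
        }
    }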
TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); // Fail if no "move*" method called yet try { *************** *** 87,93 **** // Test subsequences ! TokenSequence sub = ts.subSequence(1, 6); assertTrue(sub.moveNext()); LexerTestUtilities.assertTokenEquals(sub, SimpleTokenId.IDENTIFIER, "abc", 0); assertTrue(sub.moveNext()); --- 87,93 ---- // Test subsequences ! TokenSequence sub = ts.subSequence(1, 6); assertTrue(sub.moveNext()); LexerTestUtilities.assertTokenEquals(sub, SimpleTokenId.IDENTIFIER, "abc", 0); assertTrue(sub.moveNext()); *************** *** 117,125 **** assertTrue(skipTokenIds.contains(SimpleTokenId.MINUS)); assertFalse(skipTokenIds.contains(SimpleTokenId.IDENTIFIER)); ! TokenHierarchy hi = TokenHierarchy.create(text, false, SimpleTokenId.language(), skipTokenIds, null); ! TokenSequence ts = hi.tokenSequence(); // Fail if no "move*" method called yet try { --- 117,125 ---- assertTrue(skipTokenIds.contains(SimpleTokenId.MINUS)); assertFalse(skipTokenIds.contains(SimpleTokenId.IDENTIFIER)); ! TokenHierarchy hi = TokenHierarchy.create(text, false, SimpleTokenId.language(), skipTokenIds, null); ! TokenSequence ts = hi.tokenSequence(); // Fail if no "move*" method called yet try { *************** *** 165,171 **** // Test subsequences ! TokenSequence sub = ts.subSequence(1, 6); assertTrue(sub.moveNext()); LexerTestUtilities.assertTokenEquals(sub, SimpleTokenId.IDENTIFIER, "abc", 1); assertTrue(sub.moveNext()); --- 165,171 ---- // Test subsequences ! TokenSequence sub = ts.subSequence(1, 6); assertTrue(sub.moveNext()); LexerTestUtilities.assertTokenEquals(sub, SimpleTokenId.IDENTIFIER, "abc", 1); assertTrue(sub.moveNext()); *************** *** 228,235 **** Reader r = new StringReader(text); try { ! TokenHierarchy hi = TokenHierarchy.create(r, SimpleTokenId.language(), skipTokenIds, null); ! TokenSequence ts = hi.tokenSequence(); ts.tokenCount(); } finally { --- 228,235 ---- Reader r = new StringReader(text); try { ! TokenHierarchy hi = TokenHierarchy.create(r, SimpleTokenId.language(), skipTokenIds, null); ! TokenSequence ts = hi.tokenSequence(); ts.tokenCount(); } finally { *************** *** 239,246 **** public void testMoveEmpty() { String text = ""; ! TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); // Expect no tokens assertFalse(ts.moveNext()); --- 239,246 ---- public void testMoveEmpty() { String text = ""; ! TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); // Expect no tokens assertFalse(ts.moveNext()); *************** *** 256,262 **** assertEquals(ts.move(10), Integer.MAX_VALUE); // Test subsequences ! TokenSequence sub = ts.subSequence(1, 6); assertFalse(sub.moveNext()); sub = ts.subSequence(1, 6); assertEquals(Integer.MAX_VALUE, sub.move(1)); --- 256,262 ---- assertEquals(ts.move(10), Integer.MAX_VALUE); // Test subsequences ! TokenSequence sub = ts.subSequence(1, 6); assertFalse(sub.moveNext()); sub = ts.subSequence(1, 6); assertEquals(Integer.MAX_VALUE, sub.move(1)); *************** *** 274,281 **** public void testTokenSize() { String text = "abc+"; ! TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "abc", 0); --- 274,281 ---- public void testTokenSize() { String text = "abc+"; ! 
TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "abc", 0); Index: lexer/test/unit/src/org/netbeans/lib/lexer/BranchTokenListTest.java =================================================================== RCS file: lexer/test/unit/src/org/netbeans/lib/lexer/BranchTokenListTest.java diff -N lexer/test/unit/src/org/netbeans/lib/lexer/BranchTokenListTest.java *** lexer/test/unit/src/org/netbeans/lib/lexer/BranchTokenListTest.java 26 Oct 2006 20:45:25 -0000 1.4 --- /dev/null 1 Jan 1970 00:00:00 -0000 *************** *** 1,165 **** - /* - * Sun Public License Notice - * - * The contents of this file are subject to the Sun Public License - * Version 1.0 (the "License"). You may not use this file except in - * compliance with the License. A copy of the License is available at - * http://www.sun.com/ - * - * The Original Code is NetBeans. The Initial Developer of the Original - * Code is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun - * Microsystems, Inc. All Rights Reserved. - */ - package org.netbeans.lib.lexer; - - import javax.swing.text.Document; - import javax.swing.text.PlainDocument; - import junit.framework.TestCase; - import org.netbeans.api.lexer.Language; - import org.netbeans.api.lexer.TokenHierarchy; - import org.netbeans.api.lexer.TokenSequence; - import org.netbeans.lib.lexer.test.LexerTestUtilities; - import org.netbeans.lib.lexer.test.simple.SimpleJavadocTokenId; - import org.netbeans.lib.lexer.test.simple.SimpleTokenId; - - /** - * - * @author Jan Lahoda - */ - public class BranchTokenListTest extends TestCase { - - public BranchTokenListTest(String testName) { - super(testName); - } - - protected void setUp() throws Exception { - } - - public void testUpdateStartOffset() throws Exception { - Document d = new PlainDocument(); - - d.putProperty(Language.class, SimpleTokenId.language()); - - d.insertString(0, "ident ident /** @see X */", null); - - TokenHierarchy h = TokenHierarchy.get(d); - TokenSequence ts = h.tokenSequence(); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); - assertEquals(0, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); - assertEquals(5, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); - assertEquals(6, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); - assertEquals(11, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.JAVADOC_COMMENT, "/** @see X */"); - assertEquals(12, ts.offset()); - - TokenSequence inner = ts.embedded(); - - assertNotNull(inner); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); - assertEquals(15, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.TAG, "@see"); - assertEquals(16, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); - assertEquals(20, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.IDENT, "X"); - assertEquals(21, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); - assertEquals(22, inner.offset()); - } - - public void testSnapshots() throws Exception { - Document d = new PlainDocument(); - - d.putProperty(Language.class, 
SimpleTokenId.language()); - - d.insertString(0, "ident ident /** @see X */", null); - - TokenHierarchy h = TokenHierarchy.get(d); - TokenSequence ts = h.tokenSequence(); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); - assertEquals(0, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); - assertEquals(5, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); - assertEquals(6, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); - assertEquals(11, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.JAVADOC_COMMENT, "/** @see X */"); - assertEquals(12, ts.offset()); - - TokenSequence inner = ts.embedded(); - - assertNotNull(inner); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); - assertEquals(15, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.TAG, "@see"); - assertEquals(16, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); - assertEquals(20, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.IDENT, "X"); - assertEquals(21, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); - assertEquals(22, inner.offset()); - - - h = TokenHierarchy.get(d).createSnapshot(); - ts = h.tokenSequence(); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); - assertEquals(0, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); - assertEquals(5, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); - assertEquals(6, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); - assertEquals(11, ts.offset()); - - LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.JAVADOC_COMMENT, "/** @see X */"); - assertEquals(12, ts.offset()); - - inner = ts.embedded(); - - assertNotNull(inner); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); - assertEquals(15, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.TAG, "@see"); - assertEquals(16, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); - assertEquals(20, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.IDENT, "X"); - assertEquals(21, inner.offset()); - - LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); - assertEquals(22, inner.offset()); - } - } --- 0 ---- Index: lexer/test/unit/src/org/netbeans/lib/lexer/EmbeddedTokenListTest.java =================================================================== RCS file: lexer/test/unit/src/org/netbeans/lib/lexer/EmbeddedTokenListTest.java diff -N lexer/test/unit/src/org/netbeans/lib/lexer/EmbeddedTokenListTest.java *** /dev/null 1 Jan 1970 00:00:00 -0000 --- lexer/test/unit/src/org/netbeans/lib/lexer/EmbeddedTokenListTest.java 28 Nov 2006 14:20:18 -0000 *************** *** 0 **** --- 1,166 ---- + /* + * Sun Public License Notice + * + * The contents of this file are subject to the Sun Public License + * Version 1.0 (the "License"). You may not use this file except in + * compliance with the License. 
A copy of the License is available at + * http://www.sun.com/ + * + * The Original Code is NetBeans. The Initial Developer of the Original + * Code is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun + * Microsystems, Inc. All Rights Reserved. + */ + package org.netbeans.lib.lexer; + + import javax.swing.text.Document; + import javax.swing.text.PlainDocument; + import junit.framework.TestCase; + import org.netbeans.api.lexer.Language; + import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; + import org.netbeans.api.lexer.TokenSequence; + import org.netbeans.lib.lexer.test.LexerTestUtilities; + import org.netbeans.lib.lexer.test.simple.SimpleJavadocTokenId; + import org.netbeans.lib.lexer.test.simple.SimpleTokenId; + + /** + * + * @author Jan Lahoda + */ + public class EmbeddedTokenListTest extends TestCase { + + public EmbeddedTokenListTest(String testName) { + super(testName); + } + + protected void setUp() throws Exception { + } + + public void testUpdateStartOffset() throws Exception { + Document d = new PlainDocument(); + + d.putProperty(Language.class, SimpleTokenId.language()); + + d.insertString(0, "ident ident /** @see X */", null); + + TokenHierarchy h = TokenHierarchy.get(d); + TokenSequence ts = h.tokenSequence(); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); + assertEquals(0, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); + assertEquals(5, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); + assertEquals(6, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); + assertEquals(11, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.JAVADOC_COMMENT, "/** @see X */"); + assertEquals(12, ts.offset()); + + TokenSequence inner = ts.embedded(); + + assertNotNull(inner); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); + assertEquals(15, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.TAG, "@see"); + assertEquals(16, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); + assertEquals(20, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.IDENT, "X"); + assertEquals(21, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); + assertEquals(22, inner.offset()); + } + + public void testSnapshots() throws Exception { + Document d = new PlainDocument(); + + d.putProperty(Language.class, SimpleTokenId.language()); + + d.insertString(0, "ident ident /** @see X */", null); + + TokenHierarchy h = TokenHierarchy.get(d); + TokenSequence ts = h.tokenSequence(); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); + assertEquals(0, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); + assertEquals(5, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); + assertEquals(6, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); + assertEquals(11, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.JAVADOC_COMMENT, "/** @see X */"); + assertEquals(12, ts.offset()); + + TokenSequence inner = ts.embedded(); + + assertNotNull(inner); + + 
LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); + assertEquals(15, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.TAG, "@see"); + assertEquals(16, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); + assertEquals(20, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.IDENT, "X"); + assertEquals(21, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); + assertEquals(22, inner.offset()); + + + h = TokenHierarchy.get(d).createSnapshot(); + ts = h.tokenSequence(); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); + assertEquals(0, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); + assertEquals(5, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); + assertEquals(6, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.WHITESPACE, " "); + assertEquals(11, ts.offset()); + + LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.JAVADOC_COMMENT, "/** @see X */"); + assertEquals(12, ts.offset()); + + inner = ts.embedded(); + + assertNotNull(inner); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); + assertEquals(15, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.TAG, "@see"); + assertEquals(16, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); + assertEquals(20, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.IDENT, "X"); + assertEquals(21, inner.offset()); + + LexerTestUtilities.assertNextTokenEquals(inner, SimpleJavadocTokenId.OTHER_TEXT, " "); + assertEquals(22, inner.offset()); + } + } Index: lexer/test/unit/src/org/netbeans/lib/lexer/inc/SnapshotTokenListTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/inc/SnapshotTokenListTest.java,v retrieving revision 1.4 diff -c -r1.4 SnapshotTokenListTest.java *** lexer/test/unit/src/org/netbeans/lib/lexer/inc/SnapshotTokenListTest.java 26 Oct 2006 20:45:26 -0000 1.4 --- lexer/test/unit/src/org/netbeans/lib/lexer/inc/SnapshotTokenListTest.java 28 Nov 2006 14:20:18 -0000 *************** *** 15,20 **** --- 15,21 ---- import javax.swing.text.Document; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.junit.NbTestCase; import org.netbeans.lib.lexer.test.LexerTestUtilities; *************** *** 42,49 **** d.insertString(0, "ident ident /** @see X */", null); ! TokenHierarchy h = TokenHierarchy.get(d).createSnapshot(); ! TokenSequence ts = h.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); assertEquals(0, ts.offset()); --- 43,50 ---- d.insertString(0, "ident ident /** @see X */", null); ! TokenHierarchy h = TokenHierarchy.get(d).createSnapshot(); ! 
TokenSequence ts = h.tokenSequence(); LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ident"); assertEquals(0, ts.offset()); *************** *** 60,66 **** LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.JAVADOC_COMMENT, "/** @see X */"); assertEquals(12, ts.offset()); ! TokenSequence inner = ts.embedded(); assertNotNull(inner); --- 61,67 ---- LexerTestUtilities.assertNextTokenEquals(ts, SimpleTokenId.JAVADOC_COMMENT, "/** @see X */"); assertEquals(12, ts.offset()); ! TokenSequence inner = ts.embedded(); assertNotNull(inner); Index: lexer/test/unit/src/org/netbeans/lib/lexer/inc/TokenHierarchyEventTest.java =================================================================== RCS file: lexer/test/unit/src/org/netbeans/lib/lexer/inc/TokenHierarchyEventTest.java diff -N lexer/test/unit/src/org/netbeans/lib/lexer/inc/TokenHierarchyEventTest.java *** /dev/null 1 Jan 1970 00:00:00 -0000 --- lexer/test/unit/src/org/netbeans/lib/lexer/inc/TokenHierarchyEventTest.java 28 Nov 2006 14:20:18 -0000 *************** *** 0 **** --- 1,157 ---- + /* + * Sun Public License Notice + * + * The contents of this file are subject to the Sun Public License + * Version 1.0 (the "License"). You may not use this file except in + * compliance with the License. A copy of the License is available at + * http://www.sun.com/ + * + * The Original Code is NetBeans. The Initial Developer of the Original + * Code is Sun Microsystems, Inc. Portions Copyright 1997-2000 Sun + * Microsystems, Inc. All Rights Reserved. + */ + + package org.netbeans.lib.lexer.inc; + + import javax.swing.text.Document; + import org.netbeans.api.lexer.Language; + import org.netbeans.api.lexer.TokenChange; + import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenHierarchyEvent; + import org.netbeans.api.lexer.TokenHierarchyListener; + import org.netbeans.api.lexer.TokenId; + import org.netbeans.api.lexer.TokenSequence; + import org.netbeans.junit.NbTestCase; + import org.netbeans.lib.lexer.test.LexerTestUtilities; + import org.netbeans.lib.lexer.test.ModificationTextDocument; + import org.netbeans.lib.lexer.test.simple.SimplePlainTokenId; + import org.netbeans.lib.lexer.test.simple.SimpleTokenId; + import org.netbeans.spi.lexer.LanguageEmbedding; + + /** + * Test several simple lexer impls. 
+ * + * @author mmetelka + */ + public class TokenHierarchyEventTest extends NbTestCase { + + public TokenHierarchyEventTest(String testName) { + super(testName); + } + + protected void setUp() throws java.lang.Exception { + } + + protected void tearDown() throws java.lang.Exception { + } + + public void testCreateEmbedding() throws Exception { + Document doc = new ModificationTextDocument(); + String text = "abc def ghi"; + doc.insertString(0, text, null); + // Assign a language to the document + doc.putProperty(Language.class, SimpleTokenId.language()); + TokenHierarchy hi = TokenHierarchy.get(doc); + THListener listener = new THListener(); + hi.addTokenHierarchyListener(listener); + TokenSequence ts = hi.tokenSequence(); + + assertTrue(ts.moveNext()); + LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "abc", 0); + assertTrue(ts.moveNext()); + LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.WHITESPACE, " ", 3); + assertTrue(ts.moveNext()); + LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "def", 4); + assertTrue(ts.moveNext()); + LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.WHITESPACE, " ", 7); + assertTrue(ts.moveNext()); + LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "ghi", 8); + assertTrue(ts.moveNext()); + LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.WHITESPACE, " ", 11); + assertTrue(ts.moveNext()); + + // Do insert + doc.insertString(5, "x", null); + + // Check the fired event + TokenHierarchyEvent evt = listener.fetchLastEvent(); + assertNotNull(evt); + TokenChange tc = evt.tokenChange(); + assertNotNull(tc); + assertEquals(2, tc.index()); + assertEquals(15, tc.offset()); + assertEquals(0, tc.addedTokenCount()); + assertEquals(0, tc.removedTokenCount()); + assertEquals(SimpleTokenId.language(), tc.language()); + assertEquals(1, tc.embeddedChangeCount()); + TokenChange etc = tc.embeddedChange(0); + assertEquals(0, etc.index()); + assertEquals(18, etc.offset()); + assertEquals(0, etc.addedTokenCount()); // 0 to allow for lazy lexing where this would be unknowns + assertEquals(0, etc.removedTokenCount()); + assertEquals(SimpleTokenId.language(), etc.language()); + assertEquals(0, etc.embeddedChangeCount()); + + // Test the contents of the embedded sequence + TokenSequence ets = ts.embedded(); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.IDENTIFIER, "line", 18); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.WHITESPACE, " ", 22); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.IDENTIFIER, "comment", 23); + assertFalse(ets.moveNext()); + + // Move main TS back and try extra embedding on comment + assertTrue(ts.movePrevious()); + assertTrue(ts.createEmbedding(SimpleTokenId.language(), 2, 2)); + ets = ts.embedded(); // Should be the explicit one + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.IDENTIFIER, "def", 5); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.WHITESPACE, " ", 8); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.IDENTIFIER, "ghi", 9); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.WHITESPACE, " ", 12); + assertFalse(ets.moveNext()); + + // Get the default embedding - should be available as well + ets = ts.embedded(SimplePlainTokenId.language()); // Should be the explicit one + assertTrue(ets.moveNext()); + 
LexerTestUtilities.assertTokenEquals(ets, SimplePlainTokenId.WORD, "def", 5); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimplePlainTokenId.WHITESPACE, " ", 8); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimplePlainTokenId.WORD, "ghi", 9); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimplePlainTokenId.WHITESPACE, " ", 12); + assertFalse(ets.moveNext()); + } + + public void testEmbeddingCaching() throws Exception { + LanguageEmbedding e = LanguageEmbedding.create(SimpleTokenId.language(), 2, 1); + assertSame(SimpleTokenId.language(), e.language()); + assertSame(2, e.startSkipLength()); + assertSame(1, e.endSkipLength()); + LanguageEmbedding e2 = LanguageEmbedding.create(SimpleTokenId.language(), 2, 1); + assertSame(e, e2); + } + + private static final class THListener implements TokenHierarchyListener { + + private TokenHierarchyEvent lastEvent; + + public void tokenHierarchyChanged(TokenHierarchyEvent evt) { + this.lastEvent = evt; + } + + public TokenHierarchyEvent fetchLastEvent() { + TokenHierarchyEvent evt = lastEvent; + lastEvent = null; + return evt; + } + + } + + } Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/LexerTestUtilities.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/LexerTestUtilities.java,v retrieving revision 1.4 diff -c -r1.4 LexerTestUtilities.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/LexerTestUtilities.java 7 Nov 2006 16:16:24 -0000 1.4 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/LexerTestUtilities.java 28 Nov 2006 14:20:18 -0000 *************** *** 57,63 **** /** * @see #assertTokenEquals(String, TokenSequence, TokenId, String, int) */ ! public static void assertTokenEquals(TokenSequence ts, TokenId id, String text, int offset) { assertTokenEquals(null, ts, id, text, offset); } --- 57,63 ---- /** * @see #assertTokenEquals(String, TokenSequence, TokenId, String, int) */ ! public static void assertTokenEquals(TokenSequence ts, TokenId id, String text, int offset) { assertTokenEquals(null, ts, id, text, offset); } *************** *** 67,75 **** * * @param offset expected offset. It may be -1 to prevent offset testing. */ ! public static void assertTokenEquals(String message, TokenSequence ts, TokenId id, String text, int offset) { message = messagePrefix(message); ! Token t = ts.token(); TokenId tId = t.id(); TestCase.assertEquals(message + "Invalid token.id()", id, tId); CharSequence tText = t.text(); --- 67,75 ---- * * @param offset expected offset. It may be -1 to prevent offset testing. */ ! public static void assertTokenEquals(String message, TokenSequence ts, TokenId id, String text, int offset) { message = messagePrefix(message); ! Token t = ts.token(); TokenId tId = t.id(); TestCase.assertEquals(message + "Invalid token.id()", id, tId); CharSequence tText = t.text(); *************** *** 92,103 **** } } ! public static void assertTokenEquals(TokenSequence ts, TokenId id, String text, int offset, int lookahead, Object state) { assertTokenEquals(null, ts, id, text, offset, lookahead, state); } ! public static void assertTokenEquals(String message, TokenSequence ts, TokenId id, String text, int offset, int lookahead, Object state) { assertTokenEquals(message, ts, id, text, offset); --- 92,103 ---- } } ! 
public static void assertTokenEquals(TokenSequence ts, TokenId id, String text, int offset, int lookahead, Object state) { assertTokenEquals(null, ts, id, text, offset, lookahead, state); } ! public static void assertTokenEquals(String message, TokenSequence ts, TokenId id, String text, int offset, int lookahead, Object state) { assertTokenEquals(message, ts, id, text, offset); *************** *** 133,143 **** /** * Assert that the next token in the token sequence */ ! public static void assertNextTokenEquals(TokenSequence ts, TokenId id, String text) { assertNextTokenEquals(null, ts, id, text); } ! public static void assertNextTokenEquals(String message, TokenSequence ts, TokenId id, String text) { String messagePrefix = messagePrefix(message); TestCase.assertTrue(messagePrefix + "No next token available", ts.moveNext()); assertTokenEquals(message, ts, id, text, -1); --- 133,143 ---- /** * Assert that the next token in the token sequence */ ! public static void assertNextTokenEquals(TokenSequence ts, TokenId id, String text) { assertNextTokenEquals(null, ts, id, text); } ! public static void assertNextTokenEquals(String message, TokenSequence ts, TokenId id, String text) { String messagePrefix = messagePrefix(message); TestCase.assertTrue(messagePrefix + "No next token available", ts.moveNext()); assertTokenEquals(message, ts, id, text, -1); *************** *** 147,154 **** * @see #assertTokenSequencesEqual(String,TokenSequence,TokenHierarchy,TokenSequence,TokenHierarchy,boolean) */ public static void assertTokenSequencesEqual( ! TokenSequence expected, TokenHierarchy expectedHi, ! TokenSequence actual, TokenHierarchy actualHi, boolean testLookaheadAndState) { assertTokenSequencesEqual(null, expected, expectedHi, actual, actualHi, testLookaheadAndState); } --- 147,154 ---- * @see #assertTokenSequencesEqual(String,TokenSequence,TokenHierarchy,TokenSequence,TokenHierarchy,boolean) */ public static void assertTokenSequencesEqual( ! TokenSequence expected, TokenHierarchy expectedHi, ! TokenSequence actual, TokenHierarchy actualHi, boolean testLookaheadAndState) { assertTokenSequencesEqual(null, expected, expectedHi, actual, actualHi, testLookaheadAndState); } *************** *** 170,177 **** * be false because snapshots do not hold lookaheads and states. */ public static void assertTokenSequencesEqual(String message, ! TokenSequence expected, TokenHierarchy expectedHi, ! TokenSequence actual, TokenHierarchy actualHi, boolean testLookaheadAndState) { boolean success = false; try { --- 170,177 ---- * be false because snapshots do not hold lookaheads and states. */ public static void assertTokenSequencesEqual(String message, ! TokenSequence expected, TokenHierarchy expectedHi, ! TokenSequence actual, TokenHierarchy actualHi, boolean testLookaheadAndState) { boolean success = false; try { *************** *** 192,201 **** } private static void assertTokensEqual(String message, ! TokenSequence ts, TokenHierarchy tokenHierarchy, ! TokenSequence ts2, TokenHierarchy tokenHierarchy2, boolean testLookaheadAndState) { ! Token t = ts.token(); ! Token t2 = ts2.token(); message = messagePrefix(message); TestCase.assertEquals(message + "Invalid token id", t.id(), t2.id()); --- 192,201 ---- } private static void assertTokensEqual(String message, ! TokenSequence ts, TokenHierarchy tokenHierarchy, ! TokenSequence ts2, TokenHierarchy tokenHierarchy2, boolean testLookaheadAndState) { ! Token t = ts.token(); ! 
Token t2 = ts2.token(); message = messagePrefix(message); TestCase.assertEquals(message + "Invalid token id", t.id(), t2.id()); *************** *** 218,224 **** * @param ts non-null token sequence. * @return number of flyweight tokens in the token sequence. */ ! public static int flyweightTokenCount(TokenSequence ts) { int flyTokenCount = 0; ts.moveIndex(0); while (ts.moveNext()) { --- 218,224 ---- * @param ts non-null token sequence. * @return number of flyweight tokens in the token sequence. */ ! public static int flyweightTokenCount(TokenSequence ts) { int flyTokenCount = 0; ts.moveIndex(0); while (ts.moveNext()) { *************** *** 237,243 **** * @return number of characters contained in the flyweight tokens * in the token sequence. */ ! public static int flyweightTextLength(TokenSequence ts) { int flyTokenTextLength = 0; ts.moveIndex(0); while (ts.moveNext()) { --- 237,243 ---- * @return number of characters contained in the flyweight tokens * in the token sequence. */ ! public static int flyweightTextLength(TokenSequence ts) { int flyTokenTextLength = 0; ts.moveIndex(0); while (ts.moveNext()) { *************** *** 255,261 **** * @return non-null list containing number of the flyweight tokens that have the length * equal to the index in the list. */ ! public static List flyweightDistribution(TokenSequence ts) { List distribution = new ArrayList(); ts.moveIndex(0); while (ts.moveNext()) { --- 255,261 ---- * @return non-null list containing number of the flyweight tokens that have the length * equal to the index in the list. */ ! public static List flyweightDistribution(TokenSequence ts) { List distribution = new ArrayList(); ts.moveIndex(0); while (ts.moveNext()) { *************** *** 300,308 **** } public static void incCheck(Document doc, boolean nested) { ! TokenHierarchy thInc = TokenHierarchy.get(doc); ! @SuppressWarnings("unchecked") ! Language language = (Language) doc.getProperty(Language.class); String docText = null; try { --- 300,307 ---- } public static void incCheck(Document doc, boolean nested) { ! TokenHierarchy thInc = TokenHierarchy.get(doc); ! Language language = (Language) doc.getProperty(Language.class); String docText = null; try { *************** *** 311,317 **** e.printStackTrace(); TestCase.fail("BadLocationException occurred"); } ! TokenHierarchy thBatch = TokenHierarchy.create(docText, language); boolean success = false; try { // Compare lookaheads and states as well --- 310,316 ---- e.printStackTrace(); TestCase.fail("BadLocationException occurred"); } ! TokenHierarchy thBatch = TokenHierarchy.create(docText, language); boolean success = false; try { // Compare lookaheads and states as well *************** *** 321,327 **** } finally { if (!success) { System.err.println("BATCH token sequence dump:\n" + thBatch.tokenSequence()); ! TokenHierarchy lastHi = (TokenHierarchy)doc.getProperty(LAST_TOKEN_HIERARCHY); if (lastHi != null) { System.err.println("PREVIOUS token sequence dump:\n" + lastHi.tokenSequence()); } --- 320,326 ---- } finally { if (!success) { System.err.println("BATCH token sequence dump:\n" + thBatch.tokenSequence()); ! TokenHierarchy lastHi = (TokenHierarchy)doc.getProperty(LAST_TOKEN_HIERARCHY); if (lastHi != null) { System.err.println("PREVIOUS token sequence dump:\n" + lastHi.tokenSequence()); } *************** *** 329,335 **** } // Check the change since last modification ! 
TokenHierarchy lastHi = (TokenHierarchy)doc.getProperty(LAST_TOKEN_HIERARCHY); if (lastHi != null) { // TODO comparison } --- 328,334 ---- } // Check the change since last modification ! TokenHierarchy lastHi = (TokenHierarchy)doc.getProperty(LAST_TOKEN_HIERARCHY); if (lastHi != null) { // TODO comparison } *************** *** 349,355 **** *
    * The method uses reflection to get reference to tokenList field in token sequence. */ ! public static int lookahead(TokenSequence ts) { return tokenList(ts).lookahead(ts.index()); } --- 348,354 ---- *
    * The method uses reflection to get reference to tokenList field in token sequence. */ ! public static int lookahead(TokenSequence ts) { return tokenList(ts).lookahead(ts.index()); } *************** *** 358,364 **** *
    * The method uses reflection to get reference to tokenList field in token sequence. */ ! public static Object state(TokenSequence ts) { return tokenList(ts).state(ts.index()); } --- 357,363 ---- *
    * The method uses reflection to get reference to tokenList field in token sequence. */ ! public static Object state(TokenSequence ts) { return tokenList(ts).state(ts.index()); } *************** *** 411,423 **** /** * Get token list from the given token sequence for testing purposes. */ ! public static TokenList tokenList(TokenSequence ts) { try { if (tokenListField == null) { tokenListField = ts.getClass().getDeclaredField("tokenList"); tokenListField.setAccessible(true); } ! return (TokenList)tokenListField.get(ts); } catch (Exception e) { TestCase.fail(e.getMessage()); return null; // never reached --- 410,424 ---- /** * Get token list from the given token sequence for testing purposes. */ ! public static TokenList tokenList(TokenSequence ts) { try { if (tokenListField == null) { tokenListField = ts.getClass().getDeclaredField("tokenList"); tokenListField.setAccessible(true); } ! @SuppressWarnings("unchecked") ! TokenList tl = (TokenList)tokenListField.get(ts); ! return tl; } catch (Exception e) { TestCase.fail(e.getMessage()); return null; // never reached Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/TestRandomModify.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/TestRandomModify.java,v retrieving revision 1.3 diff -c -r1.3 TestRandomModify.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/TestRandomModify.java 26 Oct 2006 20:45:26 -0000 1.3 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/TestRandomModify.java 28 Nov 2006 14:20:19 -0000 *************** *** 249,256 **** junit.framework.TestCase.fail(); TokenHierarchy hi = TokenHierarchy.get(doc); TokenHierarchy snapshot = hi.createSnapshot(); ! @SuppressWarnings("unchecked") ! Language language = (Language) doc.getProperty(Language.class); TokenHierarchy batchMirror = TokenHierarchy.create(doc.getText(0, doc.getLength()), language); snapshots.add(new SnapshotDescription(snapshot, batchMirror)); --- 249,255 ---- junit.framework.TestCase.fail(); TokenHierarchy hi = TokenHierarchy.get(doc); TokenHierarchy snapshot = hi.createSnapshot(); ! Language language = (Language) doc.getProperty(Language.class); TokenHierarchy batchMirror = TokenHierarchy.create(doc.getText(0, doc.getLength()), language); snapshots.add(new SnapshotDescription(snapshot, batchMirror)); *************** *** 303,313 **** doc.remove(0, doc.getLength()); } ! public final Language language() { ! return (Language)doc.getProperty(Language.class); } ! public final void setLanguage(Language language) { doc.putProperty(Language.class, language); } --- 302,312 ---- doc.remove(0, doc.getLength()); } ! public final Language language() { ! return (Language)doc.getProperty(Language.class); } ! public final void setLanguage(Language language) { doc.putProperty(Language.class, language); } *************** *** 318,324 **** protected void checkConsistency() throws Exception { if (!isSkipLexerConsistencyCheck()) { if (isDebugHierarchy()) { ! TokenHierarchy hi = TokenHierarchy.get(doc); if (hi != null) { System.err.println("DEBUG hierarchy:\n" + hi.tokenSequence()); } --- 317,323 ---- protected void checkConsistency() throws Exception { if (!isSkipLexerConsistencyCheck()) { if (isDebugHierarchy()) { ! TokenHierarchy hi = TokenHierarchy.get(doc); if (hi != null) { System.err.println("DEBUG hierarchy:\n" + hi.tokenSequence()); } *************** *** 328,335 **** for (int i = 0; i < snapshots.size(); i++) { SnapshotDescription sd = snapshots.get(i); ! 
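Note (illustrative, not part of the patch): the reflection-based lookahead()/state() helpers above are meant to be used from lexer tests roughly as in the sketch below; the input text and assertions are assumptions for illustration.

    public void testLookaheadAndState() throws Exception {
        // Batch-lex a small input with the simple test language used throughout this patch.
        TokenHierarchy hi = TokenHierarchy.create("public static", SimpleTokenId.language());
        TokenSequence ts = hi.tokenSequence();
        assertTrue(ts.moveNext()); // position the sequence on the first token
        int lookahead = LexerTestUtilities.lookahead(ts); // lookahead recorded for the current token
        Object state = LexerTestUtilities.state(ts);      // lexer state after the current token
        assertTrue(lookahead >= 0);
        // The state value is lexer-specific; tests typically compare it between the
        // incremental and the batch hierarchy rather than asserting a concrete value.
    }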
TokenHierarchy bm = sd.batchMirror(); ! TokenHierarchy s = sd.snapshot(); if (isDebugOperation()) { System.err.println("Comparing snapshot " + i + " of " + snapshots.size()); } --- 327,334 ---- for (int i = 0; i < snapshots.size(); i++) { SnapshotDescription sd = snapshots.get(i); ! TokenHierarchy bm = sd.batchMirror(); ! TokenHierarchy s = sd.snapshot(); if (isDebugOperation()) { System.err.println("Comparing snapshot " + i + " of " + snapshots.size()); } *************** *** 342,350 **** private static final class SnapshotDescription { ! private final TokenHierarchy snapshot; ! private final TokenHierarchy batchMirror; public SnapshotDescription(TokenHierarchy snapshot, TokenHierarchy batchMirror) { this.snapshot = snapshot; --- 341,349 ---- private static final class SnapshotDescription { ! private final TokenHierarchy snapshot; ! private final TokenHierarchy batchMirror; public SnapshotDescription(TokenHierarchy snapshot, TokenHierarchy batchMirror) { this.snapshot = snapshot; Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/DocumentUpdateTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/DocumentUpdateTest.java,v retrieving revision 1.4 diff -c -r1.4 DocumentUpdateTest.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/DocumentUpdateTest.java 26 Oct 2006 20:45:26 -0000 1.4 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/DocumentUpdateTest.java 28 Nov 2006 14:20:19 -0000 *************** *** 15,20 **** --- 15,21 ---- import javax.swing.text.Document; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.junit.NbTestCase; import org.netbeans.lib.lexer.test.LexerTestUtilities; *************** *** 40,50 **** d.insertString(0, "\"\\t\\b\\t test\"", null); ! TokenHierarchy h = TokenHierarchy.get(d); h.tokenSequence().tokenCount(); ! TokenSequence s = h.tokenSequence(); assertTrue(s.moveNext()); --- 41,51 ---- d.insertString(0, "\"\\t\\b\\t test\"", null); ! TokenHierarchy h = TokenHierarchy.get(d); h.tokenSequence().tokenCount(); ! TokenSequence s = h.tokenSequence(); assertTrue(s.moveNext()); *************** *** 60,70 **** d.insertString(0, "\"\\t\\b\\b\\t sfdsffffffffff\"", null); ! TokenHierarchy h = TokenHierarchy.get(d); h.tokenSequence().tokenCount(); ! TokenSequence s = h.tokenSequence(); assertTrue(s.moveNext()); --- 61,71 ---- d.insertString(0, "\"\\t\\b\\b\\t sfdsffffffffff\"", null); ! TokenHierarchy h = TokenHierarchy.get(d); h.tokenSequence().tokenCount(); ! TokenSequence s = h.tokenSequence(); assertTrue(s.moveNext()); *************** *** 80,90 **** d.insertString(0, "\"t\"", null); ! TokenHierarchy h = TokenHierarchy.get(d); h.tokenSequence().tokenCount(); ! TokenSequence s = h.tokenSequence(); assertTrue(s.moveNext()); --- 81,91 ---- d.insertString(0, "\"t\"", null); ! TokenHierarchy h = TokenHierarchy.get(d); h.tokenSequence().tokenCount(); ! TokenSequence s = h.tokenSequence(); assertTrue(s.moveNext()); *************** *** 100,106 **** assertTrue(s.moveNext()); ! TokenSequence e = s.embedded(); assertNotNull(e); --- 101,107 ---- assertTrue(s.moveNext()); ! 
TokenSequence e = s.embedded(); assertNotNull(e); Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/TokenHierarchySnapshotTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/TokenHierarchySnapshotTest.java,v retrieving revision 1.4 diff -c -r1.4 TokenHierarchySnapshotTest.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/TokenHierarchySnapshotTest.java 26 Oct 2006 20:45:26 -0000 1.4 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/TokenHierarchySnapshotTest.java 28 Nov 2006 14:20:19 -0000 *************** *** 17,22 **** --- 17,23 ---- import junit.framework.TestCase; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.lib.lexer.test.LexerTestUtilities; import org.netbeans.lib.lexer.test.ModificationTextDocument; *************** *** 43,51 **** Document doc = new ModificationTextDocument(); // Assign a language to the document doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); assertNotNull("Null token hierarchy for document", hi); ! TokenSequence ts = hi.tokenSequence(); assertFalse(ts.moveNext()); // Insert text into document --- 44,52 ---- Document doc = new ModificationTextDocument(); // Assign a language to the document doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); assertNotNull("Null token hierarchy for document", hi); ! TokenSequence ts = hi.tokenSequence(); assertFalse(ts.moveNext()); // Insert text into document *************** *** 68,75 **** // Create snapshot1 and check hierarchy String hi1text = doc.getText(0, doc.getLength()); ! TokenHierarchy hi1 = TokenHierarchy.create(hi1text, SimpleTokenId.language()); ! TokenHierarchy snapshot1 = hi.createSnapshot(); assertEquals(snapshot1.snapshotOf(), hi); assertFalse(snapshot1.isSnapshotReleased()); --- 69,76 ---- // Create snapshot1 and check hierarchy String hi1text = doc.getText(0, doc.getLength()); ! TokenHierarchy hi1 = TokenHierarchy.create(hi1text, SimpleTokenId.language()); ! TokenHierarchy snapshot1 = hi.createSnapshot(); assertEquals(snapshot1.snapshotOf(), hi); assertFalse(snapshot1.isSnapshotReleased()); *************** *** 94,101 **** // Create snapshot2 and check hierarchy String hi2text = doc.getText(0, doc.getLength()); ! TokenHierarchy hi2 = TokenHierarchy.create(hi2text, SimpleTokenId.language()); ! TokenHierarchy snapshot2 = hi.createSnapshot(); assertEquals(snapshot2.snapshotOf(), hi); // Check that all the non-fly tokens are mutable --- 95,102 ---- // Create snapshot2 and check hierarchy String hi2text = doc.getText(0, doc.getLength()); ! TokenHierarchy hi2 = TokenHierarchy.create(hi2text, SimpleTokenId.language()); ! 
TokenHierarchy snapshot2 = hi.createSnapshot(); assertEquals(snapshot2.snapshotOf(), hi); // Check that all the non-fly tokens are mutable Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/TokenListUpdaterTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/TokenListUpdaterTest.java,v retrieving revision 1.5 diff -c -r1.5 TokenListUpdaterTest.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/TokenListUpdaterTest.java 26 Oct 2006 20:45:26 -0000 1.5 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/inc/TokenListUpdaterTest.java 28 Nov 2006 14:20:19 -0000 *************** *** 18,23 **** --- 18,24 ---- import junit.framework.TestCase; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.lib.lexer.test.LexerTestUtilities; import org.netbeans.lib.lexer.test.ModificationTextDocument; *************** *** 47,55 **** doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); assertNotNull("Null token hierarchy for document", hi); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "abc", 0); assertTrue(ts.moveNext()); --- 48,56 ---- doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); assertNotNull("Null token hierarchy for document", hi); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "abc", 0); assertTrue(ts.moveNext()); *************** *** 85,92 **** doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); assertTrue(ts.moveNext()); --- 86,93 ---- doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); assertTrue(ts.moveNext()); *************** *** 114,121 **** doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); --- 115,122 ---- doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); *************** *** 141,148 **** doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); --- 142,149 ---- doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! 
TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); *************** *** 172,179 **** doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); assertTrue(ts.moveNext()); --- 173,180 ---- doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); assertTrue(ts.moveNext()); *************** *** 205,212 **** doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); --- 206,213 ---- doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); *************** *** 236,247 **** doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); // Insert "-" doc.insertString(2, "-", null); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); assertTrue(ts.moveNext()); --- 237,248 ---- doc.insertString(0, text, null); doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); // Insert "-" doc.insertString(2, "-", null); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "a", 0); assertTrue(ts.moveNext()); Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/CustomEmbeddingTest.java =================================================================== RCS file: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/CustomEmbeddingTest.java diff -N lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/CustomEmbeddingTest.java *** /dev/null 1 Jan 1970 00:00:00 -0000 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/CustomEmbeddingTest.java 28 Nov 2006 14:20:19 -0000 *************** *** 0 **** --- 1,139 ---- + /* + * Sun Public License Notice + * + * The contents of this file are subject to the Sun Public License + * Version 1.0 (the "License"). You may not use this file except in + * compliance with the License. A copy of the License is available at + * http://www.sun.com/ + * + * The Original Code is NetBeans. The Initial Developer of the Original + * Code is Sun Microsystems, Inc. Portions Copyright 1997-2000 Sun + * Microsystems, Inc. All Rights Reserved. 
+ */ + + package org.netbeans.lib.lexer.test.simple; + + import org.netbeans.api.lexer.TokenChange; + import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenHierarchyEvent; + import org.netbeans.api.lexer.TokenHierarchyListener; + import org.netbeans.api.lexer.TokenId; + import org.netbeans.api.lexer.TokenSequence; + import org.netbeans.junit.NbTestCase; + import org.netbeans.lib.lexer.test.LexerTestUtilities; + import org.netbeans.lib.lexer.test.simple.SimplePlainTokenId; + import org.netbeans.spi.lexer.LanguageEmbedding; + + /** + * Test several simple lexer impls. + * + * @author mmetelka + */ + public class CustomEmbeddingTest extends NbTestCase { + + public CustomEmbeddingTest(String testName) { + super(testName); + } + + protected void setUp() throws java.lang.Exception { + } + + protected void tearDown() throws java.lang.Exception { + } + + public void testCreateEmbedding() { + String text = "abc/*def ghi */// line comment"; + TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); + THListener listener = new THListener(); + hi.addTokenHierarchyListener(listener); + TokenSequence ts = hi.tokenSequence(); + assertTrue(ts.moveNext()); + LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "abc", 0); + assertTrue(ts.moveNext()); + LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.BLOCK_COMMENT, "/*def ghi */", 3); + assertTrue(ts.moveNext()); + LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.LINE_COMMENT, "// line comment", 15); + assertTrue(ts.createEmbedding(SimpleTokenId.language(), 3, 0)); + + // Check the fired event + TokenHierarchyEvent evt = listener.fetchLastEvent(); + assertNotNull(evt); + TokenChange tc = evt.tokenChange(); + assertNotNull(tc); + assertEquals(2, tc.index()); + assertEquals(15, tc.offset()); + assertEquals(0, tc.addedTokenCount()); + assertEquals(0, tc.removedTokenCount()); + assertEquals(SimpleTokenId.language(), tc.language()); + assertEquals(1, tc.embeddedChangeCount()); + TokenChange etc = tc.embeddedChange(0); + assertEquals(0, etc.index()); + assertEquals(18, etc.offset()); + assertEquals(0, etc.addedTokenCount()); // 0 to allow for lazy lexing where this would be unknowns + assertEquals(0, etc.removedTokenCount()); + assertEquals(SimpleTokenId.language(), etc.language()); + assertEquals(0, etc.embeddedChangeCount()); + + // Test the contents of the embedded sequence + TokenSequence ets = ts.embedded(); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.IDENTIFIER, "line", 18); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.WHITESPACE, " ", 22); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.IDENTIFIER, "comment", 23); + assertFalse(ets.moveNext()); + + // Move main TS back and try extra embedding on comment + assertTrue(ts.movePrevious()); + assertTrue(ts.createEmbedding(SimpleTokenId.language(), 2, 2)); + ets = ts.embedded(); // Should be the explicit one + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.IDENTIFIER, "def", 5); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.WHITESPACE, " ", 8); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.IDENTIFIER, "ghi", 9); + assertTrue(ets.moveNext()); + LexerTestUtilities.assertTokenEquals(ets, SimpleTokenId.WHITESPACE, " ", 12); + assertFalse(ets.moveNext()); + + // Get the default embedding - should be 
Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/FlyTokensTest.java
===================================================================
RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/FlyTokensTest.java,v
retrieving revision 1.3
diff -c -r1.3 FlyTokensTest.java
*** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/FlyTokensTest.java	18 Oct 2006 16:19:25 -0000	1.3
--- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/FlyTokensTest.java	28 Nov 2006 14:20:19 -0000
***************
*** 14,21 ****
  package org.netbeans.lib.lexer.test.simple;
  
  import junit.framework.TestCase;
- import org.netbeans.api.lexer.Token;
  import org.netbeans.api.lexer.TokenHierarchy;
  import org.netbeans.api.lexer.TokenSequence;
  import org.netbeans.lib.lexer.LexerUtilsConstants;
  import org.netbeans.lib.lexer.test.LexerTestUtilities;
--- 14,21 ----
  package org.netbeans.lib.lexer.test.simple;
  
  import junit.framework.TestCase;
  import org.netbeans.api.lexer.TokenHierarchy;
+ import org.netbeans.api.lexer.TokenId;
  import org.netbeans.api.lexer.TokenSequence;
  import org.netbeans.lib.lexer.LexerUtilsConstants;
  import org.netbeans.lib.lexer.test.LexerTestUtilities;
***************
*** 41,48 ****
          // Both "public" and " " are flyweight
          String text = "public public public public public public public ";
          int commentTextStartOffset = 5;
!         TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language());
!         TokenSequence ts = hi.tokenSequence();
          int firstNonFlyIndex = -1;
          int secondNonFlyIndex = -1;
          int tokenIndex = 0;
--- 41,48 ----
          // Both "public" and " " are flyweight
          String text = "public public public public public public public ";
          int commentTextStartOffset = 5;
!         TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language());
!         TokenSequence ts = hi.tokenSequence();
          int firstNonFlyIndex = -1;
          int secondNonFlyIndex = -1;
          int tokenIndex = 0;
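Throughout the rest of this patch the hand-written anonymous LanguageEmbedding subclasses are replaced by the static factory LanguageEmbedding.create(language, startSkipLength, endSkipLength); see the SimpleLanguageProvider, SimpleTokenId and JspTokenId hunks further down. For reference, here is a minimal self-contained LanguageHierarchy written against that factory, mirroring the GenLanguage diff that follows; DemoLanguage, its token ids and the "text/x-demo" mime type are invented for illustration, and raw types are used as elsewhere in these diffs. This sketch is not part of the patch.

package org.netbeans.lib.lexer.test.simple;

import java.util.Arrays;
import java.util.Collection;
import org.netbeans.api.lexer.InputAttributes;
import org.netbeans.api.lexer.Language;
import org.netbeans.api.lexer.LanguagePath;
import org.netbeans.api.lexer.Token;
import org.netbeans.api.lexer.TokenId;
import org.netbeans.spi.lexer.LanguageEmbedding;
import org.netbeans.spi.lexer.LanguageHierarchy;
import org.netbeans.spi.lexer.Lexer;
import org.netbeans.spi.lexer.LexerRestartInfo;

public final class DemoLanguage {

    // Ordinals and categories follow the GenLanguage style below
    public static final TokenId TEXT_ID = LanguageHierarchy.newId("TEXT", 0, "text");
    public static final TokenId COMMENT_ID = LanguageHierarchy.newId("COMMENT", 1, "comment");

    private static final Language language = new LanguageHierarchy() {

        protected Collection createTokenIds() {
            return Arrays.asList(new TokenId[] { TEXT_ID, COMMENT_ID });
        }

        public Lexer createLexer(LexerRestartInfo info) {
            return null; // a real language would return its Lexer here
        }

        public LanguageEmbedding embedding(
        Token token, LanguagePath languagePath, InputAttributes inputAttributes) {
            if (token.id() == COMMENT_ID) {
                // Lex comment bodies with the plain test language, skipping "/*" and "*/"
                return LanguageEmbedding.create(SimplePlainTokenId.language(), 2, 2);
            }
            return null; // No embedding
        }

        public String mimeType() {
            return "text/x-demo";
        }

    }.language();

    public static Language language() {
        return language;
    }

    private DemoLanguage() {
    }
}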
Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/GenLanguage.java
===================================================================
RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/GenLanguage.java,v
retrieving revision 1.4
diff -c -r1.4 GenLanguage.java
*** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/GenLanguage.java	26 Oct 2006 20:45:27 -0000	1.4
--- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/GenLanguage.java	28 Nov 2006 14:20:19 -0000
***************
*** 66,72 ****
      public static final TokenId PRIVATE_ID = LanguageHierarchy.newId("PRIVATE", PRIVATE, "keyword");
      public static final TokenId STATIC_ID = LanguageHierarchy.newId("STATIC", STATIC, "keyword");
  
!     private static final Language desc = new LanguageHierarchy() {
          protected Collection createTokenIds() {
              return Arrays.asList(new TokenId[] {
                  IDENTIFIER_ID,
--- 66,72 ----
      public static final TokenId PRIVATE_ID = LanguageHierarchy.newId("PRIVATE", PRIVATE, "keyword");
      public static final TokenId STATIC_ID = LanguageHierarchy.newId("STATIC", STATIC, "keyword");
  
!     private static final Language language = new LanguageHierarchy() {
          protected Collection createTokenIds() {
              return Arrays.asList(new TokenId[] {
                  IDENTIFIER_ID,
***************
*** 87,108 ****
              return cats;
          }
  
!         public Lexer createLexer(LexerRestartInfo info) {
              return null;
          }
  
-         public LanguageEmbedding embedding(Token token, boolean tokenComplete,
-         LanguagePath languagePath, InputAttributes inputAttributes) {
-             return null; // No embedding
-         }
- 
          public String mimeType() {
              return "text/x-gen";
          }
  
      }.language();
  
!     public static Language language() {
!         return desc;
      }
      private GenLanguage() {
--- 87,103 ----
              return cats;
          }
  
!         public Lexer createLexer(LexerRestartInfo info) {
              return null;
          }
  
          public String mimeType() {
              return "text/x-gen";
          }
  
      }.language();
  
!     public static Language language() {
!         return language;
      }
      private GenLanguage() {
Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/GenLanguageTest.java
===================================================================
RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/GenLanguageTest.java,v
retrieving revision 1.3
diff -c -r1.3 GenLanguageTest.java
*** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/GenLanguageTest.java	26 Oct 2006 20:45:27 -0000	1.3
--- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/GenLanguageTest.java	28 Nov 2006 14:20:19 -0000
***************
*** 45,52 ****
  
      public void testTokenIds() {
          // Check that token ids are all present and correctly ordered
!         Language language = GenLanguage.language();
!         Set ids = language.tokenIds();
          assertTrue("Invalid ids.size() - expected " + IDS_SIZE, ids.size() == IDS_SIZE);
  
          TokenId[] idArray = {
--- 45,52 ----
  
      public void testTokenIds() {
          // Check that token ids are all present and correctly ordered
!         Language language = GenLanguage.language();
!         Set ids = language.tokenIds();
          assertTrue("Invalid ids.size() - expected " + IDS_SIZE, ids.size() == IDS_SIZE);
  
          TokenId[] idArray = {
***************
*** 66,79 ****
          };
  
          // Check operations with ids
!         Collection testIds = Arrays.asList(idArray);
          LexerTestUtilities.assertCollectionsEqual("Ids do not match with test ones",
              ids, testIds);
  
          // Check that ids.iterator() is ordered by ordinal
          int ind = 0;
!         for (Iterator it = ids.iterator(); it.hasNext();) {
!
TokenId id = (TokenId) it.next(); assertTrue("Token ids not sorted by ordinal at index=" + ind, id == idArray[ind]); ind++; assertSame(language.tokenId(id.name()), id); --- 66,78 ---- }; // Check operations with ids ! Collection testIds = Arrays.asList(idArray); LexerTestUtilities.assertCollectionsEqual("Ids do not match with test ones", ids, testIds); // Check that ids.iterator() is ordered by ordinal int ind = 0; ! for (TokenId id : ids) { assertTrue("Token ids not sorted by ordinal at index=" + ind, id == idArray[ind]); ind++; assertSame(language.tokenId(id.name()), id); *************** *** 153,159 **** language.tokenCategoryMembers("comment") ); ! @SuppressWarnings("unchecked") List testIdCats = language.tokenCategories(GenLanguage.IDENTIFIER_ID); LexerTestUtilities.assertCollectionsEqual( Arrays.asList(new String[] { --- 152,158 ---- language.tokenCategoryMembers("comment") ); ! List testIdCats = language.tokenCategories(GenLanguage.IDENTIFIER_ID); LexerTestUtilities.assertCollectionsEqual( Arrays.asList(new String[] { *************** *** 162,168 **** testIdCats ); ! @SuppressWarnings("unchecked") List testIdCats2 = language.tokenCategories(GenLanguage.PLUS_ID); LexerTestUtilities.assertCollectionsEqual( Arrays.asList(new String[] { --- 161,167 ---- testIdCats ); ! List testIdCats2 = language.tokenCategories(GenLanguage.PLUS_ID); LexerTestUtilities.assertCollectionsEqual( Arrays.asList(new String[] { *************** *** 174,180 **** // Check Language.merge() ! @SuppressWarnings("unchecked") Collection mergedIds = language.merge( Arrays.asList(new TokenId[] { GenLanguage.IDENTIFIER_ID }), language.merge(language.tokenCategoryMembers("comment"), --- 173,179 ---- // Check Language.merge() ! Collection mergedIds = language.merge( Arrays.asList(new TokenId[] { GenLanguage.IDENTIFIER_ID }), language.merge(language.tokenCategoryMembers("comment"), Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleCharTokenId.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleCharTokenId.java,v retrieving revision 1.4 diff -c -r1.4 SimpleCharTokenId.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleCharTokenId.java 26 Oct 2006 20:45:27 -0000 1.4 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleCharTokenId.java 28 Nov 2006 14:20:19 -0000 *************** *** 55,63 **** return new SimpleCharLexer(info); } ! public LanguageEmbedding embedding( ! Token token, boolean tokenComplete, ! LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No embedding } --- 55,62 ---- return new SimpleCharLexer(info); } ! public LanguageEmbedding embedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No embedding } Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleJavadocTokenId.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleJavadocTokenId.java,v retrieving revision 1.5 diff -c -r1.5 SimpleJavadocTokenId.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleJavadocTokenId.java 26 Oct 2006 20:45:27 -0000 1.5 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleJavadocTokenId.java 28 Nov 2006 14:20:19 -0000 *************** *** 70,78 **** return new SimpleJavadocLexer(info); } ! public LanguageEmbedding embedding( ! Token token, boolean tokenComplete, ! 
LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No embedding } --- 70,77 ---- return new SimpleJavadocLexer(info); } ! public LanguageEmbedding embedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No embedding } Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLanguageProvider.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLanguageProvider.java,v retrieving revision 1.7 diff -c -r1.7 SimpleLanguageProvider.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLanguageProvider.java 26 Oct 2006 20:45:27 -0000 1.7 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLanguageProvider.java 28 Nov 2006 14:20:19 -0000 *************** *** 65,83 **** } } ! public LanguageEmbedding findLanguageEmbedding(LanguagePath tokenLanguage, Token token, InputAttributes inputAttributes) { ! if ("text/x-simple-plain".equals(tokenLanguage.mimePath()) && token.id().name().equals("WORD")) { ! return new LanguageEmbedding() { ! public int endSkipLength() { ! return 0; ! } ! public Language language() { ! return SimpleCharTokenId.language(); ! } ! public int startSkipLength() { ! return 0; ! } ! }; } else { return null; } --- 65,74 ---- } } ! public LanguageEmbedding findLanguageEmbedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { ! if ("text/x-simple-plain".equals(languagePath.mimePath()) && token.id().name().equals("WORD")) { ! return LanguageEmbedding.create(SimpleCharTokenId.language(), 0, 0); } else { return null; } Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLexerBatchTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLexerBatchTest.java,v retrieving revision 1.4 diff -c -r1.4 SimpleLexerBatchTest.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLexerBatchTest.java 18 Oct 2006 16:19:27 -0000 1.4 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLexerBatchTest.java 28 Nov 2006 14:20:19 -0000 *************** *** 14,22 **** package org.netbeans.lib.lexer.test.simple; import junit.framework.TestCase; - import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenHierarchy; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.lib.lexer.test.LexerTestUtilities; --- 14,22 ---- package org.netbeans.lib.lexer.test.simple; import junit.framework.TestCase; import org.netbeans.api.lexer.Token; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.lib.lexer.test.LexerTestUtilities; *************** *** 41,48 **** String commentText = "/* test comment */"; String text = "abc+ " + commentText + "def public publica publi static x"; int commentTextStartOffset = 5; ! TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "abc", 0); assertTrue(ts.moveNext()); --- 41,48 ---- String commentText = "/* test comment */"; String text = "abc+ " + commentText + "def public publica publi static x"; int commentTextStartOffset = 5; ! 
TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); ! TokenSequence ts = hi.tokenSequence(); assertTrue(ts.moveNext()); LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.IDENTIFIER, "abc", 0); assertTrue(ts.moveNext()); *************** *** 84,90 **** LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.BLOCK_COMMENT, commentText, commentTextStartOffset); // Test embedded token sequence ! TokenSequence embedded = ts.embedded(); assertNotNull("Null embedded sequence", embedded); assertTrue(embedded.moveNext()); offset = commentTextStartOffset + 2; // skip "/*" --- 84,90 ---- LexerTestUtilities.assertTokenEquals(ts, SimpleTokenId.BLOCK_COMMENT, commentText, commentTextStartOffset); // Test embedded token sequence ! TokenSequence embedded = ts.embedded(); assertNotNull("Null embedded sequence", embedded); assertTrue(embedded.moveNext()); offset = commentTextStartOffset + 2; // skip "/*" *************** *** 114,125 **** long tm; tm = System.currentTimeMillis(); ! TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); tm = System.currentTimeMillis() - tm; assertTrue("Timeout tm = " + tm + "msec", tm < 100); // Should be fast tm = System.currentTimeMillis(); ! TokenSequence ts = hi.tokenSequence(); tm = System.currentTimeMillis() - tm; assertTrue("Timeout tm = " + tm + "msec", tm < 100); // Should be fast --- 114,125 ---- long tm; tm = System.currentTimeMillis(); ! TokenHierarchy hi = TokenHierarchy.create(text, SimpleTokenId.language()); tm = System.currentTimeMillis() - tm; assertTrue("Timeout tm = " + tm + "msec", tm < 100); // Should be fast tm = System.currentTimeMillis(); ! TokenSequence ts = hi.tokenSequence(); tm = System.currentTimeMillis() - tm; assertTrue("Timeout tm = " + tm + "msec", tm < 100); // Should be fast Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLexerIncTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLexerIncTest.java,v retrieving revision 1.5 diff -c -r1.5 SimpleLexerIncTest.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLexerIncTest.java 26 Oct 2006 20:45:27 -0000 1.5 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleLexerIncTest.java 28 Nov 2006 14:20:19 -0000 *************** *** 18,23 **** --- 18,24 ---- import junit.framework.TestCase; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.lib.lexer.test.LexerTestUtilities; import org.netbeans.lib.lexer.test.ModificationTextDocument; *************** *** 43,51 **** Document doc = new ModificationTextDocument(); // Assign a language to the document doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); assertNotNull("Null token hierarchy for document", hi); ! TokenSequence ts = hi.tokenSequence(); assertFalse(ts.moveNext()); // Insert text into document --- 44,52 ---- Document doc = new ModificationTextDocument(); // Assign a language to the document doc.putProperty(Language.class, SimpleTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); assertNotNull("Null token hierarchy for document", hi); ! 
TokenSequence ts = hi.tokenSequence(); assertFalse(ts.moveNext()); // Insert text into document Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimplePlainTokenId.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimplePlainTokenId.java,v retrieving revision 1.4 diff -c -r1.4 SimplePlainTokenId.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimplePlainTokenId.java 26 Oct 2006 20:45:27 -0000 1.4 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimplePlainTokenId.java 28 Nov 2006 14:20:19 -0000 *************** *** 60,68 **** return new SimplePlainLexer(info); } ! public LanguageEmbedding embedding( ! Token token, boolean tokenComplete, ! LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No embedding } --- 60,67 ---- return new SimplePlainLexer(info); } ! public LanguageEmbedding embedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No embedding } Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleStringTokenId.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleStringTokenId.java,v retrieving revision 1.4 diff -c -r1.4 SimpleStringTokenId.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleStringTokenId.java 26 Oct 2006 20:45:27 -0000 1.4 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleStringTokenId.java 28 Nov 2006 14:20:19 -0000 *************** *** 76,84 **** return new SimpleStringLexer(info); } ! public LanguageEmbedding embedding( ! Token token, boolean tokenComplete, ! LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No embedding } --- 76,83 ---- return new SimpleStringLexer(info); } ! public LanguageEmbedding embedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { return null; // No embedding } Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleTestEmbedding.java =================================================================== RCS file: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleTestEmbedding.java diff -N lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleTestEmbedding.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleTestEmbedding.java 26 Oct 2006 20:45:27 -0000 1.5 --- /dev/null 1 Jan 1970 00:00:00 -0000 *************** *** 1,39 **** - /* - * Sun Public License Notice - * - * The contents of this file are subject to the Sun Public License - * Version 1.0 (the "License"). You may not use this file except in - * compliance with the License. A copy of the License is available at - * http://www.sun.com/ - * - * The Original Code is NetBeans. The Initial Developer of the Original - * Code is Sun Microsystems, Inc. Portions Copyright 1997-2000 Sun - * Microsystems, Inc. All Rights Reserved. - */ - - package org.netbeans.lib.lexer.test.simple; - - import org.netbeans.api.lexer.Language; - import org.netbeans.api.lexer.TokenId; - import org.netbeans.spi.lexer.LanguageEmbedding; - - /** - * Embedding of simple plain language into simple language. 
- * - * @author mmetelka - */ - public final class SimpleTestEmbedding extends LanguageEmbedding { - - public Language language() { - return SimplePlainTokenId.language(); - } - - public int startSkipLength() { - return 2; // skip initial "/*" - } - - public int endSkipLength() { - return 2; // skip ending "*/" - } - - } --- 0 ---- Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleTokenId.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleTokenId.java,v retrieving revision 1.4 diff -c -r1.4 SimpleTokenId.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleTokenId.java 26 Oct 2006 20:45:27 -0000 1.4 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/simple/SimpleTokenId.java 28 Nov 2006 14:20:19 -0000 *************** *** 110,153 **** return new SimpleLexer(info); } ! public LanguageEmbedding embedding( ! Token token, boolean tokenComplete, ! LanguagePath languagePath,InputAttributes inputAttributes) { // Test language embedding in the block comment switch (token.id()) { case BLOCK_COMMENT: ! return new SimpleTestEmbedding(); case JAVADOC_COMMENT: ! return new LanguageEmbedding() { ! public Language language() { ! return SimpleJavadocTokenId.language(); ! } ! ! public int startSkipLength() { ! return 3; ! } ! ! public int endSkipLength() { ! return 2; ! } ! }; case STRING_LITERAL: case STRING_LITERAL_INCOMPLETE: ! return new LanguageEmbedding() { ! public Language language() { ! return SimpleStringTokenId.language(); ! } ! ! public int startSkipLength() { ! return 1; ! } ! ! public int endSkipLength() { ! return 1; ! } ! }; } return null; // No embedding } --- 110,128 ---- return new SimpleLexer(info); } ! public LanguageEmbedding embedding( ! Token token, LanguagePath languagePath, InputAttributes inputAttributes) { // Test language embedding in the block comment switch (token.id()) { case BLOCK_COMMENT: ! return LanguageEmbedding.create(SimplePlainTokenId.language(), 2, 2); case JAVADOC_COMMENT: ! return LanguageEmbedding.create(SimpleJavadocTokenId.language(), 3, 2); case STRING_LITERAL: case STRING_LITERAL_INCOMPLETE: ! return LanguageEmbedding.create(SimpleStringTokenId.language(), 1, 1); } return null; // No embedding } Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/state/InvalidLexerOperationTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/state/InvalidLexerOperationTest.java,v retrieving revision 1.2 diff -c -r1.2 InvalidLexerOperationTest.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/state/InvalidLexerOperationTest.java 26 Oct 2006 20:45:28 -0000 1.2 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/state/InvalidLexerOperationTest.java 28 Nov 2006 14:20:19 -0000 *************** *** 18,23 **** --- 18,24 ---- import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.lib.lexer.test.LexerTestUtilities; import org.netbeans.lib.lexer.test.ModificationTextDocument; *************** *** 50,57 **** doc.insertString(0, text, null); // Put the language now into the document so that lexing starts from scratch doc.putProperty(Language.class, StateTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! 
TokenSequence ts = hi.tokenSequence(); ts = hi.tokenSequence(); assertTrue(ts.moveNext()); --- 51,58 ---- doc.insertString(0, text, null); // Put the language now into the document so that lexing starts from scratch doc.putProperty(Language.class, StateTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); ts = hi.tokenSequence(); assertTrue(ts.moveNext()); Index: lexer/test/unit/src/org/netbeans/lib/lexer/test/state/StateLexerIncTest.java =================================================================== RCS file: /cvs/lexer/test/unit/src/org/netbeans/lib/lexer/test/state/StateLexerIncTest.java,v retrieving revision 1.2 diff -c -r1.2 StateLexerIncTest.java *** lexer/test/unit/src/org/netbeans/lib/lexer/test/state/StateLexerIncTest.java 26 Oct 2006 20:45:28 -0000 1.2 --- lexer/test/unit/src/org/netbeans/lib/lexer/test/state/StateLexerIncTest.java 28 Nov 2006 14:20:19 -0000 *************** *** 18,23 **** --- 18,24 ---- import org.netbeans.api.lexer.InputAttributes; import org.netbeans.api.lexer.Language; import org.netbeans.api.lexer.TokenHierarchy; + import org.netbeans.api.lexer.TokenId; import org.netbeans.api.lexer.TokenSequence; import org.netbeans.lib.lexer.test.LexerTestUtilities; import org.netbeans.lib.lexer.test.ModificationTextDocument; *************** *** 45,52 **** InputAttributes attrs = new InputAttributes(); doc.putProperty(InputAttributes.class, attrs); doc.putProperty(Language.class, StateTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertFalse(ts.moveNext()); // Insert text into document --- 46,53 ---- InputAttributes attrs = new InputAttributes(); doc.putProperty(InputAttributes.class, attrs); doc.putProperty(Language.class, StateTokenId.language()); ! TokenHierarchy hi = TokenHierarchy.get(doc); ! TokenSequence ts = hi.tokenSequence(); assertFalse(ts.moveNext()); // Insert text into document Index: web/jspsyntax/lexer/src/org/netbeans/api/jsp/lexer/JspTokenId.java =================================================================== RCS file: /cvs/web/jspsyntax/lexer/src/org/netbeans/api/jsp/lexer/JspTokenId.java,v retrieving revision 1.5 diff -c -r1.5 JspTokenId.java *** web/jspsyntax/lexer/src/org/netbeans/api/jsp/lexer/JspTokenId.java 22 Nov 2006 17:37:18 -0000 1.5 --- web/jspsyntax/lexer/src/org/netbeans/api/jsp/lexer/JspTokenId.java 28 Nov 2006 14:21:41 -0000 *************** *** 93,125 **** LanguagePath languagePath, InputAttributes inputAttributes) { switch(token.id()) { case TEXT: ! return new LanguageEmbedding() { ! public Language language() { ! return HTMLTokenId.language(); ! } ! ! public int startSkipLength() { ! return 0; ! } ! ! public int endSkipLength() { ! return 0; ! } ! }; case EL: ! return new LanguageEmbedding() { ! public Language language() { ! return ELTokenId.language(); ! } ! ! public int startSkipLength() { ! return 2; ! } ! ! public int endSkipLength() { ! return 1; ! } ! }; // case SCRIPTLET: // return new LanguageEmbedding() { // public LanguageDescription language() { --- 93,101 ---- LanguagePath languagePath, InputAttributes inputAttributes) { switch(token.id()) { case TEXT: ! return LanguageEmbedding.create(HTMLTokenId.language(), 0, 0); case EL: ! return LanguageEmbedding.create(ELTokenId.language(), 2, 1); // case SCRIPTLET: // return new LanguageEmbedding() { // public LanguageDescription language() {