Index: editor/util/manifest.mf
===================================================================
RCS file: /cvs/editor/util/manifest.mf,v
retrieving revision 1.14
diff -c -r1.14 manifest.mf
*** editor/util/manifest.mf 17 Oct 2006 02:28:44 -0000 1.14
--- editor/util/manifest.mf 28 Nov 2006 14:19:00 -0000
***************
*** 1,4 ****
Manifest-Version: 1.0
OpenIDE-Module: org.netbeans.modules.editor.util/1
OpenIDE-Module-Localizing-Bundle: org/netbeans/lib/editor/util/Bundle.properties
! OpenIDE-Module-Specification-Version: 1.13
--- 1,4 ----
Manifest-Version: 1.0
OpenIDE-Module: org.netbeans.modules.editor.util/1
OpenIDE-Module-Localizing-Bundle: org/netbeans/lib/editor/util/Bundle.properties
! OpenIDE-Module-Specification-Version: 1.14
Index: editor/util/api/apichanges.xml
===================================================================
RCS file: /cvs/editor/util/api/apichanges.xml,v
retrieving revision 1.7
diff -c -r1.7 apichanges.xml
*** editor/util/api/apichanges.xml 17 Oct 2006 02:28:44 -0000 1.7
--- editor/util/api/apichanges.xml 28 Nov 2006 14:19:00 -0000
***************
*** 82,87 ****
--- 82,101 ----
+ Added ArrayUtilities.unmodifiableList().
+
+
+
+
+
+
+ ArrayUtilities.unmodifiableList() return simple unmodifiable list
+ for the given object array.
+
+
+
+
+
ListenerList.getListeners() return type changed.
Index: editor/util/src/org/netbeans/lib/editor/util/ArrayUtilities.java
===================================================================
RCS file: /cvs/editor/util/src/org/netbeans/lib/editor/util/ArrayUtilities.java,v
retrieving revision 1.2
diff -c -r1.2 ArrayUtilities.java
*** editor/util/src/org/netbeans/lib/editor/util/ArrayUtilities.java 4 Oct 2006 17:02:53 -0000 1.2
--- editor/util/src/org/netbeans/lib/editor/util/ArrayUtilities.java 28 Nov 2006 14:19:00 -0000
***************
*** 19,24 ****
--- 19,28 ----
package org.netbeans.lib.editor.util;
+ import java.util.AbstractList;
+ import java.util.List;
+ import java.util.RandomAccess;
+
/**
* Utility methods related to arrays.
*
***************
*** 183,188 ****
--- 187,204 ----
sb.append("]: ");
}
+ /**
+ * Return unmodifiable list for the given array.
+ *
+ * Unlike <code>Collections.unmodifiableList()</code>
+ * this method
+ * does not use any extra wrappers etc.
+ *
+ * @since 1.14
+ */
+ public static <E> List<E> unmodifiableList(E[] array) {
+ return new UnmodifiableList<E>(array);
+ }
+
public static String toString(Object[] array) {
StringBuilder sb = new StringBuilder();
int maxDigitCount = digitCount(array.length);
***************
*** 203,208 ****
--- 219,265 ----
sb.append('\n');
}
return sb.toString();
+ }
+
+ private static final class UnmodifiableList<E> extends AbstractList<E>
+ implements RandomAccess {
+
+ private E[] array;
+
+ UnmodifiableList(E[] array) {
+ this.array = array;
+ }
+
+ public E get(int index) {
+ if (index >= 0 && index < array.length) {
+ return array[index];
+ } else {
+ throw new IndexOutOfBoundsException("index = " + index + ", size = " + array.length); //NOI18N
+ }
+ }
+
+ public int size() {
+ return array.length;
+ }
+
+
+ public Object[] toArray() {
+ return array.clone();
+ }
+
+ public <T> T[] toArray(T[] a) {
+ if (a.length < array.length) {
+ @SuppressWarnings("unchecked")
+ T[] aa = (T[])java.lang.reflect.Array.
+ newInstance(a.getClass().getComponentType(), array.length);
+ a = aa;
+ }
+ System.arraycopy(array, 0, a, 0, array.length);
+ if (a.length > array.length)
+ a[array.length] = null;
+ return a;
+ }
+
}
}
Index: editor/util/test/unit/src/org/netbeans/lib/editor/util/ArrayUtilitiesTest.java
===================================================================
RCS file: editor/util/test/unit/src/org/netbeans/lib/editor/util/ArrayUtilitiesTest.java
diff -N editor/util/test/unit/src/org/netbeans/lib/editor/util/ArrayUtilitiesTest.java
*** /dev/null 1 Jan 1970 00:00:00 -0000
--- editor/util/test/unit/src/org/netbeans/lib/editor/util/ArrayUtilitiesTest.java 28 Nov 2006 14:19:00 -0000
***************
*** 0 ****
--- 1,52 ----
+ /*
+ * The contents of this file are subject to the terms of the Common Development
+ * and Distribution License (the License). You may not use this file except in
+ * compliance with the License.
+ *
+ * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
+ * or http://www.netbeans.org/cddl.txt.
+ *
+ * When distributing Covered Code, include this CDDL Header Notice in each file
+ * and include the License file at http://www.netbeans.org/cddl.txt.
+ * If applicable, add the following below the CDDL Header, with the fields
+ * enclosed by brackets [] replaced by your own identifying information:
+ * "Portions Copyrighted [year] [name of copyright owner]"
+ *
+ * The Original Software is NetBeans. The Initial Developer of the Original
+ * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun
+ * Microsystems, Inc. All Rights Reserved.
+ */
+
+ package org.netbeans.lib.editor.util;
+
+ import java.util.Arrays;
+ import java.util.List;
+ import org.netbeans.junit.NbTestCase;
+
+ public class ArrayUtilitiesTest extends NbTestCase {
+
+ public ArrayUtilitiesTest(String testName) {
+ super(testName);
+ }
+
+ public void testUnmodifiableList() throws Exception {
+ String[] arr = new String[] { "haf", "cau", "test" };
+ List<String> l = ArrayUtilities.unmodifiableList(arr);
+ assertEquals("haf", l.get(0));
+ assertEquals("cau", l.get(1));
+ assertEquals("test", l.get(2));
+ try {
+ l.add("no");
+ fail("Modifiable!");
+ } catch (UnsupportedOperationException e) {
+ // Expected
+ }
+ assertEquals(3, l.size());
+ Object a[] = l.toArray();
+ assertTrue(Arrays.equals(arr, a));
+ a = l.toArray(new String[2]);
+ assertTrue(Arrays.equals(arr, a));
+
+ }
+
+ }
Index: html/lexer/src/org/netbeans/lib/html/lexer/HTMLLexer.java
===================================================================
RCS file: /cvs/html/lexer/src/org/netbeans/lib/html/lexer/HTMLLexer.java,v
retrieving revision 1.3
diff -c -r1.3 HTMLLexer.java
*** html/lexer/src/org/netbeans/lib/html/lexer/HTMLLexer.java 28 Nov 2006 02:21:06 -0000 1.3
--- html/lexer/src/org/netbeans/lib/html/lexer/HTMLLexer.java 28 Nov 2006 14:19:07 -0000
***************
*** 30,36 ****
* @version 1.00
*/
! public class HTMLLexer implements Lexer {
private static final int EOF = LexerInput.EOF;
--- 30,36 ----
* @version 1.00
*/
! public final class HTMLLexer implements Lexer<HTMLTokenId> {
private static final int EOF = LexerInput.EOF;
***************
*** 39,45 ****
private TokenFactory tokenFactory;
public Object state() {
! return subState * 1000000 + state * 1000 + scriptState;
}
--- 39,45 ----
private TokenFactory tokenFactory;
public Object state() {
! return null;
}
Index: html/lexer/test/unit/src/org/netbeans/lib/html/lexer/HTMLLexerBatchTest.java
===================================================================
RCS file: /cvs/html/lexer/test/unit/src/org/netbeans/lib/html/lexer/HTMLLexerBatchTest.java,v
retrieving revision 1.3
diff -c -r1.3 HTMLLexerBatchTest.java
*** html/lexer/test/unit/src/org/netbeans/lib/html/lexer/HTMLLexerBatchTest.java 23 Oct 2006 14:50:47 -0000 1.3
--- html/lexer/test/unit/src/org/netbeans/lib/html/lexer/HTMLLexerBatchTest.java 28 Nov 2006 14:19:07 -0000
***************
*** 22,27 ****
--- 22,28 ----
import junit.framework.TestCase;
import org.netbeans.api.html.lexer.HTMLTokenId;
import org.netbeans.api.lexer.TokenHierarchy;
+ import org.netbeans.api.lexer.TokenId;
import org.netbeans.api.lexer.TokenSequence;
import org.netbeans.lib.lexer.test.LexerTestUtilities;
***************
*** 47,54 ****
public void testJspTags() {
String text = "abc>def ";
! TokenHierarchy hi = TokenHierarchy.create(text, HTMLTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.TAG_OPEN_SYMBOL, "<");
LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.TAG_OPEN, "jsp:useBean");
LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.WS, " ");
--- 48,55 ----
public void testJspTags() {
String text = "abc>def ";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, HTMLTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.TAG_OPEN_SYMBOL, "<");
LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.TAG_OPEN, "jsp:useBean");
LexerTestUtilities.assertNextTokenEquals(ts, HTMLTokenId.WS, " ");
Index: java/lexer/src/org/netbeans/api/java/lexer/JavaTokenId.java
===================================================================
RCS file: /cvs/java/lexer/src/org/netbeans/api/java/lexer/JavaTokenId.java,v
retrieving revision 1.5
diff -c -r1.5 JavaTokenId.java
*** java/lexer/src/org/netbeans/api/java/lexer/JavaTokenId.java 26 Oct 2006 20:45:19 -0000 1.5
--- java/lexer/src/org/netbeans/api/java/lexer/JavaTokenId.java 28 Nov 2006 14:19:49 -0000
***************
*** 238,277 ****
return new JavaLexer(info);
}
! protected LanguageEmbedding embedding(
! Token token, boolean tokenComplete,
! LanguagePath languagePath, InputAttributes inputAttributes) {
// Test language embedding in the block comment
switch (token.id()) {
case JAVADOC_COMMENT:
! return new LanguageEmbedding() {
! public Language extends TokenId> language() {
! return JavadocTokenId.language();
! }
!
! public int startSkipLength() {
! return 3;
! }
!
! public int endSkipLength() {
! return 2;
! }
! };
case STRING_LITERAL:
case STRING_LITERAL_INCOMPLETE:
! return new LanguageEmbedding() {
! public Language extends TokenId> language() {
! return JavaStringTokenId.language();
! }
!
! public int startSkipLength() {
! return 1;
! }
!
! public int endSkipLength() {
! return 1;
! }
! };
}
return null; // No embedding
}
--- 238,252 ----
return new JavaLexer(info);
}
! protected LanguageEmbedding<? extends TokenId> embedding(
! Token<JavaTokenId> token, LanguagePath languagePath, InputAttributes inputAttributes) {
// Test language embedding in the block comment
switch (token.id()) {
case JAVADOC_COMMENT:
! return LanguageEmbedding.create(JavadocTokenId.language(), 3, 2);
case STRING_LITERAL:
case STRING_LITERAL_INCOMPLETE:
! return LanguageEmbedding.create(JavaStringTokenId.language(), 1, 1);
}
return null; // No embedding
}
Index: java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaFlyTokensTest.java
===================================================================
RCS file: /cvs/java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaFlyTokensTest.java,v
retrieving revision 1.1
diff -c -r1.1 JavaFlyTokensTest.java
*** java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaFlyTokensTest.java 18 Oct 2006 11:35:58 -0000 1.1
--- java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaFlyTokensTest.java 28 Nov 2006 14:19:49 -0000
***************
*** 25,30 ****
--- 25,31 ----
import java.nio.CharBuffer;
import org.netbeans.api.java.lexer.JavaTokenId;
import org.netbeans.api.lexer.TokenHierarchy;
+ import org.netbeans.api.lexer.TokenId;
import org.netbeans.api.lexer.TokenSequence;
import org.netbeans.junit.NbTestCase;
import org.netbeans.lib.lexer.test.LexerTestUtilities;
***************
*** 57,64 ****
r.read(cb);
cb.rewind();
String text = cb.toString();
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
System.err.println("Flyweight tokens: " + LexerTestUtilities.flyweightTokenCount(ts)
+ "\nTotal tokens: " + ts.tokenCount()
--- 58,65 ----
r.read(cb);
cb.rewind();
String text = cb.toString();
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
System.err.println("Flyweight tokens: " + LexerTestUtilities.flyweightTokenCount(ts)
+ "\nTotal tokens: " + ts.tokenCount()
Index: java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaLexerBatchTest.java
===================================================================
RCS file: /cvs/java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaLexerBatchTest.java,v
retrieving revision 1.1
diff -c -r1.1 JavaLexerBatchTest.java
*** java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaLexerBatchTest.java 18 Oct 2006 11:35:58 -0000 1.1
--- java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaLexerBatchTest.java 28 Nov 2006 14:19:49 -0000
***************
*** 24,29 ****
--- 24,30 ----
import org.netbeans.api.java.lexer.JavaTokenId;
import org.netbeans.api.java.lexer.JavadocTokenId;
import org.netbeans.api.lexer.TokenHierarchy;
+ import org.netbeans.api.lexer.TokenId;
import org.netbeans.api.lexer.TokenSequence;
import org.netbeans.lib.lexer.test.LexerTestUtilities;
***************
*** 48,55 ****
public void testComments() {
String text = "/*ml-comment*//**//***//**\n*javadoc-comment*//* a";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.BLOCK_COMMENT, "/*ml-comment*/");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.BLOCK_COMMENT, "/**/");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.JAVADOC_COMMENT, "/***/");
--- 49,56 ----
public void testComments() {
String text = "/*ml-comment*//**//***//**\n*javadoc-comment*//* a";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.BLOCK_COMMENT, "/*ml-comment*/");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.BLOCK_COMMENT, "/**/");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.JAVADOC_COMMENT, "/***/");
***************
*** 59,66 ****
public void testIdentifiers() {
String text = "a ab aB2 2a x\nyZ\r\nz";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "a");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "ab");
--- 60,67 ----
public void testIdentifiers() {
String text = "a ab aB2 2a x\nyZ\r\nz";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "a");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "ab");
***************
*** 79,86 ****
public void testCharLiterals() {
String text = "'' 'a''' '\\'' '\\\\' '\\\\\\'' '\\n' 'a";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CHAR_LITERAL, "''");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CHAR_LITERAL, "'a'");
--- 80,87 ----
public void testCharLiterals() {
String text = "'' 'a''' '\\'' '\\\\' '\\\\\\'' '\\n' 'a";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CHAR_LITERAL, "''");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CHAR_LITERAL, "'a'");
***************
*** 99,106 ****
public void testStringLiterals() {
String text = "\"\" \"a\"\"\" \"\\\"\" \"\\\\\" \"\\\\\\\"\" \"\\n\" \"a";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"\"");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"a\"");
--- 100,107 ----
public void testStringLiterals() {
String text = "\"\" \"a\"\"\" \"\\\"\" \"\\\\\" \"\\\\\\\"\" \"\\n\" \"a";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"\"");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"a\"");
***************
*** 120,127 ****
public void testNumberLiterals() {
String text = "0 00 09 1 12 0L 1l 12L 0x1 0xf 0XdE 0Xbcy" +
" 09.5 1.5f 2.5d 6d 7e3 6.1E-7f 0xa.5dp+12d .3";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.INT_LITERAL, "0");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.INT_LITERAL, "00");
--- 121,128 ----
public void testNumberLiterals() {
String text = "0 00 09 1 12 0L 1l 12L 0x1 0xf 0XdE 0Xbcy" +
" 09.5 1.5f 2.5d 6d 7e3 6.1E-7f 0xa.5dp+12d .3";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.INT_LITERAL, "0");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.INT_LITERAL, "00");
***************
*** 166,173 ****
public void testOperators() {
String text = "^ ^= % %= * *= / /= = ==";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARET, "^");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARETEQ, "^=");
--- 167,174 ----
public void testOperators() {
String text = "^ ^= % %= * *= / /= = ==";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARET, "^");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.CARETEQ, "^=");
***************
*** 197,204 ****
"synchronized this throw throws transient try void volatile while " +
"null true false";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.ABSTRACT, "abstract");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.ASSERT, "assert");
--- 198,205 ----
"synchronized this throw throws transient try void volatile while " +
"null true false";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.ABSTRACT, "abstract");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.ASSERT, "assert");
***************
*** 310,317 ****
public void testNonKeywords() {
String text = "abstracta assertx b br car dou doubl finall im i ifa inti throwsx";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "abstracta");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "assertx");
--- 311,318 ----
public void testNonKeywords() {
String text = "abstracta assertx b br car dou doubl finall im i ifa inti throwsx";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "abstracta");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.WHITESPACE, " ");
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "assertx");
***************
*** 342,349 ****
public void testEmbedding() {
String text = "ddx \"d\\t\\br\" /** @see X */";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "ddx");
assertEquals(0, ts.offset());
--- 343,350 ----
public void testEmbedding() {
String text = "ddx \"d\\t\\br\" /** @see X */";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.IDENTIFIER, "ddx");
assertEquals(0, ts.offset());
***************
*** 352,358 ****
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"d\\t\\br\"");
assertEquals(4, ts.offset());
! TokenSequence es = ts.embedded();
LexerTestUtilities.assertNextTokenEquals(es, JavaStringTokenId.TEXT, "d");
assertEquals(5, es.offset());
--- 353,359 ----
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.STRING_LITERAL, "\"d\\t\\br\"");
assertEquals(4, ts.offset());
! TokenSequence<? extends TokenId> es = ts.embedded();
LexerTestUtilities.assertNextTokenEquals(es, JavaStringTokenId.TEXT, "d");
assertEquals(5, es.offset());
***************
*** 370,376 ****
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.JAVADOC_COMMENT, "/** @see X */");
assertEquals(13, ts.offset());
! TokenSequence ds = ts.embedded();
LexerTestUtilities.assertNextTokenEquals(ds, JavadocTokenId.OTHER_TEXT, " ");
assertEquals(16, ds.offset());
--- 371,377 ----
LexerTestUtilities.assertNextTokenEquals(ts, JavaTokenId.JAVADOC_COMMENT, "/** @see X */");
assertEquals(13, ts.offset());
! TokenSequence<? extends TokenId> ds = ts.embedded();
LexerTestUtilities.assertNextTokenEquals(ds, JavadocTokenId.OTHER_TEXT, " ");
assertEquals(16, ds.offset());
Index: java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaStringLexerTest.java
===================================================================
RCS file: /cvs/java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaStringLexerTest.java,v
retrieving revision 1.4
diff -c -r1.4 JavaStringLexerTest.java
*** java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaStringLexerTest.java 7 Nov 2006 16:31:43 -0000 1.4
--- java/lexer/test/unit/src/org/netbeans/lib/java/lexer/JavaStringLexerTest.java 28 Nov 2006 14:19:49 -0000
***************
*** 20,25 ****
--- 20,26 ----
import org.netbeans.api.java.lexer.JavaStringTokenId;
import org.netbeans.api.lexer.TokenHierarchy;
+ import org.netbeans.api.lexer.TokenId;
import org.netbeans.api.lexer.TokenSequence;
import org.netbeans.junit.NbTestCase;
import org.netbeans.lib.lexer.test.LexerTestUtilities;
***************
*** 40,55 ****
public void testNextToken1() {
String text = "t";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaStringTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TEXT, "t");
}
public void testNextToken2() {
String text = "\\t\\b\\b\\t \\tabc\\rsddfdsffffffffff";
! TokenHierarchy hi = TokenHierarchy.create(text, JavaStringTokenId.language());
! TokenSequence ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TAB, "\\t");
LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b");
LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b");
--- 41,56 ----
public void testNextToken1() {
String text = "t";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaStringTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TEXT, "t");
}
public void testNextToken2() {
String text = "\\t\\b\\b\\t \\tabc\\rsddfdsffffffffff";
! TokenHierarchy<?> hi = TokenHierarchy.create(text, JavaStringTokenId.language());
! TokenSequence<? extends TokenId> ts = hi.tokenSequence();
LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.TAB, "\\t");
LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b");
LexerTestUtilities.assertNextTokenEquals(ts, JavaStringTokenId.BACKSPACE, "\\b");
Index: languages/engine/src/org/netbeans/modules/languages/lexer/SLanguageProvider.java
===================================================================
RCS file: /cvs/languages/engine/src/org/netbeans/modules/languages/lexer/SLanguageProvider.java,v
retrieving revision 1.1
diff -c -r1.1 SLanguageProvider.java
*** languages/engine/src/org/netbeans/modules/languages/lexer/SLanguageProvider.java 7 Nov 2006 12:17:28 -0000 1.1
--- languages/engine/src/org/netbeans/modules/languages/lexer/SLanguageProvider.java 28 Nov 2006 14:20:05 -0000
***************
*** 35,41 ****
return null;
}
! public LanguageEmbedding findLanguageEmbedding (LanguagePath tokenLanguage, Token token, InputAttributes inputAttributes) {
return null;
}
}
--- 35,41 ----
return null;
}
! public LanguageEmbedding<? extends TokenId> findLanguageEmbedding (Token<? extends TokenId> token, LanguagePath tokenLanguage, InputAttributes inputAttributes) {
return null;
}
}
Index: lexer/arch.xml
===================================================================
RCS file: /cvs/lexer/arch.xml,v
retrieving revision 1.5
diff -c -r1.5 arch.xml
*** lexer/arch.xml 8 Nov 2006 18:49:18 -0000 1.5
--- lexer/arch.xml 28 Nov 2006 14:20:12 -0000
***************
*** 4,10 ****
]>
--- 4,10 ----
]>
***************
*** 1582,1587 ****
--- 1582,1613 ----
There are no plans to deprecated any part of the present API
and it should be evolved in a compatible way.
+
+
+
+
+
+
+
+
+
+ No.
Index: lexer/manifest.mf
===================================================================
RCS file: /cvs/lexer/manifest.mf,v
retrieving revision 1.11
diff -c -r1.11 manifest.mf
*** lexer/manifest.mf 17 Oct 2006 22:36:33 -0000 1.11
--- lexer/manifest.mf 28 Nov 2006 14:20:12 -0000
***************
*** 1,4 ****
OpenIDE-Module: org.netbeans.modules.lexer/2
OpenIDE-Module-Localizing-Bundle: org/netbeans/lib/lexer/Bundle.properties
! OpenIDE-Module-Specification-Version: 1.10.0
OpenIDE-Module-Recommends: org.netbeans.spi.lexer.LanguageProvider
--- 1,4 ----
OpenIDE-Module: org.netbeans.modules.lexer/2
OpenIDE-Module-Localizing-Bundle: org/netbeans/lib/lexer/Bundle.properties
! OpenIDE-Module-Specification-Version: 1.11.0
OpenIDE-Module-Recommends: org.netbeans.spi.lexer.LanguageProvider
Index: lexer/api/apichanges.xml
===================================================================
RCS file: /cvs/lexer/api/apichanges.xml,v
retrieving revision 1.8
diff -c -r1.8 apichanges.xml
*** lexer/api/apichanges.xml 17 Oct 2006 22:36:33 -0000 1.8
--- lexer/api/apichanges.xml 28 Nov 2006 14:20:12 -0000
***************
*** 91,121 ****
!
--- 91,143 ----
!
!
! Adding custom embedding creation TokenSequence.createEmbedding()
!
!
!
!
! Extracted TokenHierarchyEvent.Type
inner class
! into TokenHierarchyEventType
top-level class.
!
! Adding TokenSequence.createEmbedding()
! method for creation of a custom embedding.
! TokenHierarchyEventType.EMBEDDING
fired
! after embedding creation.
!
! Affected offset information (affectedStartOffset()
! and affectedEndOffset()
) moved
! from TokenChange
to TokenHierarchyEvent
!
! There can be now more than one embedded change in a TokenChange.
!
! Removed tokenComplete
parameter from
! LanguageHierarchy.embedding()
because the token incompletness
! will be handled in a different way.
!
! Swapped order of token
and languagePath
! parameters in LanguageProvider
to be in sync with
! LanguageHierarchy.embedding()
.
!
! LanguageEmbedding
is now a final class
! (instead of abstract class) with private constructor
! and static create()
method. That allows better control
! over the evolution of the class and it also allows to cache the created embeddings
! to save memory.
!
! LanguageEmbedding
is now generified with the
! T extends TokenId
which is a generification
! of the language which it contains.
!
! TokenHierarchy.languagePaths()
set contains all language paths
! used in the token hierarchy. TokenHierarchyEventType.LANGUAGE_PATHS
! fired after change of that set.
Index: lexer/editorbridge/src/org/netbeans/modules/lexer/editorbridge/LexerLayer.java
===================================================================
RCS file: /cvs/lexer/editorbridge/src/org/netbeans/modules/lexer/editorbridge/LexerLayer.java,v
retrieving revision 1.6
diff -c -r1.6 LexerLayer.java
*** lexer/editorbridge/src/org/netbeans/modules/lexer/editorbridge/LexerLayer.java 8 Nov 2006 16:44:10 -0000 1.6
--- lexer/editorbridge/src/org/netbeans/modules/lexer/editorbridge/LexerLayer.java 28 Nov 2006 14:20:13 -0000
***************
*** 336,343 ****
public void tokenHierarchyChanged(TokenHierarchyEvent evt) {
javax.swing.plaf.TextUI ui = (javax.swing.plaf.TextUI)component.getUI();
! int startRepaintOffset = evt.tokenChange().modifiedTokensStartOffset();
! int endRepaintOffset = Math.max(evt.tokenChange().addedTokensEndOffset(), startRepaintOffset + 1);
ui.damageRange(component, startRepaintOffset, endRepaintOffset);
}
--- 336,343 ----
public void tokenHierarchyChanged(TokenHierarchyEvent evt) {
javax.swing.plaf.TextUI ui = (javax.swing.plaf.TextUI)component.getUI();
! int startRepaintOffset = evt.affectedStartOffset();
! int endRepaintOffset = Math.max(evt.affectedEndOffset(), startRepaintOffset + 1);
ui.damageRange(component, startRepaintOffset, endRepaintOffset);
}
Index: lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/LanguagesEmbeddingMap.java
===================================================================
RCS file: /cvs/lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/LanguagesEmbeddingMap.java,v
retrieving revision 1.3
diff -c -r1.3 LanguagesEmbeddingMap.java
*** lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/LanguagesEmbeddingMap.java 12 Oct 2006 03:28:45 -0000 1.3
--- lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/LanguagesEmbeddingMap.java 28 Nov 2006 14:20:13 -0000
***************
*** 20,25 ****
--- 20,26 ----
package org.netbeans.modules.lexer.nbbridge;
import java.util.Map;
+ import org.netbeans.api.lexer.TokenId;
import org.netbeans.spi.lexer.LanguageEmbedding;
/**
***************
*** 28,41 ****
*/
public final class LanguagesEmbeddingMap {
! private Map map;
/** Creates a new instance of LanguagesEmbeddingMap */
! public LanguagesEmbeddingMap(Map map) {
this.map = map;
}
! public synchronized LanguageEmbedding getLanguageEmbeddingForTokenName(String tokenName) {
return map.get(tokenName);
}
}
--- 29,42 ----
*/
public final class LanguagesEmbeddingMap {
! private Map<String, LanguageEmbedding<? extends TokenId>> map;
/** Creates a new instance of LanguagesEmbeddingMap */
! public LanguagesEmbeddingMap(Map<String, LanguageEmbedding<? extends TokenId>> map) {
this.map = map;
}
! public synchronized LanguageEmbedding<? extends TokenId> getLanguageEmbeddingForTokenName(String tokenName) {
return map.get(tokenName);
}
}
Index: lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupFolderInfo.java
===================================================================
RCS file: /cvs/lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupFolderInfo.java,v
retrieving revision 1.5
diff -c -r1.5 MimeLookupFolderInfo.java
*** lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupFolderInfo.java 7 Nov 2006 17:15:13 -0000 1.5
--- lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupFolderInfo.java 28 Nov 2006 14:20:13 -0000
***************
*** 60,66 ****
}
public Object createInstance(List fileObjectList) {
! HashMap map = new HashMap();
for(Object o : fileObjectList) {
assert o instanceof FileObject : "fileObjectList should contain FileObjects and not " + o; //NOI18N
--- 60,67 ----
}
public Object createInstance(List fileObjectList) {
! HashMap> map
! = new HashMap>();
for(Object o : fileObjectList) {
assert o instanceof FileObject : "fileObjectList should contain FileObjects and not " + o; //NOI18N
***************
*** 75,81 ****
if (isMimeTypeValid(mimeType)) {
Language extends TokenId> language = LanguageManager.getInstance().findLanguage(mimeType);
if (language != null) {
! map.put(f.getName(), new EL(language, startSkipLength, endSkipLength));
} else {
LOG.warning("Can't find Language for mime type '" + mimeType + "', ignoring."); //NOI18N
}
--- 76,82 ----
if (isMimeTypeValid(mimeType)) {
Language extends TokenId> language = LanguageManager.getInstance().findLanguage(mimeType);
if (language != null) {
! map.put(f.getName(), LanguageEmbedding.create(language, startSkipLength, endSkipLength));
} else {
LOG.warning("Can't find Language for mime type '" + mimeType + "', ignoring."); //NOI18N
}
***************
*** 136,163 ****
}
}
- private static final class EL extends LanguageEmbedding {
-
- private Language extends TokenId> language;
- private int startSkipLength;
- private int endSkipLength;
-
- public EL(Language extends TokenId> language, int startSkipLength, int endSkipLength) {
- this.language = language;
- this.startSkipLength = startSkipLength;
- this.endSkipLength = endSkipLength;
- }
-
- public Language extends TokenId> language() {
- return language;
- }
-
- public int startSkipLength() {
- return startSkipLength;
- }
-
- public int endSkipLength() {
- return endSkipLength;
- }
- } // End of EL class
}
--- 137,140 ----
Index: lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupLanguageProvider.java
===================================================================
RCS file: /cvs/lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupLanguageProvider.java,v
retrieving revision 1.5
diff -c -r1.5 MimeLookupLanguageProvider.java
*** lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupLanguageProvider.java 26 Oct 2006 20:45:21 -0000 1.5
--- lexer/nbbridge/src/org/netbeans/modules/lexer/nbbridge/MimeLookupLanguageProvider.java 28 Nov 2006 14:20:13 -0000
***************
*** 45,52 ****
return (Language extends TokenId>)lookup.lookup(Language.class);
}
! public LanguageEmbedding findLanguageEmbedding(LanguagePath tokenLanguage, Token token, InputAttributes inputAttributes) {
! Lookup lookup = MimeLookup.getLookup(MimePath.parse(tokenLanguage.mimePath()));
LanguagesEmbeddingMap map = lookup.lookup(LanguagesEmbeddingMap.class);
return map == null ? null : map.getLanguageEmbeddingForTokenName(token.id().name());
}
--- 45,53 ----
return (Language extends TokenId>)lookup.lookup(Language.class);
}
! public LanguageEmbedding extends TokenId> findLanguageEmbedding(
! Token extends TokenId> token, LanguagePath languagePath, InputAttributes inputAttributes) {
! Lookup lookup = MimeLookup.getLookup(MimePath.parse(languagePath.mimePath()));
LanguagesEmbeddingMap map = lookup.lookup(LanguagesEmbeddingMap.class);
return map == null ? null : map.getLanguageEmbeddingForTokenName(token.id().name());
}
Index: lexer/nbproject/project.xml
===================================================================
RCS file: /cvs/lexer/nbproject/project.xml,v
retrieving revision 1.10
diff -c -r1.10 project.xml
*** lexer/nbproject/project.xml 7 Nov 2006 23:43:47 -0000 1.10
--- lexer/nbproject/project.xml 28 Nov 2006 14:20:14 -0000
***************
*** 29,35 ****
1
! 1.12
--- 29,35 ----
1
! 1.14
Index: lexer/src/org/netbeans/api/lexer/Language.java
===================================================================
RCS file: /cvs/lexer/src/org/netbeans/api/lexer/Language.java,v
retrieving revision 1.5
diff -c -r1.5 Language.java
*** lexer/src/org/netbeans/api/lexer/Language.java 26 Oct 2006 20:45:22 -0000 1.5
--- lexer/src/org/netbeans/api/lexer/Language.java 28 Nov 2006 14:20:14 -0000
***************
*** 32,37 ****
--- 32,39 ----
import org.netbeans.lib.lexer.LexerSpiPackageAccessor;
import org.netbeans.lib.lexer.TokenIdSet;
import org.netbeans.lib.lexer.TokenHierarchyOperation;
+ import org.netbeans.lib.lexer.inc.TokenChangeInfo;
+ import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo;
import org.netbeans.lib.lexer.inc.TokenListChange;
import org.netbeans.spi.lexer.LanguageHierarchy;
***************
*** 404,410 ****
// List.get(0) is a Map[category, list-of-[category]].
// List.get(1) is a Map[category1, Map[category2, list-of-[category1;category2]]].
// etc.
! List> catMapsList = new ArrayList>(4);
// All categories for a single token id
List idCats = new ArrayList(4);
for (T id : ids) {
--- 406,412 ----
// List.get(0) is a Map[category, list-of-[category]].
// List.get(1) is a Map[category1, Map[category2, list-of-[category1;category2]]].
// etc.
! List> catMapsList = new ArrayList>(4);
// All categories for a single token id
List idCats = new ArrayList(4);
for (T id : ids) {
***************
*** 431,439 ****
*
* @param catMapsList non-null list of cached maps.
*
! * List.get(0) is a Map[category, list-of-[category]].
*
! * List.get(1) is a Map[category1, Map[category2, list-of-[category1;category2]]].
*
* etc.
*
--- 433,441 ----
*
* @param catMapsList non-null list of cached maps.
*
! * List.get(0) is a Map[category, list-containing-[category]].
*
! * List.get(1) is a Map[category1, Map[category2, list-containing-[category1;category2]]].
*
* etc.
*
***************
*** 443,475 ****
* of all categories or 1 for returning non-primary categories.
* @return non-null cached list of categories with contents equal to idCats.
*/
! @SuppressWarnings("unchecked")
! private static List findCatList(List> catMapsList, List idCats, int startIndex) {
int size = idCats.size() - startIndex;
if (size <= 0) {
return Collections.emptyList();
}
while (catMapsList.size() < size) {
! catMapsList.add(new HashMap>());
}
// Find the catList as the last item in the cascaded search through the maps
! Map m = catMapsList.get(--size);
! for (int i = startIndex; i < size; i++) {
! Map catMap = (Map)m.get(idCats.get(i));
if (catMap == null) {
! catMap = new HashMap();
! ((Map)m).put(idCats.get(i), catMap);
}
m = catMap;
}
List catList = (List)m.get(idCats.get(size));
if (catList == null) {
catList = new ArrayList(idCats.size() - startIndex);
catList.addAll((startIndex > 0)
? idCats.subList(startIndex, idCats.size())
: idCats);
! ((Map>)m).put(idCats.get(size), catList);
}
return catList;
}
--- 445,479 ----
* of all categories or 1 for returning non-primary categories.
* @return non-null cached list of categories with contents equal to idCats.
*/
! private static List findCatList(List> catMapsList, List idCats, int startIndex) {
int size = idCats.size() - startIndex;
if (size <= 0) {
return Collections.emptyList();
}
while (catMapsList.size() < size) {
! catMapsList.add(new HashMap());
}
// Find the catList as the last item in the cascaded search through the maps
! Map m = catMapsList.get(--size);
! for (int i = startIndex; i < size; i++) {
! @SuppressWarnings("unchecked")
! Map catMap = (Map)m.get(idCats.get(i));
if (catMap == null) {
! catMap = new HashMap();
! // Map>
! m.put(idCats.get(i), catMap);
}
m = catMap;
}
+ @SuppressWarnings("unchecked")
List catList = (List)m.get(idCats.get(size));
if (catList == null) {
catList = new ArrayList(idCats.size() - startIndex);
catList.addAll((startIndex > 0)
? idCats.subList(startIndex, idCats.size())
: idCats);
! m.put(idCats.get(size), catList);
}
return catList;
}
***************
*** 507,513 ****
}
public String toString() {
! return "LH: " + languageHierarchy;
}
private void checkMemberId(T id) {
--- 511,517 ----
}
public String toString() {
! return mimeType + ", LH: " + languageHierarchy;
}
private void checkMemberId(T id) {
***************
*** 549,571 ****
return new Language(languageHierarchy);
}
! public LanguageHierarchy languageHierarchy(
! Language language) {
return language.languageHierarchy();
}
public TokenHierarchy createTokenHierarchy(
! TokenHierarchyOperation tokenHierarchyOperation) {
return new TokenHierarchy(tokenHierarchyOperation);
}
public TokenHierarchyEvent createTokenChangeEvent(
! TokenHierarchy tokenHierarchy, TokenListChange change) {
! return new TokenHierarchyEvent(change);
}
! public TokenHierarchyOperation tokenHierarchyOperation(
! TokenHierarchy tokenHierarchy) {
return tokenHierarchy.operation();
}
--- 553,585 ----
return new Language(languageHierarchy);
}
! public LanguageHierarchy languageHierarchy(
! Language language) {
return language.languageHierarchy();
}
public TokenHierarchy createTokenHierarchy(
! TokenHierarchyOperation tokenHierarchyOperation) {
return new TokenHierarchy(tokenHierarchyOperation);
}
public TokenHierarchyEvent createTokenChangeEvent(
! TokenHierarchyEventInfo info) {
! return new TokenHierarchyEvent(info);
}
! public TokenChange createTokenChange(
! TokenChangeInfo info) {
! return new TokenChange(info);
! }
!
! public TokenChangeInfo tokenChangeInfo(
! TokenChange tokenChange) {
! return tokenChange.info();
! }
!
! public TokenHierarchyOperation tokenHierarchyOperation(
! TokenHierarchy tokenHierarchy) {
return tokenHierarchy.operation();
}
Index: lexer/src/org/netbeans/api/lexer/LanguagePath.java
===================================================================
RCS file: /cvs/lexer/src/org/netbeans/api/lexer/LanguagePath.java,v
retrieving revision 1.5
diff -c -r1.5 LanguagePath.java
*** lexer/src/org/netbeans/api/lexer/LanguagePath.java 27 Oct 2006 12:59:07 -0000 1.5
--- lexer/src/org/netbeans/api/lexer/LanguagePath.java 28 Nov 2006 14:20:14 -0000
***************
*** 324,330 ****
}
}
- @SuppressWarnings("unchecked")
private Language extends TokenId>[] allocateLanguageArray(int length) {
return (Language extends TokenId>[])(new Language[length]);
}
--- 324,329 ----
Index: lexer/src/org/netbeans/api/lexer/Token.java
===================================================================
RCS file: /cvs/lexer/src/org/netbeans/api/lexer/Token.java,v
retrieving revision 1.4
diff -c -r1.4 Token.java
*** lexer/src/org/netbeans/api/lexer/Token.java 4 Oct 2006 17:03:12 -0000 1.4
--- lexer/src/org/netbeans/api/lexer/Token.java 28 Nov 2006 14:20:14 -0000
***************
*** 170,176 ****
* @return >=0 offset of the token in the input or -1
* if this token is flyweight.
*/
! public abstract int offset(TokenHierarchy tokenHierarchy);
/**
* Checks whether this token instance is used for multiple occurrences
--- 170,176 ----
* @return >=0 offset of the token in the input or -1
* if this token is flyweight.
*/
! public abstract int offset(TokenHierarchy> tokenHierarchy);
/**
* Checks whether this token instance is used for multiple occurrences
Index: lexer/src/org/netbeans/api/lexer/TokenChange.java
===================================================================
RCS file: /cvs/lexer/src/org/netbeans/api/lexer/TokenChange.java,v
retrieving revision 1.4
diff -c -r1.4 TokenChange.java
*** lexer/src/org/netbeans/api/lexer/TokenChange.java 26 Oct 2006 20:45:22 -0000 1.4
--- lexer/src/org/netbeans/api/lexer/TokenChange.java 28 Nov 2006 14:20:14 -0000
***************
*** 19,26 ****
package org.netbeans.api.lexer;
import org.netbeans.lib.lexer.TokenList;
! import org.netbeans.lib.lexer.inc.TokenListChange;
/**
* Token change describes modification on one level of a token hierarchy.
--- 19,27 ----
package org.netbeans.api.lexer;
+ import org.netbeans.lib.lexer.LexerUtilsConstants;
import org.netbeans.lib.lexer.TokenList;
! import org.netbeans.lib.lexer.inc.TokenChangeInfo;
/**
* Token change describes modification on one level of a token hierarchy.
***************
*** 28,34 ****
* If there is only one token that was modified
* and there was a language embedding in that token then
* most of the embedded tokens can usually be retained.
! * This defines an embedded change accessible by {@link #embedded()}.
*
* There may possibly be multiple levels of the embedded changes.
*
--- 29,35 ----
* If there is only one token that was modified
* and there was a language embedding in that token then
* most of the embedded tokens can usually be retained.
! * This defines an embedded change accessible by {@link #embeddedChange(int)}.
*
* There may possibly be multiple levels of the embedded changes.
*
***************
*** 38,79 ****
public final class TokenChange {
! private final TokenListChange tokenListChange;
! TokenChange(TokenListChange tokenListChange) {
! this.tokenListChange = tokenListChange;
}
/**
! * Get embedded token change.
! *
! * If there is only one token that was modified
! * and there was a language embedding in that token then
! * there is possibility that the new token will be similar
! * to the old one and the embedded tokens can be retained
! * and just updated by another token change.
! *
! * In such case there will be an embedded token change.
*
! * @return valid embedded token change or null if there
! * is no embedded token change.
*/
! public TokenChange extends TokenId> embedded() {
! return null; // TODO
}
/**
! * Get embedded token change of the given type
! * only if it's of the given language.
*
* @return non-null token change or null if the embedded token change
* satisfies the condition (embedded().language() == language)
.
* Null is returned otherwise.
*/
! public TokenChange embedded(Language language) {
! @SuppressWarnings("unchecked")
! TokenChange e = (TokenChange)embedded();
! return (e != null && e.language() == language) ? e : null;
}
/**
--- 39,87 ----
public final class TokenChange {
! private final TokenChangeInfo info;
! TokenChange(TokenChangeInfo info) {
! this.info = info;
}
/**
! * Get number of embedded changes contained in this change.
*
! * @return >=0 number of embedded changes.
*/
! public int embeddedChangeCount() {
! return info.embeddedChanges().length;
! }
!
! /**
! * Get embedded change at the given index.
! *
! * @param index 0 <= index < embeddedChangeCount() index of the embedded change.
! * @return non-null embedded token change.
! */
! public TokenChange extends TokenId> embeddedChange(int index) {
! return info.embeddedChanges()[index];
}
/**
! * Get embedded token change of the given type.
*
* @return non-null token change if the embedded token change
* satisfies the condition (embedded().language() == language)
.
* Null is returned otherwise.
*/
! public TokenChange embeddedChange(Language language) {
! TokenChange extends TokenId>[] ecs = info.embeddedChanges();
! for (int i = ecs.length - 1; i >= 0; i--) {
! TokenChange extends TokenId> c = ecs[i];
! if (c.language() == language) {
! @SuppressWarnings("unchecked")
! TokenChange ec = (TokenChange)c;
! return ec;
! }
! }
! return null;
}
/**
***************
*** 81,91 ****
* used by tokens contained in this token change.
*/
public Language language() {
! // No need to check as the token sequence should already
! // be obtained originally for the inner language
! @SuppressWarnings("unchecked") Language l
! = (Language)languagePath().innerLanguage();
! return l;
}
/**
--- 89,95 ----
* used by tokens contained in this token change.
*/
public Language language() {
! return LexerUtilsConstants.mostEmbeddedLanguage(languagePath());
}
/**
***************
*** 93,172 ****
* in this token sequence (containing outer language levels as well).
*/
public LanguagePath languagePath() {
! return tokenListChange.languagePath();
}
/**
- * Get start offset of the modification
- * that caused this token change.
- *
- * For token hierarchy rebuilds this is the start offset
- * of the area being rebuilt.
- */
- public int offset() {
- return tokenListChange.offset();
- }
-
- /**
- * Get number of characters inserted by the text modification
- * that caused this token change.
- *
- * For token hierarchy rebuilds this is the length
- * of the area being rebuilt.
- */
- public int insertedLength() {
- return tokenListChange.insertedLength();
- }
-
- /**
- * Get number of characters removed by the text modification
- * that caused this token change.
- *
- * For token hierarchy rebuilds this is the length
- * of the area being rebuilt.
- */
- public int removedLength() {
- return tokenListChange.removedLength();
- }
-
- /**
* Get index of the first token being modified.
*/
! public int tokenIndex() {
! return tokenListChange.tokenIndex();
! }
!
! /**
! * Get number of tokens removed.
! */
! public int removedTokenCount() {
! return tokenListChange.removedTokenList().tokenCount();
}
/**
* Get offset of the first token that was modified.
*
! * The returned value is always equal or below the {@link #offset()} value.
! *
! * If there were any removed tokens then this is a start offset
! * of the first removed token.
! *
! * If there were only added tokens (no removed tokens)
! * then this is the start offset of the first added token.
*/
! public int modifiedTokensStartOffset() {
! return tokenListChange.modifiedTokensStartOffset();
}
/**
! * Get end offset of the last token that was removed
! * (in the original offset space before the removal was done).
! *
! * If there were no removed tokens then the result of this method
! * is equal to {@link #modifiedTokensStartOffset()}.
*/
! public int removedTokensEndOffset() {
! return tokenListChange.removedTokensEndOffset();
}
/**
--- 97,128 ----
* in this token sequence (containing outer language levels as well).
*/
public LanguagePath languagePath() {
! return info.currentTokenList().languagePath();
}
/**
* Get index of the first token being modified.
*/
! public int index() {
! return info.index();
}
/**
* Get offset of the first token that was modified.
*
! * If there were any added/removed tokens then this is a start offset
! * of the first added/removed token.
*/
! public int offset() {
! return info.offset();
}
/**
! * Get number of removed tokens contained in this token change.
*/
! public int removedTokenCount() {
! TokenList extends TokenId> rtl = info.removedTokenList();
! return (rtl != null) ? rtl.tokenCount() : 0;
}
/**
***************
*** 182,208 ****
* or null if there were no removed tokens.
*/
public TokenSequence removedTokenSequence() {
! return new TokenSequence(tokenListChange.removedTokenList());
}
/**
* Get number of the tokens added by this token change.
*/
public int addedTokenCount() {
! return tokenListChange.addedTokenCount();
}
/**
- * Get end offset of the last token that was added.
- *
- * If there were no added tokens then the result of this method
- * is equal to {@link #modifiedTokensStartOffset()}.
- */
- public int addedTokensEndOffset() {
- return tokenListChange.addedTokensEndOffset();
- }
-
- /**
* Get the token sequence that corresponds to the current state
* of the token hierarchy.
*
--- 138,154 ----
* or null if there were no removed tokens.
*/
public TokenSequence removedTokenSequence() {
! return new TokenSequence(info.removedTokenList());
}
/**
* Get number of the tokens added by this token change.
*/
public int addedTokenCount() {
! return info.addedTokenCount();
}
/**
* Get the token sequence that corresponds to the current state
* of the token hierarchy.
*
***************
*** 210,227 ****
* the token sequence at the corresponding embedded level.
*/
public TokenSequence currentTokenSequence() {
! return new TokenSequence(tokenListChange.currentTokenList());
}
!
/**
! * Get token hierarchy where this change occurred.
*/
! public TokenHierarchy> tokenHierarchy() {
! return tokenListChange.tokenHierarchyOperation().tokenHierarchy();
! }
!
! TokenListChange tokenListChange() {
! return tokenListChange;
}
}
--- 156,169 ----
* the token sequence at the corresponding embedded level.
*/
public TokenSequence currentTokenSequence() {
! return new TokenSequence(info.currentTokenList());
}
!
/**
! * Used by package-private accessor.
*/
! TokenChangeInfo info() {
! return info;
}
}
Index: lexer/src/org/netbeans/api/lexer/TokenHierarchy.java
===================================================================
RCS file: /cvs/lexer/src/org/netbeans/api/lexer/TokenHierarchy.java,v
retrieving revision 1.3
diff -c -r1.3 TokenHierarchy.java
*** lexer/src/org/netbeans/api/lexer/TokenHierarchy.java 26 Oct 2006 20:45:23 -0000 1.3
--- lexer/src/org/netbeans/api/lexer/TokenHierarchy.java 28 Nov 2006 14:20:15 -0000
***************
*** 109,115 ****
CharSequence inputText, boolean copyInputText,
Language language, Set skipTokenIds, InputAttributes inputAttributes) {
! return new TokenHierarchyOperation(inputText, copyInputText,
language, skipTokenIds, inputAttributes).tokenHierarchy();
}
--- 109,115 ----
CharSequence inputText, boolean copyInputText,
Language language, Set skipTokenIds, InputAttributes inputAttributes) {
! return new TokenHierarchyOperation(inputText, copyInputText,
language, skipTokenIds, inputAttributes).tokenHierarchy();
}
***************
*** 139,152 ****
Reader inputReader,
Language language, Set skipTokenIds, InputAttributes inputAttributes) {
! return new TokenHierarchyOperation(inputReader,
language, skipTokenIds, inputAttributes).tokenHierarchy();
}
! private TokenHierarchyOperation operation;
! TokenHierarchy(TokenHierarchyOperation operation) {
this.operation = operation;
}
--- 139,152 ----
Reader inputReader,
Language language, Set skipTokenIds, InputAttributes inputAttributes) {
! return new TokenHierarchyOperation(inputReader,
language, skipTokenIds, inputAttributes).tokenHierarchy();
}
! private TokenHierarchyOperation operation;
! TokenHierarchy(TokenHierarchyOperation operation) {
this.operation = operation;
}
***************
*** 159,166 ****
* @return non-null token sequence of the top level of the token hierarchy.
*/
public TokenSequence extends TokenId> tokenSequence() {
! @SuppressWarnings("unchecked") TokenSequence extends TokenId> ts
! = new TokenSequence(operation.checkedTokenList());
return ts;
}
--- 159,167 ----
* @return non-null token sequence of the top level of the token hierarchy.
*/
public TokenSequence extends TokenId> tokenSequence() {
! @SuppressWarnings("unchecked")
! TokenSequence extends TokenId> ts = new TokenSequence(
! (TokenList)operation.checkedTokenList());
return ts;
}
***************
*** 174,198 ****
*
*/
public TokenSequence tokenSequence(Language language) {
! TokenList tokenList = operation.checkedTokenList();
! @SuppressWarnings("unchecked") TokenSequence ts
= (tokenList.languagePath().topLanguage() == language)
! ? new TokenSequence(tokenList)
: null;
return ts;
}
/**
! * Whether this provider supports token changes (upon change of the underlying
! * text input) or not.
*
! * If changes are not supported then it has no sense
! * to attach token change listeners (though it's allowed)
! * as they would never be fired.
! *
! * Token hierarchy snapshots do not fire token change events.
*
! * @return true if this provider supports token changes or false otherwise.
*/
public boolean isMutable() {
return operation.isMutable();
--- 175,206 ----
*
*/
public TokenSequence tokenSequence(Language language) {
! TokenList extends TokenId> tokenList = operation.checkedTokenList();
! @SuppressWarnings("unchecked")
! TokenSequence ts
= (tokenList.languagePath().topLanguage() == language)
! ? new TokenSequence((TokenList)tokenList)
: null;
return ts;
}
/**
! * Get a set of language paths used by this token hierarchy.
*
! * The set includes "static" paths that are those reachable by traversing
! * token ids of the top language and searching for the default embeddings
! * that could be created by
! * {@link org.netbeans.spi.lexer.LanguageHierarchy#embedding(Token,LanguagePath,InputAttributes)}.
! *
! */
! public Set languagePaths() {
! return operation.languagePaths();
! }
!
! /**
! * Whether input text of this token hierarchy is mutable or not.
*
! * @return true if the input text is mutable or false otherwise.
*/
public boolean isMutable() {
return operation.isMutable();
***************
*** 213,220 ****
* was not created over mutable input source.
*/
public I mutableInputSource() {
! @SuppressWarnings("unchecked") I input = (I)operation.mutableInputSource();
! return input;
}
/**
--- 221,227 ----
* was not created over mutable input source.
*/
public I mutableInputSource() {
! return operation.mutableInputSource();
}
/**
***************
*** 397,403 ****
* Obtaining of token hierarchy operation is only intended to be done
* by package accessor.
*/
! TokenHierarchyOperation operation() {
return operation;
}
--- 404,410 ----
* Obtaining of token hierarchy operation is only intended to be done
* by package accessor.
*/
! TokenHierarchyOperation operation() {
return operation;
}
Index: lexer/src/org/netbeans/api/lexer/TokenHierarchyEvent.java
===================================================================
RCS file: /cvs/lexer/src/org/netbeans/api/lexer/TokenHierarchyEvent.java,v
retrieving revision 1.4
diff -c -r1.4 TokenHierarchyEvent.java
*** lexer/src/org/netbeans/api/lexer/TokenHierarchyEvent.java 26 Oct 2006 20:45:23 -0000 1.4
--- lexer/src/org/netbeans/api/lexer/TokenHierarchyEvent.java 28 Nov 2006 14:20:15 -0000
***************
*** 19,25 ****
package org.netbeans.api.lexer;
! import org.netbeans.lib.lexer.TokenList;
import org.netbeans.lib.lexer.inc.TokenListChange;
/**
--- 19,25 ----
package org.netbeans.api.lexer;
! import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo;
import org.netbeans.lib.lexer.inc.TokenListChange;
/**
***************
*** 31,56 ****
public final class TokenHierarchyEvent extends java.util.EventObject {
! private final TokenChange extends TokenId> tokenChange;
! TokenHierarchyEvent(TokenListChange tokenListChange) {
! super(tokenListChange.tokenHierarchyOperation().tokenHierarchy());
! this.tokenChange = new TokenChange(tokenListChange);
}
/**
* Get source of this event as a token hierarchy instance.
*/
! public TokenHierarchy tokenHierarchy() {
! return (TokenHierarchy)getSource();
}
/**
* Get the token change that occurred in the tokens
* at the top-level of the token hierarchy.
*/
public TokenChange extends TokenId> tokenChange() {
! return tokenChange;
}
/**
--- 31,63 ----
public final class TokenHierarchyEvent extends java.util.EventObject {
! private final TokenHierarchyEventInfo info;
! TokenHierarchyEvent(TokenHierarchyEventInfo info) {
! super(info.tokenHierarchyOperation().tokenHierarchy());
! this.info = info;
}
/**
* Get source of this event as a token hierarchy instance.
*/
! public TokenHierarchy> tokenHierarchy() {
! return (TokenHierarchy>)getSource();
}
/**
+ * Get reason why a token hierarchy event was fired.
+ */
+ public TokenHierarchyEventType type() {
+ return info.type();
+ }
+
+ /**
* Get the token change that occurred in the tokens
* at the top-level of the token hierarchy.
*/
public TokenChange extends TokenId> tokenChange() {
! return info.tokenChange();
}
/**
***************
*** 63,139 ****
* Returns null otherwise.
*/
public TokenChange tokenChange(Language language) {
@SuppressWarnings("unchecked")
! TokenChange tc = (TokenChange)tokenChange();
! return (tc != null && tc.language() == language) ? tc : null;
}
!
/**
! * Get reason why a token hierarchy event was fired.
*/
! public Type type() {
! return tokenChange.tokenListChange().type();
}
/**
! * Token hierarchy event type determines the reason
! * why token hierarchy modification happened.
*/
! public enum Type {
!
! /**
! * The token change was caused by modification (insert/remove) of the characters
! * in the underlying character sequence.
! */
! TEXT_MODIFY,
!
! /**
! * The token change was caused by a partial rebuilding
! * of the token hierarchy.
! *
! * The partial rebuilding may be caused by changes in input attributes.
! *
! * This change is notified under modification lock (write lock)
! * of the corresponding input source.
! */
! PARTIAL_REBUILD,
!
! /**
! * The token change was caused by a complete rebuild
! * of the token hierarchy.
! *
! * That may be necessary because of changes
! * in input attributes that influence the lexing.
! *
! * When the whole hierarchy is rebuilt only the removed tokens
! * will be notified. There will be no added tokens
! * because they will be created lazily when asked.
! *
! * This change is notified under modification lock (write lock)
! * of the corresponding input source.
! */
! FULL_REBUILD,
!
! /**
! * The token change was caused by change in activity
! * of the token hierarchy.
! *
! * The current activity state can be determined by {@link TokenHierarchy#isActive()}.
! *
! * Firing with this token change type may happen because the input source
! * (for which the token hierarchy was created) has not been used for a long time
! * and its token hierarchy is being deactivated. Or the token hierarchy is just going
! * to be activated again.
! *
! * The hierarchy will only notify the tokens being removed (for the case when
! * the hierarchy is going to be deactivated). There will be no added tokens
! * because they will be created lazily when asked.
! *
! * This change is notified under modification lock (write lock)
! * of the corresponding input source.
! */
! ACTIVATION;
!
}
}
--- 70,135 ----
* Returns null otherwise.
*/
public TokenChange tokenChange(Language language) {
+ TokenChange extends TokenId> tc = tokenChange();
@SuppressWarnings("unchecked")
! TokenChange tcl = (tc != null && tc.language() == language) ? (TokenChange)tc : null;
! return tcl;
}
!
/**
! * Get start offset of the area that was affected by the attached
! * token change(s).
! */
! public int affectedStartOffset() {
! return info.affectedStartOffset();
! }
!
! /**
! * Get end offset of the area that was affected by the attached
! * token change(s).
! *
! * If there was a text modification the offsets are related
! * to the state after the modification.
*/
! public int affectedEndOffset() {
! return info.affectedEndOffset();
}
/**
! * Get offset in the input source where the modification occurred.
! *
! * @return modification offset or -1
! * if this event's type is not {@link TokenHierarchyEventType#MODIFICATION}.
! */
! public int modificationOffset() {
! return info.modificationOffset();
! }
!
! /**
! * Get number of characters inserted by the text modification
! * that caused this token change.
! *
! * @return number of inserted characters by the modification.
! *
! * Returns 0
! * if this event's type is not {@link TokenHierarchyEventType#MODIFICATION}.
*/
! public int insertedLength() {
! return info.insertedLength();
}
+
+ /**
+ * Get number of characters removed by the text modification
+ * that caused this token change.
+ *
+ * @return number of removed characters by the modification.
+ *
+ * Returns 0
+ * if this event's type is not {@link TokenHierarchyEventType#MODIFICATION}.
+ */
+ public int removedLength() {
+ return info.removedLength();
+ }
+
}
Index: lexer/src/org/netbeans/api/lexer/TokenHierarchyEventType.java
===================================================================
RCS file: lexer/src/org/netbeans/api/lexer/TokenHierarchyEventType.java
diff -N lexer/src/org/netbeans/api/lexer/TokenHierarchyEventType.java
*** /dev/null 1 Jan 1970 00:00:00 -0000
--- lexer/src/org/netbeans/api/lexer/TokenHierarchyEventType.java 28 Nov 2006 14:20:15 -0000
***************
*** 0 ****
--- 1,101 ----
+ /*
+ * The contents of this file are subject to the terms of the Common Development
+ * and Distribution License (the License). You may not use this file except in
+ * compliance with the License.
+ *
+ * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
+ * or http://www.netbeans.org/cddl.txt.
+ *
+ * When distributing Covered Code, include this CDDL Header Notice in each file
+ * and include the License file at http://www.netbeans.org/cddl.txt.
+ * If applicable, add the following below the CDDL Header, with the fields
+ * enclosed by brackets [] replaced by your own identifying information:
+ * "Portions Copyrighted [year] [name of copyright owner]"
+ *
+ * The Original Software is NetBeans. The Initial Developer of the Original
+ * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun
+ * Microsystems, Inc. All Rights Reserved.
+ */
+
+ package org.netbeans.api.lexer;
+
+ /**
+ * Token hierarchy event type determines the reason
+ * why token hierarchy modification described by {@link TokenHierarchyEvent}
+ * happened.
+ *
+ * @author Miloslav Metelka
+ * @version 1.00
+ */
+
+ public enum TokenHierarchyEventType {
+
+ /**
+ * The token change was caused by modification (insert/remove) of the characters
+ * in the underlying character sequence.
+ */
+ MODIFICATION,
+
+ /**
+ * The token change was caused by relexing of a part of the token hierarchy
+ * without any text modification.
+ *
+ * This change is notified under modification lock (write lock)
+ * of the corresponding input source.
+ */
+ RELEX,
+
+ /**
+ * The token change was caused by a complete rebuild
+ * of the token hierarchy.
+ *
+ * That may be necessary because of changes
+ * in input attributes that influence the lexing.
+ *
+ * When the whole hierarchy is rebuilt only the removed tokens
+ * will be notified. There will be no added tokens
+ * because they will be created lazily when asked.
+ *
+ * This change is notified under modification lock (write lock)
+ * of the corresponding input source.
+ */
+ REBUILD,
+
+ /**
+ * The token change was caused by change in activity
+ * of the token hierarchy.
+ *
+ * The current activity state can be determined by {@link TokenHierarchy#isActive()}.
+ *
+ * Firing an event with this type may happen because the input source
+ * (for which the token hierarchy was created) has not been used for a long time
+ * and its token hierarchy is being deactivated. Or the token hierarchy is just going
+ * to be activated again.
+ *
+ * The hierarchy will only notify the tokens being removed (for the case when
+ * the hierarchy is going to be deactivated). There will be no added tokens
+ * because they will be created lazily when asked.
+ *
+ * This change is notified under modification lock (write lock)
+ * of the corresponding input source.
+ */
+ ACTIVITY,
+
+ /**
+ * Custom language embedding was created by
+ * {@link TokenSequence#createEmbedding(Language,int,int)}.
+ *
+ * The {@link TokenHierarchyEvent#tokenChange()} contains the token
+ * where the embedding was created and the embedded change
+ * {@link TokenChange#embeddedChange(int)} that describes the added
+ * embedded language.
+ */
+ EMBEDDING,
+
+ /**
+ * Notification that result of
+ * {@link TokenHierarchy#languagePaths()} has changed.
+ */
+ LANGUAGE_PATHS;
+
+ }
\ No newline at end of file
Index: lexer/src/org/netbeans/api/lexer/TokenSequence.java
===================================================================
RCS file: /cvs/lexer/src/org/netbeans/api/lexer/TokenSequence.java,v
retrieving revision 1.6
diff -c -r1.6 TokenSequence.java
*** lexer/src/org/netbeans/api/lexer/TokenSequence.java 26 Oct 2006 20:45:23 -0000 1.6
--- lexer/src/org/netbeans/api/lexer/TokenSequence.java 28 Nov 2006 14:20:15 -0000
***************
*** 20,26 ****
package org.netbeans.api.lexer;
import java.util.ConcurrentModificationException;
! import org.netbeans.lib.lexer.BranchTokenList;
import org.netbeans.lib.lexer.SubSequenceTokenList;
import org.netbeans.lib.lexer.LexerUtilsConstants;
import org.netbeans.lib.lexer.TokenList;
--- 20,26 ----
package org.netbeans.api.lexer;
import java.util.ConcurrentModificationException;
! import org.netbeans.lib.lexer.EmbeddingContainer;
import org.netbeans.lib.lexer.SubSequenceTokenList;
import org.netbeans.lib.lexer.LexerUtilsConstants;
import org.netbeans.lib.lexer.TokenList;
***************
*** 61,67 ****
public final class TokenSequence {
! private TokenList tokenList; // 8 + 4 = 12 bytes
private AbstractToken token; // 16 bytes
--- 61,67 ----
public final class TokenSequence {
! private TokenList tokenList; // 8 + 4 = 12 bytes
private AbstractToken token; // 16 bytes
***************
*** 78,88 ****
* changes (by modification) this token sequence will become invalid.
*/
private final int modCount; // 28 bytes
/**
* Package-private constructor used by API accessor.
*/
! TokenSequence(TokenList tokenList) {
this.tokenList = tokenList;
this.modCount = tokenList.modCount();
}
--- 78,97 ----
* changes (by modification) this token sequence will become invalid.
*/
private final int modCount; // 28 bytes
+
+ /**
+ * Parent token indexes allow to effectively determine parent tokens
+ * in the tree token hierarchy.
+ *
+ * The first index corresponds to the top language in the hierarchy
+ * and the ones that follow point to subsequent embedded levels.
+ */
+ private int[] parentTokenIndexes; // 32 bytes
/**
* Package-private constructor used by API accessor.
*/
! TokenSequence(TokenList tokenList) {
this.tokenList = tokenList;
this.modCount = tokenList.modCount();
}
***************
*** 92,102 ****
* used by tokens in this token sequence.
*/
public Language language() {
! // No need to check as the token sequence should already
! // be obtained originally for the inner language
! @SuppressWarnings("unchecked") Language l
! = (Language)languagePath().innerLanguage();
! return l;
}
/**
--- 101,107 ----
* used by tokens in this token sequence.
*/
public Language language() {
! return LexerUtilsConstants.mostEmbeddedLanguage(languagePath());
}
/**
***************
*** 157,163 ****
public Token offsetToken() {
checkToken();
if (token.isFlyweight()) {
! token = tokenList.createNonFlyToken(tokenIndex, token, offset());
}
return token;
}
--- 162,168 ----
public Token offsetToken() {
checkToken();
if (token.isFlyweight()) {
! token = tokenList.replaceFlyToken(tokenIndex, token, offset());
}
return token;
}
***************
*** 195,204 ****
return tokenIndex;
}
! /**
! * Get the embedded token sequence if the token
* to which this token sequence is currently positioned
* has a language embedding.
*
* @return embedded sequence or null if no embedding exists for this token.
* @throws IllegalStateException if this token sequence was not positioned
--- 200,215 ----
return tokenIndex;
}
! /**
! * Get embedded token sequence if the token
* to which this token sequence is currently positioned
* has a language embedding.
+ *
+ * If there is a custom embedding created by
+ * {@link #createEmbedding(Language,int,int)} it will be returned
+ * instead of the default embedding
+ * (the one created by LanguageHierarchy.embedding()
+ * or LanguageProvider
).
*
* @return embedded sequence or null if no embedding exists for this token.
* @throws IllegalStateException if this token sequence was not positioned
***************
*** 206,240 ****
*/
public TokenSequence extends TokenId> embedded() {
checkToken();
! TokenList branchTokenList = BranchTokenList.getOrCreate(tokenList, tokenIndex);
! if (branchTokenList != null) {
! TokenList tl = tokenList;
if (tokenList.getClass() == SubSequenceTokenList.class) {
! tl = ((SubSequenceTokenList)tokenList).delegate();
}
if (tl.getClass() == FilterSnapshotTokenList.class) {
! branchTokenList = new FilterSnapshotTokenList(branchTokenList,
! ((FilterSnapshotTokenList)tl).tokenOffsetDiff());
} else if (tl.getClass() == SnapshotTokenList.class) {
! branchTokenList = new FilterSnapshotTokenList(branchTokenList,
offset() - token().offset(null));
}
! return new TokenSequence(branchTokenList);
} else // Embedded token list does not exist
return null;
}
/**
! * Created embedded token sequence of the given type or return null
! * if the embedded token sequence does not exist or it has a different type.
*/
public TokenSequence embedded(Language embeddedLanguage) {
! @SuppressWarnings("unchecked")
! TokenSequence ets = (TokenSequence)embedded();
! return (ets != null && ets.language() == embeddedLanguage) ? ets : null;
}
/**
--- 217,302 ----
*/
public TokenSequence extends TokenId> embedded() {
checkToken();
! return embeddedImpl(null);
! }
!
! private TokenSequence embeddedImpl(Language embeddedLanguage) {
! TokenList embeddedTokenList
! = EmbeddingContainer.getEmbedding(tokenList, tokenIndex, embeddedLanguage);
! if (embeddedTokenList != null) {
! TokenList tl = tokenList;
if (tokenList.getClass() == SubSequenceTokenList.class) {
! tl = ((SubSequenceTokenList)tokenList).delegate();
}
if (tl.getClass() == FilterSnapshotTokenList.class) {
! embeddedTokenList = new FilterSnapshotTokenList(embeddedTokenList,
! ((FilterSnapshotTokenList)tl).tokenOffsetDiff());
} else if (tl.getClass() == SnapshotTokenList.class) {
! embeddedTokenList = new FilterSnapshotTokenList(embeddedTokenList,
offset() - token().offset(null));
}
! return new TokenSequence(embeddedTokenList);
} else // Embedded token list does not exist
return null;
}
/**
! * Get embedded token sequence if the token
! * to which this token sequence is currently positioned
! * has a language embedding.
*/
public TokenSequence embedded(Language embeddedLanguage) {
! checkToken();
! return embeddedImpl(embeddedLanguage);
! }
!
! /**
! * Create language embedding without joining of the embedded sections.
! *
! * @see #createEmbedding(Language, int, int, boolean)
! */
! public boolean createEmbedding(Language extends TokenId> embeddedLanguage,
! int startSkipLength, int endSkipLength) {
! return createEmbedding(embeddedLanguage, startSkipLength, endSkipLength, false);
! }
!
! /**
! * Create language embedding described by the given parameters.
! *
! * If the underlying text input is mutable then this method should only be called
! * within a read lock over the text input.
! *
! * @param embeddedLanguage non-null embedded language
! * @param startSkipLength >=0 number of characters in an initial part of the token
! * for which the language embedding is defined that should be excluded
! * from the embedded section. The excluded characters will not be lexed
! * and there will be no tokens created for them.
! * @param endSkipLength >=0 number of characters at the end of the token
! * for which the language embedding is defined that should be excluded
! * from the embedded section. The excluded characters will not be lexed
! * and there will be no tokens created for them.
! * @param joinSections whether sections with this embedding should be joined
! * across the input source or whether they should stay separate.
! *
! * For example for HTML sections embedded in JSP this flag should be true:
! *
! * <!-- HTML comment start
! * <% System.out.println("Hello"); %>
! still in HTML comment --<
! *
! *
! * Only the embedded sections with the same language path can be joined.
! * @return true if the embedding was created successfully or false if an embedding
! * with the given language already exists for this token.
! */
! public boolean createEmbedding(Language extends TokenId> embeddedLanguage,
! int startSkipLength, int endSkipLength, boolean joinSections) {
! checkToken();
! return EmbeddingContainer.createEmbedding(tokenList, tokenIndex,
! embeddedLanguage, startSkipLength, endSkipLength, joinSections);
}
/**
***************
*** 253,262 ****
public boolean moveNext() {
checkModCount();
tokenIndex++;
! Object tokenOrBranch = tokenList.tokenOrBranch(tokenIndex);
! if (tokenOrBranch != null) {
AbstractToken origToken = token;
! assignToken(tokenOrBranch);
if (tokenOffset != -1) {
// If the token list is continuous or the fetched token
// is flyweight (there cannot be a gap before flyweight token)
--- 315,324 ----
public boolean moveNext() {
checkModCount();
tokenIndex++;
! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(tokenIndex);
! if (tokenOrEmbeddingContainer != null) {
AbstractToken origToken = token;
! assignToken(tokenOrEmbeddingContainer);
if (tokenOffset != -1) {
// If the token list is continuous or the fetched token
// is flyweight (there cannot be a gap before flyweight token)
***************
*** 328,337 ****
if (index < 0) {
return false;
}
! Object tokenOrBranch = tokenList.tokenOrBranch(index);
! if (tokenOrBranch != null) { // enough tokens
this.tokenIndex = index;
! assignToken(tokenOrBranch);
tokenOffset = -1;
return true;
--- 390,399 ----
if (index < 0) {
return false;
}
! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index);
! if (tokenOrEmbeddingContainer != null) { // enough tokens
this.tokenIndex = index;
! assignToken(tokenOrEmbeddingContainer);
tokenOffset = -1;
return true;
***************
*** 409,415 ****
// when asked by clients.
int tokenCount = tokenList.tokenCountCurrent(); // presently created token count
if (tokenCount == 0) { // no tokens yet -> attempt to create at least one
! if (tokenList.tokenOrBranch(0) == null) { // really no tokens at all
// In this case the token sequence could not be positioned yet
// so no need to reset "index" or other vars
return Integer.MAX_VALUE;
--- 471,477 ----
// when asked by clients.
int tokenCount = tokenList.tokenCountCurrent(); // presently created token count
if (tokenCount == 0) { // no tokens yet -> attempt to create at least one
! if (tokenList.tokenOrEmbeddingContainer(0) == null) { // really no tokens at all
// In this case the token sequence could not be positioned yet
// so no need to reset "index" or other vars
return Integer.MAX_VALUE;
***************
*** 426,434 ****
// there may be gaps between tokens due to token id filter use.
int tokenLength = LexerUtilsConstants.token(tokenList, tokenCount - 1).length();
while (offset >= prevTokenOffset + tokenLength) { // above present token
! Object tokenOrBranch = tokenList.tokenOrBranch(tokenCount);
! if (tokenOrBranch != null) {
! AbstractToken t = LexerUtilsConstants.token(tokenOrBranch);
if (t.isFlyweight()) { // need to use previous tokenLength
prevTokenOffset += tokenLength;
} else { // non-flyweight token - retrieve offset
--- 488,496 ----
// there may be gaps between tokens due to token id filter use.
int tokenLength = LexerUtilsConstants.token(tokenList, tokenCount - 1).length();
while (offset >= prevTokenOffset + tokenLength) { // above present token
! Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(tokenCount);
! if (tokenOrEmbeddingContainer != null) {
! AbstractToken t = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
if (t.isFlyweight()) { // need to use previous tokenLength
prevTokenOffset += tokenLength;
} else { // non-flyweight token - retrieve offset
***************
*** 547,578 ****
*/
public TokenSequence subSequence(int startOffset, int endOffset) {
checkModCount(); // Ensure subsequences on valid token sequences only
! TokenList tl;
if (tokenList.getClass() == SubSequenceTokenList.class) {
! SubSequenceTokenList stl = (SubSequenceTokenList)tokenList;
tl = stl.delegate();
startOffset = Math.max(startOffset, stl.limitStartOffset());
endOffset = Math.min(endOffset, stl.limitEndOffset());
} else // Regular token list
tl = tokenList;
! return new TokenSequence(new SubSequenceTokenList(tl, startOffset, endOffset));
}
public String toString() {
return LexerUtilsConstants.appendTokenList(null, tokenList, tokenIndex).toString();
}
! @SuppressWarnings("unchecked")
! private void assignToken(Object tokenOrBranch) {
! if (tokenOrBranch.getClass() == BranchTokenList.class) {
! token = (AbstractToken)((BranchTokenList)tokenOrBranch).branchToken();
! } else {
! token = (AbstractToken)tokenOrBranch;
! }
}
private void assignToken() {
! assignToken(tokenList.tokenOrBranch(tokenIndex));
}
private void checkToken() {
--- 609,639 ----
*/
public TokenSequence subSequence(int startOffset, int endOffset) {
checkModCount(); // Ensure subsequences on valid token sequences only
! TokenList tl;
if (tokenList.getClass() == SubSequenceTokenList.class) {
! SubSequenceTokenList stl = (SubSequenceTokenList)tokenList;
tl = stl.delegate();
startOffset = Math.max(startOffset, stl.limitStartOffset());
endOffset = Math.min(endOffset, stl.limitEndOffset());
} else // Regular token list
tl = tokenList;
! return new TokenSequence(new SubSequenceTokenList(tl, startOffset, endOffset));
}
public String toString() {
return LexerUtilsConstants.appendTokenList(null, tokenList, tokenIndex).toString();
}
+
+ int[] parentTokenIndexes() {
+ return parentTokenIndexes;
+ }
! private void assignToken(Object tokenOrEmbeddingContainer) {
! token = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
}
private void assignToken() {
! assignToken(tokenList.tokenOrEmbeddingContainer(tokenIndex));
}
private void checkToken() {
Index: lexer/src/org/netbeans/lib/lexer/BranchTokenList.java
===================================================================
RCS file: lexer/src/org/netbeans/lib/lexer/BranchTokenList.java
diff -N lexer/src/org/netbeans/lib/lexer/BranchTokenList.java
*** lexer/src/org/netbeans/lib/lexer/BranchTokenList.java 26 Oct 2006 20:45:23 -0000 1.5
--- /dev/null 1 Jan 1970 00:00:00 -0000
***************
*** 1,422 ****
- /*
- * The contents of this file are subject to the terms of the Common Development
- * and Distribution License (the License). You may not use this file except in
- * compliance with the License.
- *
- * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
- * or http://www.netbeans.org/cddl.txt.
- *
- * When distributing Covered Code, include this CDDL Header Notice in each file
- * and include the License file at http://www.netbeans.org/cddl.txt.
- * If applicable, add the following below the CDDL Header, with the fields
- * enclosed by brackets [] replaced by your own identifying information:
- * "Portions Copyrighted [year] [name of copyright owner]"
- *
- * The Original Software is NetBeans. The Initial Developer of the Original
- * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun
- * Microsystems, Inc. All Rights Reserved.
- */
-
- package org.netbeans.lib.lexer;
-
- import java.util.List;
- import java.util.Set;
- import org.netbeans.api.lexer.Language;
- import org.netbeans.api.lexer.LanguagePath;
- import org.netbeans.lib.editor.util.FlyOffsetGapList;
- import org.netbeans.lib.lexer.inc.MutableTokenList;
- import org.netbeans.api.lexer.InputAttributes;
- import org.netbeans.api.lexer.Token;
- import org.netbeans.api.lexer.TokenId;
- import org.netbeans.lib.lexer.inc.TokenListChange;
- import org.netbeans.spi.lexer.LanguageEmbedding;
- import org.netbeans.spi.lexer.LanguageHierarchy;
- import org.netbeans.lib.lexer.token.AbstractToken;
- import org.netbeans.lib.lexer.token.TextToken;
-
-
- /**
- * Branch token list maintains a list of tokens
- * on a particular embedded language level .
- *
- * The physical storage contains a gap to speed up list modifications
- * during typing in a document when tokens are typically added/removed
- * at the same index in the list.
- *
- *
- * There is an intent to not degrade performance significantly
- * with each extra language embedding level so the token list maintains direct
- * link to the root level.
- *
- * @author Miloslav Metelka
- * @version 1.00
- */
-
- public final class BranchTokenList extends FlyOffsetGapList implements MutableTokenList {
-
- /** Flag for additional correctness checks (may degrade performance). */
- private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test");
-
- /**
- * Get or create branch token list.
- *
- * This method also calls updateStartOffset().
- *
- */
- public static BranchTokenList getOrCreate(TokenList tokenList, int index) {
- Object tokenOrBranch = tokenList.tokenOrBranch(index);
- if (tokenOrBranch.getClass() == BranchTokenList.class) { // already exists
- BranchTokenList embeddedList = (BranchTokenList)tokenOrBranch;
- embeddedList.updateStartOffset();
- return embeddedList;
- }
- AbstractToken branchToken = (AbstractToken)tokenOrBranch;
- if (branchToken.isFlyweight()) { // branching cannot exist for this flyweight token
- return null;
- }
- // Create branch token list now
- LanguagePath languagePath = tokenList.languagePath();
- LanguageHierarchy languageHierarchy = LexerUtilsConstants.languageHierarchy(languagePath);
- LanguageEmbedding embedding = LexerSpiPackageAccessor.get().embedding(
- languageHierarchy, branchToken, true, languagePath, tokenList.inputAttributes());
-
- if (embedding == null) {
- // try language embeddings registered in Lookup
- embedding = LanguageManager.getInstance().findLanguageEmbedding(languagePath, branchToken, tokenList.inputAttributes());
- }
-
- if (embedding != null) {
- LanguagePath embeddedLanguagePath = LanguagePath.get(languagePath,
- embedding.language());
- // updateStartOffset() called in constructor
- BranchTokenList embeddedList = new BranchTokenList((AbstractToken extends TokenId>)branchToken,
- embeddedLanguagePath, embedding);
- tokenList.wrapToken(index, embeddedList);
- return embeddedList;
- } else {
- return null;
- }
- }
-
- private final AbstractToken extends TokenId> branchToken; // 36 bytes (32-super + 4)
-
- private final LanguagePath languagePath; // 40 bytes
-
- /**
- * For mutable environment this field contains root token list of the hierarchy.
- *
- */
- private final TokenList root; // 44 bytes
-
- /**
- * Cached modification count allows to determine whether the start offset
- * needs to be recomputed.
- */
- private int cachedModCount; // 48 bytes
-
- /**
- * Cached start offset of the first token in this token list.
- *
- * It consists of start ofsset of the branch token plus embedding start shift.
- *
- * It must be added to the real offset for each child token
- * (the child's real offset is only extra shift against startOffset).
- */
- private int startOffset; // 52 bytes
-
- private final LanguageEmbedding embedding; // 56 bytes
-
- /**
- * The branch token at the root level (root token list).
- *
- * For first-level embedding it is the same like value of branchToken variable
- * but for deeper embeddings it points to the corresponding branch token
- * in the root token list.
- *
- * It's used for getting of the start offset of the contained tokens
- * and for getting of their text.
- */
- private final AbstractToken extends TokenId> rootBranchToken; // 60 bytes
-
- /**
- * Difference between start offset of the first token in this token list
- * against the start offset of the corresponding root branch token.
- *
- * The offset gets refreshed upon updateStartOffset()
.
- */
- private int startOffsetShift; // 64 bytes
-
- /**
- * Storage for lookaheads and states for the lexed tokens.
- *
- * It's only initialized for mutable token lists
- * or when in testing environment.
- */
- private LAState laState; // 68 bytes
-
-
- public BranchTokenList(AbstractToken extends TokenId> branchToken, LanguagePath languagePath, LanguageEmbedding embedding) {
- this.branchToken = branchToken;
- this.languagePath = languagePath;
- this.embedding = embedding;
- TokenList branchTokenList = branchToken.tokenList();
- this.root = branchTokenList.root();
- this.rootBranchToken = (branchTokenList.getClass() == BranchTokenList.class)
- ? ((BranchTokenList)branchTokenList).rootBranchToken()
- : branchToken;
-
- if (root.modCount() != -1 || testing) {
- this.laState = LAState.empty(); // Store lookaheads and states
- }
-
- // Set cachedModCount to value that will force its update (can't be 0 or -1)
- this.cachedModCount = -2;
- updateStartOffset(); // update startOffset
-
- init();
- }
-
- private void init() {
- // Lex all the input represented by branch token at once
- LexerInputOperation lexerInputOperation = createLexerInputOperation(
- 0, startOffset, null);
- Token token = lexerInputOperation.nextToken();
- while (token != null) {
- updateElementOffsetAdd(token); // must subtract startOffset()
- add(token);
- if (laState != null) {
- laState = laState.add(lexerInputOperation.lookahead(),
- lexerInputOperation.lexerState());
- }
- token = lexerInputOperation.nextToken();
- }
-
- trimToSize(); // Compact storage
- if (laState != null)
- laState.trimToSize();
- }
-
- public AbstractToken extends TokenId> branchToken() {
- return branchToken;
- }
-
- public AbstractToken extends TokenId> rootBranchToken() {
- return rootBranchToken;
- }
-
- public LanguagePath languagePath() {
- return languagePath;
- }
-
- public int tokenCount() {
- // initialized at once so no need to check whether lexing is finished
- return size();
- }
-
- public synchronized Object tokenOrBranch(int index) {
- // Assuming all the token are lexed since begining and after updates
- return (index < size()) ? get(index) : null;
- }
-
- private Token existingToken(int index) {
- // Tokens not created lazily -> use regular unsync tokenOrBranch()
- return LexerUtilsConstants.token(tokenOrBranch(index));
- }
-
- public synchronized AbstractToken createNonFlyToken(
- int index, AbstractToken flyToken, int offset) {
- TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset2Raw(offset));
- set(index, nonFlyToken);
- return nonFlyToken;
- }
-
- public int lookahead(int index) {
- return (laState != null) ? laState.lookahead(index) : -1;
- }
-
- public Object state(int index) {
- return (laState != null) ? laState.state(index) : null;
- }
-
- /**
- * Returns absolute offset of the token at the given index
- * (startOffset gets added to the child token's real offset).
- *
- * For token hierarchy snapshots the returned value is corrected
- * in the TokenSequence explicitly by adding TokenSequence.tokenOffsetDiff.
- */
- public int tokenOffset(int index) {
- return elementOffset(index);
- }
-
- public int childTokenOffset(int rawOffset) {
- // Need to make sure that the startOffset is up-to-date
- updateStartOffset();
- return startOffset + childTokenRelOffset(rawOffset);
- }
-
- /**
- * Get difference between start offset of the particular child token
- * against start offset of the root branch token.
- */
- public int childTokenOffsetShift(int rawOffset) {
- // Need to make sure that the startOffsetShift is up-to-date
- updateStartOffset();
- return startOffsetShift + childTokenRelOffset(rawOffset);
- }
-
- /**
- * Get child token's real offset which is always a relative value
- * to startOffset value.
- */
- private int childTokenRelOffset(int rawOffset) {
- return (rawOffset < offsetGapStart())
- ? rawOffset
- : rawOffset - offsetGapLength();
- }
-
- public char childTokenCharAt(int rawOffset, int index) {
- // Do not update the start offset shift - the token.text()
- // did it before returning its result and its contract
- // specifies that.
- // Return chars by delegating to rootBranchToken
- return rootBranchToken.charAt(startOffsetShift + childTokenRelOffset(rawOffset) + index);
- }
-
- public int modCount() {
- // Delegate to root to have the most up-to-date value for token sequence's check.
- return root.modCount();
- }
-
- protected int startOffset() { // used by FlyOffsetGapList
- return startOffset;
- }
-
- public void updateStartOffset() {
- synchronized (root) {
- if (cachedModCount != root.modCount()) {
- cachedModCount = root.modCount();
- // Assign cached start offset of the first token in this list
- startOffset = branchToken.offset(null) + embedding.startSkipLength();
- // Assign difference between start offset of the first token
- // in this list and start offset of the corresponding root branch token
- startOffsetShift = startOffset - rootBranchToken.offset(null);
- }
- }
- }
-
- public TokenList root() {
- return root;
- }
-
- protected int elementRawOffset(Object elem) {
- return (elem.getClass() == BranchTokenList.class)
- ? ((BranchTokenList)elem).branchToken().rawOffset()
- : ((AbstractToken)elem).rawOffset();
- }
-
- protected void setElementRawOffset(Object elem, int rawOffset) {
- if (elem.getClass() == BranchTokenList.class)
- ((BranchTokenList)elem).branchToken().setRawOffset(rawOffset);
- else
- ((AbstractToken)elem).setRawOffset(rawOffset);
- }
-
- protected boolean isElementFlyweight(Object elem) {
- // token wrapper always contains non-flyweight token
- return (elem.getClass() != BranchTokenList.class)
- && ((Token)elem).isFlyweight();
- }
-
- protected int elementLength(Object elem) {
- return LexerUtilsConstants.token(elem).length();
- }
-
- public synchronized void wrapToken(int index, BranchTokenList wrapper) {
- set(index, wrapper);
- }
-
- public InputAttributes inputAttributes() {
- return root.inputAttributes();
- }
-
- // MutableTokenList extra methods
- public Object tokenOrBranchUnsync(int index) {
- return get(index);
- }
-
- public int tokenCountCurrent() {
- return size();
- }
-
- public LexerInputOperation createLexerInputOperation(
- int tokenIndex, int relexOffset, Object relexState) {
- CharSequence branchTokenText = branchToken.text();
- int branchTokenStartOffset = startOffset - embedding.startSkipLength();
- int endOffset = branchTokenStartOffset + branchTokenText.length()
- - embedding.endSkipLength();
- // Do not need to update offset - clients
- // (constructor or token list updater) call updateStartOffset()
- // before calling this method
- return new TextLexerInputOperation(this, tokenIndex, relexState, branchTokenText,
- branchTokenStartOffset, relexOffset, endOffset);
- }
-
- public boolean isFullyLexed() {
- return true;
- }
-
- public void replaceTokens(TokenListChange change, int removeTokenCount) {
- int index = change.tokenIndex();
- // Remove obsolete tokens (original offsets are retained)
- Object[] removedTokensOrBranches = new Object[removeTokenCount];
- copyElements(index, index + removeTokenCount, removedTokensOrBranches, 0);
- int offset = change.modifiedTokensStartOffset();
- for (int i = 0; i < removeTokenCount; i++) {
- Object tokenOrBranch = removedTokensOrBranches[i];
- Token token = LexerUtilsConstants.token(tokenOrBranch);
- if (!token.isFlyweight()) {
- updateElementOffsetRemove(token);
- ((AbstractToken)token).setTokenList(null);
- }
- offset += token.length();
- }
- remove(index, removeTokenCount); // Retain original offsets
- laState.remove(index, removeTokenCount); // Remove lookaheads and states
- change.initRemovedTokenList(removedTokensOrBranches);
- change.setRemovedTokensEndOffset(offset);
-
- // Move and fix the gap according to the performed modification.
- int diffLength = change.insertedLength() - change.removedLength();
- if (offsetGapStart() != change.offset()) {
- // Minimum of the index of the first removed index and original computed index
- moveOffsetGap(change.offset(), Math.min(index, change.offsetGapIndex()));
- }
- updateOffsetGapLength(-diffLength);
-
- // Add created tokens.
- List addedTokens = change.addedTokens();
- if (addedTokens != null) {
- for (Token token : addedTokens) {
- updateElementOffsetAdd(token);
- }
- addAll(index, addedTokens);
- laState = laState.addAll(index, change.laState());
- change.setAddedTokenCount(addedTokens.size());
- change.clearAddedTokens();
- }
- }
-
- public boolean isContinuous() {
- return true;
- }
-
- public Set extends TokenId> skipTokenIds() {
- return null;
- }
-
- public String toString() {
- return LexerUtilsConstants.appendTokenList(null, this, -1).toString();
- }
-
- }
--- 0 ----
Index: lexer/src/org/netbeans/lib/lexer/EmbeddedTokenList.java
===================================================================
RCS file: lexer/src/org/netbeans/lib/lexer/EmbeddedTokenList.java
diff -N lexer/src/org/netbeans/lib/lexer/EmbeddedTokenList.java
*** /dev/null 1 Jan 1970 00:00:00 -0000
--- lexer/src/org/netbeans/lib/lexer/EmbeddedTokenList.java 28 Nov 2006 14:20:15 -0000
***************
*** 0 ****
--- 1,348 ----
+ /*
+ * The contents of this file are subject to the terms of the Common Development
+ * and Distribution License (the License). You may not use this file except in
+ * compliance with the License.
+ *
+ * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
+ * or http://www.netbeans.org/cddl.txt.
+ *
+ * When distributing Covered Code, include this CDDL Header Notice in each file
+ * and include the License file at http://www.netbeans.org/cddl.txt.
+ * If applicable, add the following below the CDDL Header, with the fields
+ * enclosed by brackets [] replaced by your own identifying information:
+ * "Portions Copyrighted [year] [name of copyright owner]"
+ *
+ * The Original Software is NetBeans. The Initial Developer of the Original
+ * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun
+ * Microsystems, Inc. All Rights Reserved.
+ */
+
+ package org.netbeans.lib.lexer;
+
+ import java.util.List;
+ import java.util.Set;
+ import org.netbeans.api.lexer.LanguagePath;
+ import org.netbeans.lib.editor.util.FlyOffsetGapList;
+ import org.netbeans.lib.lexer.inc.MutableTokenList;
+ import org.netbeans.api.lexer.InputAttributes;
+ import org.netbeans.api.lexer.Token;
+ import org.netbeans.api.lexer.TokenId;
+ import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo;
+ import org.netbeans.lib.lexer.inc.TokenListChange;
+ import org.netbeans.spi.lexer.LanguageEmbedding;
+ import org.netbeans.lib.lexer.token.AbstractToken;
+ import org.netbeans.lib.lexer.token.TextToken;
+
+
+ /**
+ * Embedded token list maintains a list of tokens
+ * on a particular embedded language level .
+ *
+ * The physical storage contains a gap to speed up list modifications
+ * during typing in a document when tokens are typically added/removed
+ * at the same index in the list.
+ *
+ *
+ * There is an intent to not degrade performance significantly
+ * with each extra language embedding level so the token list maintains direct
+ * link to the root level.
+ *
+ * @author Miloslav Metelka
+ * @version 1.00
+ */
+
+ public final class EmbeddedTokenList
+ extends FlyOffsetGapList implements MutableTokenList {
+
+ /** Flag for additional correctness checks (may degrade performance). */
+ private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test");
+
+ /**
+ * Embedding container carries info about the token into which this
+ * token list is embedded.
+ */
+ private final EmbeddingContainer extends TokenId> embeddingContainer; // 36 bytes (32-super + 4)
+
+ /**
+ * Language embedding for this embedded token list.
+ */
+ private final LanguageEmbedding embedding; // 40 bytes
+
+ /**
+ * Language path of this token list.
+ */
+ private final LanguagePath languagePath; // 44 bytes
+
+ /**
+ * Storage for lookaheads and states.
+ *
+ * It's only initialized (non-null) for mutable token lists
+ * or when in testing environment.
+ */
+ private LAState laState; // 48 bytes
+
+ /**
+ * Next embedded token list forming a single-linked list.
+ */
+ private EmbeddedTokenList extends TokenId> nextEmbedding; // 52 bytes
+
+ public EmbeddedTokenList(EmbeddingContainer extends TokenId> embeddingContainer,
+ LanguagePath languagePath, LanguageEmbedding embedding,
+ EmbeddedTokenList extends TokenId> nextEmbedding) {
+ this.embeddingContainer = embeddingContainer;
+ this.languagePath = languagePath;
+ this.embedding = embedding;
+ this.nextEmbedding = nextEmbedding;
+
+ if (embeddingContainer.rootTokenList().modCount() != -1 || testing) {
+ this.laState = LAState.empty(); // Store lookaheads and states
+ }
+
+ init();
+ }
+
+ private void init() {
+ // Lex the whole input represented by token at once
+ LexerInputOperation lexerInputOperation = createLexerInputOperation(
+ 0, startOffset(), null);
+ AbstractToken token = lexerInputOperation.nextToken();
+ while (token != null) {
+ updateElementOffsetAdd(token); // must subtract startOffset()
+ add(token);
+ if (laState != null) {
+ laState = laState.add(lexerInputOperation.lookahead(),
+ lexerInputOperation.lexerState());
+ }
+ token = lexerInputOperation.nextToken();
+ }
+
+ trimToSize(); // Compact storage
+ if (laState != null)
+ laState.trimToSize();
+ }
+
+ EmbeddedTokenList extends TokenId> nextEmbedding() {
+ return nextEmbedding;
+ }
+
+ void setNextEmbedding(EmbeddedTokenList extends TokenId> nextEmbedding) {
+ this.nextEmbedding = nextEmbedding;
+ }
+
+ public LanguagePath languagePath() {
+ return languagePath;
+ }
+
+ public int tokenCount() {
+ // initialized at once so no need to check whether lexing is finished
+ return size();
+ }
+
+ public synchronized Object tokenOrEmbeddingContainer(int index) {
+ // Assuming all the tokens are lexed since the beginning and after updates
+ return (index < size()) ? get(index) : null;
+ }
+
+ private Token existingToken(int index) {
+ // Tokens not created lazily -> use regular unsync tokenOrEmbeddingContainer()
+ return LexerUtilsConstants.token(tokenOrEmbeddingContainer(index));
+ }
+
+ public int lookahead(int index) {
+ return (laState != null) ? laState.lookahead(index) : -1;
+ }
+
+ public Object state(int index) {
+ return (laState != null) ? laState.state(index) : null;
+ }
+
+ /**
+ * Returns absolute offset of the token at the given index
+ * (startOffset gets added to the child token's real offset).
+ *
+ * For token hierarchy snapshots the returned value is corrected
+ * in the TokenSequence explicitly by adding TokenSequence.tokenOffsetDiff.
+ */
+ public int tokenOffset(int index) {
+ return elementOffset(index);
+ }
+
+ public int childTokenOffset(int rawOffset) {
+ // Need to make sure that the startOffset is up-to-date
+ embeddingContainer.updateOffsets();
+ return embeddingContainer.tokenStartOffset() + embedding.startSkipLength()
+ + childTokenRelOffset(rawOffset);
+ }
+
+ /**
+ * Get difference between start offset of the particular child token
+ * against start offset of the root token.
+ */
+ public int childTokenOffsetShift(int rawOffset) {
+ // Need to make sure that the startOffsetShift is up-to-date
+ updateStartOffset();
+ return embeddingContainer.rootTokenOffsetShift() + childTokenRelOffset(rawOffset);
+ }
+
+ /**
+ * Get child token's real offset which is always a relative value
+ * to startOffset value.
+ */
+ private int childTokenRelOffset(int rawOffset) {
+ return (rawOffset < offsetGapStart())
+ ? rawOffset
+ : rawOffset - offsetGapLength();
+ }
+
+ public char childTokenCharAt(int rawOffset, int index) {
+ // Do not update the start offset shift - the token.text()
+ // did it before returning its result and its contract
+ // specifies that.
+ // Return chars by delegating to rootToken
+ return embeddingContainer.charAt(
+ embedding.startSkipLength() + childTokenRelOffset(rawOffset) + index);
+ }
+
+ public int modCount() {
+ // Delegate to root to have the most up-to-date value for token sequence's check.
+ return root().modCount();
+ }
+
+ protected int startOffset() { // used by FlyOffsetGapList
+ return embeddingContainer.tokenStartOffset() + embedding.startSkipLength();
+ }
+
+ public void updateStartOffset() {
+ embeddingContainer.updateOffsets();
+ }
+
+ public TokenList extends TokenId> root() {
+ return embeddingContainer.rootTokenList();
+ }
+
+ public TokenHierarchyOperation,? extends TokenId> tokenHierarchyOperation() {
+ return root().tokenHierarchyOperation();
+ }
+
+ public AbstractToken extends TokenId> rootToken() {
+ return embeddingContainer.rootToken();
+ }
+
+ protected int elementRawOffset(Object elem) {
+ return (elem.getClass() == EmbeddingContainer.class)
+ ? ((EmbeddingContainer)elem).token().rawOffset()
+ : ((AbstractToken extends TokenId>)elem).rawOffset();
+ }
+
+ protected void setElementRawOffset(Object elem, int rawOffset) {
+ if (elem.getClass() == EmbeddingContainer.class)
+ ((EmbeddingContainer)elem).token().setRawOffset(rawOffset);
+ else
+ ((AbstractToken extends TokenId>)elem).setRawOffset(rawOffset);
+ }
+
+ protected boolean isElementFlyweight(Object elem) {
+ // token wrapper always contains non-flyweight token
+ return (elem.getClass() != EmbeddingContainer.class)
+ && ((AbstractToken extends TokenId>)elem).isFlyweight();
+ }
+
+ protected int elementLength(Object elem) {
+ return LexerUtilsConstants.token(elem).length();
+ }
+
+ public synchronized AbstractToken replaceFlyToken(
+ int index, AbstractToken flyToken, int offset) {
+ TextToken nonFlyToken = ((TextToken)flyToken).createCopy(this, offset2Raw(offset));
+ set(index, nonFlyToken);
+ return nonFlyToken;
+ }
+
+ public synchronized void wrapToken(int index, EmbeddingContainer embeddingContainer) {
+ set(index, embeddingContainer);
+ }
+
+ public InputAttributes inputAttributes() {
+ return root().inputAttributes();
+ }
+
+ // MutableTokenList extra methods
+ public Object tokenOrEmbeddingContainerUnsync(int index) {
+ return get(index);
+ }
+
+ public int tokenCountCurrent() {
+ return size();
+ }
+
+ public LexerInputOperation createLexerInputOperation(
+ int tokenIndex, int relexOffset, Object relexState) {
+ CharSequence tokenText = embeddingContainer.token().text();
+ int tokenStartOffset = embeddingContainer.tokenStartOffset();
+ int endOffset = tokenStartOffset + tokenText.length()
+ - embedding.endSkipLength();
+ // Do not need to update offset - clients
+ // (constructor or token list updater) call updateStartOffset()
+ // before calling this method
+ return new TextLexerInputOperation(this, tokenIndex, relexState, tokenText,
+ tokenStartOffset, relexOffset, endOffset);
+ }
+
+ public boolean isFullyLexed() {
+ return true;
+ }
+
+ public void replaceTokens(TokenHierarchyEventInfo eventInfo,
+ TokenListChange change, int removeTokenCount) {
+ int index = change.index();
+ // Remove obsolete tokens (original offsets are retained)
+ Object[] removedTokensOrEmbeddingContainers = new Object[removeTokenCount];
+ copyElements(index, index + removeTokenCount, removedTokensOrEmbeddingContainers, 0);
+ int offset = change.offset();
+ for (int i = 0; i < removeTokenCount; i++) {
+ Object tokenOrEmbeddingContainer = removedTokensOrEmbeddingContainers[i];
+ AbstractToken token = LexerUtilsConstants.token(tokenOrEmbeddingContainer);
+ if (!token.isFlyweight()) {
+ updateElementOffsetRemove(token);
+ token.setTokenList(null);
+ }
+ offset += token.length();
+ }
+ remove(index, removeTokenCount); // Retain original offsets
+ laState.remove(index, removeTokenCount); // Remove lookaheads and states
+ change.setRemovedTokens(removedTokensOrEmbeddingContainers);
+ change.setRemovedEndOffset(offset);
+
+ // Move and fix the gap according to the performed modification.
+ int diffLength = eventInfo.insertedLength() - eventInfo.removedLength();
+ if (offsetGapStart() != change.offset()) {
+ // Minimum of the index of the first removed index and original computed index
+ moveOffsetGap(change.offset(), Math.min(index, change.offsetGapIndex()));
+ }
+ updateOffsetGapLength(-diffLength);
+
+ // Add created tokens.
+ List> addedTokens = change.addedTokens();
+ if (addedTokens != null) {
+ for (Token token : addedTokens) {
+ updateElementOffsetAdd(token);
+ }
+ addAll(index, addedTokens);
+ laState = laState.addAll(index, change.laState());
+ change.syncAddedTokenCount();
+ }
+ }
+
+ public boolean isContinuous() {
+ return true;
+ }
+
+ public Set skipTokenIds() {
+ return null;
+ }
+
+ public String toString() {
+ return LexerUtilsConstants.appendTokenList(null, this, -1).toString();
+ }
+
+ }
Index: lexer/src/org/netbeans/lib/lexer/EmbeddingContainer.java
===================================================================
RCS file: lexer/src/org/netbeans/lib/lexer/EmbeddingContainer.java
diff -N lexer/src/org/netbeans/lib/lexer/EmbeddingContainer.java
*** /dev/null 1 Jan 1970 00:00:00 -0000
--- lexer/src/org/netbeans/lib/lexer/EmbeddingContainer.java 28 Nov 2006 14:20:15 -0000
***************
*** 0 ****
--- 1,330 ----
+ /*
+ * The contents of this file are subject to the terms of the Common Development
+ * and Distribution License (the License). You may not use this file except in
+ * compliance with the License.
+ *
+ * You can obtain a copy of the License at http://www.netbeans.org/cddl.html
+ * or http://www.netbeans.org/cddl.txt.
+ *
+ * When distributing Covered Code, include this CDDL Header Notice in each file
+ * and include the License file at http://www.netbeans.org/cddl.txt.
+ * If applicable, add the following below the CDDL Header, with the fields
+ * enclosed by brackets [] replaced by your own identifying information:
+ * "Portions Copyrighted [year] [name of copyright owner]"
+ *
+ * The Original Software is NetBeans. The Initial Developer of the Original
+ * Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun
+ * Microsystems, Inc. All Rights Reserved.
+ */
+
+ package org.netbeans.lib.lexer;
+
+ import org.netbeans.api.lexer.Language;
+ import org.netbeans.api.lexer.LanguagePath;
+ import org.netbeans.api.lexer.TokenHierarchyEventType;
+ import org.netbeans.api.lexer.TokenId;
+ import org.netbeans.lib.lexer.TokenHierarchyOperation;
+ import org.netbeans.lib.lexer.inc.TokenChangeInfo;
+ import org.netbeans.lib.lexer.inc.TokenHierarchyEventInfo;
+ import org.netbeans.spi.lexer.LanguageEmbedding;
+ import org.netbeans.spi.lexer.LanguageHierarchy;
+ import org.netbeans.lib.lexer.token.AbstractToken;
+
+
+ /**
+ * Embedding info contains information about all the embeddings
+ * for a particular token in a token list.
+ *
+ * There can be one or more {@link EmbeddedTokenList} instances for each
+ * contained embedding.
+ *
+ * There is an intent to not degrade performance significantly
+ * with each extra language embedding level so the token list maintains direct
+ * link to the root level.
+ *
+ * @author Miloslav Metelka
+ * @version 1.00
+ */
+
+ public final class EmbeddingContainer {
+
+ /** Flag for additional correctness checks (may degrade performance). */
+ private static final boolean testing = Boolean.getBoolean("netbeans.debug.lexer.test");
+
+ /**
+ * Get embedded token list.
+ *
+ * @param tokenList non-null token list in which the token for which the embedding
+ * should be obtained resides.
+ * @param index >=0 index of the token in the token list where the embedding
+ * should be obtained.
+ * @param language whether only language embedding of the particular language
+ * was requested. It may be null if any embedding should be returned.
+ */
+ public static EmbeddedTokenList getEmbedding(
+ TokenList tokenList, int index, Language language) {
+ EmbeddingContainer ec;
+ AbstractToken token;
+ EmbeddedTokenList extends TokenId> lastEtl = null;
+ synchronized (tokenList.root()) {
+ Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index);
+ if (tokenOrEmbeddingContainer.getClass() == EmbeddingContainer.class) {
+ // Embedding container exists
+ @SuppressWarnings("unchecked")
+ EmbeddingContainer ecUC = (EmbeddingContainer)tokenOrEmbeddingContainer;
+ ec = ecUC;
+ ec.updateOffsets();
+
+ EmbeddedTokenList extends TokenId> etl = ec.firstEmbedding();
+ while (etl != null) {
+ if (language == null || etl.languagePath().innerLanguage() == language) {
+ @SuppressWarnings("unchecked")
+ EmbeddedTokenList etlUC = (EmbeddedTokenList)etl;
+ return etlUC;
+ }
+ lastEtl = etl;
+ etl = etl.nextEmbedding();
+ }
+ token = ec.token();
+ } else {
+ ec = null;
+ @SuppressWarnings("unchecked")
+ AbstractToken t = (AbstractToken)tokenOrEmbeddingContainer;
+ token = t;
+ if (token.isFlyweight()) { // embedding cannot exist for this flyweight token
+ return null;
+ }
+ }
+
+ // Attempt to find default embedding
+ LanguagePath languagePath = tokenList.languagePath();
+ LanguageHierarchy languageHierarchy
+ = LexerUtilsConstants.mostEmbeddedLanguageHierarchy(languagePath);
+ @SuppressWarnings("unchecked")
+ LanguageEmbedding embedding = (LanguageEmbedding)LexerSpiPackageAccessor.get().embedding(
+ languageHierarchy, token, languagePath, tokenList.inputAttributes());
+
+ if (embedding == null) {
+ // try language embeddings registered in Lookup
+ @SuppressWarnings("unchecked")
+ LanguageEmbedding embeddingUC = (LanguageEmbedding)
+ LanguageManager.getInstance().findLanguageEmbedding(
+ token, languagePath, tokenList.inputAttributes());
+ embedding = embeddingUC;
+ }
+
+ if (embedding != null && (language == null || language == embedding.language())) {
+ if (ec == null) {
+ ec = new EmbeddingContainer(token);
+ tokenList.wrapToken(index, ec);
+ }
+ LanguagePath embeddedLanguagePath = LanguagePath.get(languagePath,
+ embedding.language());
+ EmbeddedTokenList etl = new EmbeddedTokenList(ec,
+ embeddedLanguagePath, embedding, null);
+ if (lastEtl != null)
+ lastEtl.setNextEmbedding(etl);
+ else
+ ec.setFirstEmbedding(etl);
+ return etl;
+ }
+ return null;
+ }
+ }
+
+ /**
+ * Create custom embedding.
+ *
+ * @param tokenList non-null token list in which the token for which the embedding
+ * should be created resides.
+ * @param index >=0 index of the token in the token list where the embedding
+ * should be created.
+ * @param embeddedLanguage non-null embedded language.
+ * @param startSkipLength >=0 number of characters in an initial part of the token
+ * for which the language embedding is being created that should be excluded
+ * from the embedded section. The excluded characters will not be lexed
+ * and there will be no tokens created for them.
+ * @param endSkipLength >=0 number of characters at the end of the token
+ * for which the language embedding is defined that should be excluded
+ * from the embedded section. The excluded characters will not be lexed
+ * and there will be no tokens created for them.
+ */
+ public static boolean createEmbedding(
+ TokenList tokenList, int index, Language embeddedLanguage,
+ int startSkipLength, int endSkipLength, boolean joinSections) {
+ synchronized (tokenList.root()) {
+ TokenHierarchyOperation,?> tokenHierarchyOperation = tokenList.tokenHierarchyOperation();
+ // Only create embeddings for valid operations so not e.g. for removed token list
+ if (tokenHierarchyOperation == null) {
+ return false;
+ }
+ Object tokenOrEmbeddingContainer = tokenList.tokenOrEmbeddingContainer(index);
+ EmbeddingContainer ec;
+ AbstractToken token;
+ if (tokenOrEmbeddingContainer.getClass() == EmbeddingContainer.class) {
+ // Embedding container exists
+ @SuppressWarnings("unchecked")
+ EmbeddingContainer ecUC = (EmbeddingContainer)tokenOrEmbeddingContainer;
+ ec = ecUC;
+ EmbeddedTokenList extends TokenId> etl = ec.firstEmbedding();
+ while (etl != null) {
+ if (embeddedLanguage == etl.languagePath().innerLanguage()) {
+ return false; // already exists
+ }
+ etl = etl.nextEmbedding();
+ }
+ token = ec.token();
+ } else {
+ @SuppressWarnings("unchecked")
+ AbstractToken t = (AbstractToken)tokenOrEmbeddingContainer;
+ token = t;
+ if (token.isFlyweight()) { // embedding cannot exist for this flyweight token
+ return false;
+ }
+ ec = new EmbeddingContainer(token);
+ tokenList.wrapToken(index, ec);
+ }
+
+ // Add the new embedding as the first one in the single-linked list
+ LanguageEmbedding embedding = LanguageEmbedding.create(embeddedLanguage,
+ startSkipLength, endSkipLength, joinSections);
+ LanguagePath languagePath = tokenList.languagePath();
+ LanguagePath embeddedLanguagePath = LanguagePath.get(languagePath, embeddedLanguage);
+ // Make the embedded token list to be the first in the list
+ EmbeddedTokenList etl = new EmbeddedTokenList(
+ ec, embeddedLanguagePath, embedding, ec.firstEmbedding());
+ ec.setFirstEmbedding(etl);
+ // Increment mod count? - not in this case
+
+ // Fire the embedding creation to the clients
+ // Threading model may need to be changed if necessary
+ int aOffset = ec.tokenStartOffset();
+ TokenHierarchyEventInfo eventInfo = new TokenHierarchyEventInfo(
+ tokenHierarchyOperation,
+ TokenHierarchyEventType.EMBEDDING,
+ aOffset, 0, "", 0
+ );
+ eventInfo.setAffectedStartOffset(aOffset);
+ eventInfo.setAffectedEndOffset(aOffset + token.length());
+ // Construct outer token change info
+ TokenChangeInfo info = new TokenChangeInfo(tokenList);
+ info.setIndex(index);
+ info.setOffset(aOffset);
+ //info.setAddedTokenCount(0);
+ eventInfo.setTokenChangeInfo(info);
+
+ TokenChangeInfo embeddedInfo = new TokenChangeInfo(etl);
+ embeddedInfo.setIndex(0);
+ embeddedInfo.setOffset(aOffset + embedding.startSkipLength());
+ // Should set number of added tokens directly?
+ // - would prevent further lazy embedded lexing so leave to zero for now
+ //info.setAddedTokenCount(0);
+ info.addEmbeddedChange(embeddedInfo);
+
+ // Fire the change
+ tokenHierarchyOperation.fireTokenHierarchyChanged(
+ LexerApiPackageAccessor.get().createTokenChangeEvent(eventInfo));
+ }
+ return true;
+ }
+
+ private final AbstractToken token; // 12 bytes (8-super + 4)
+
+ /**
+ * Cached modification count allows to determine whether the start offset
+ * needs to be recomputed.
+ */
+ private int cachedModCount; // 16 bytes
+
+ /**
+ * For mutable environment this field contains root token list of the hierarchy.
+ *
+ */
+ private final TokenList extends TokenId> rootTokenList; // 20 bytes
+
+ /**
+ * The token in the root token list to which this embedding container relates.
+ *
+ * For first-level embedding it is the same like value of branchToken variable
+ * but for deeper embeddings it points to the corresponding branch token
+ * in the root token list.
+ *
+ * It's used for getting of the start offset of the contained tokens
+ * and for getting of their text.
+ */
+ private final AbstractToken extends TokenId> rootToken; // 24 bytes
+
+ /**
+ * Cached start offset of the token for which this embedding container
+ * was created.
+ */
+ private int tokenStartOffset; // 28 bytes
+
+ /**
+ * First embedded token list in the single-linked list.
+ */
+ private EmbeddedTokenList extends TokenId> firstEmbedding; // 32 bytes
+
+ /**
+ * Difference between start offset of the first token in this token list
+ * against the start offset of the root token.
+ *
+ * The offset gets refreshed upon updateStartOffset().
+ */
+ private int rootTokenOffsetShift; // 52 bytes
+
+
+ public EmbeddingContainer(AbstractToken token) {
+ this.token = token;
+ TokenList embeddedTokenList = token.tokenList();
+ this.rootTokenList = embeddedTokenList.root();
+ this.rootToken = (embeddedTokenList.getClass() == EmbeddedTokenList.class)
+ ? ((EmbeddedTokenList extends TokenId>)embeddedTokenList).rootToken()
+ : token;
+ this.cachedModCount = -2; // must differ from root's one to sync offsets
+ updateOffsets();
+ }
+
+ public void updateOffsets() {
+ synchronized (rootTokenList) {
+ if (cachedModCount != rootTokenList.modCount()) {
+ cachedModCount = rootTokenList.modCount();
+ tokenStartOffset = token.offset(null);
+ rootTokenOffsetShift = tokenStartOffset - rootToken.offset(null);
+ }
+ }
+ }
+
+ public AbstractToken token() {
+ return token;
+ }
+
+ public TokenList extends TokenId> rootTokenList() {
+ return rootTokenList;
+ }
+
+ public AbstractToken extends TokenId> rootToken() {
+ return rootToken;
+ }
+
+ public int tokenStartOffset() {
+ return tokenStartOffset;
+ }
+
+ public int rootTokenOffsetShift() {
+ return rootTokenOffsetShift;
+ }
+
+ public char charAt(int tokenRelOffset) {
+ return rootToken.charAt(rootTokenOffsetShift + tokenRelOffset);
+ }
+
+ public EmbeddedTokenList extends TokenId> firstEmbedding() {
+ return firstEmbedding;
+ }
+
+ void setFirstEmbedding(EmbeddedTokenList extends TokenId> firstEmbedding) {
+ this.firstEmbedding = firstEmbedding;
+ }
+
+ }
Index: lexer/src/org/netbeans/lib/lexer/LanguageManager.java
===================================================================
RCS file: /cvs/lexer/src/org/netbeans/lib/lexer/LanguageManager.java,v
retrieving revision 1.6
diff -c -r1.6 LanguageManager.java
*** lexer/src/org/netbeans/lib/lexer/LanguageManager.java 26 Oct 2006 20:45:23 -0000 1.6
--- lexer/src/org/netbeans/lib/lexer/LanguageManager.java 28 Nov 2006 14:20:15 -0000
***************
*** 60,76 ****
}
}.language();
! private static final LanguageEmbedding NO_LANG_EMBEDDING = new LanguageEmbedding() {
! public int endSkipLength() {
! return 0;
! }
! public Language extends TokenId> language() {
! return NO_LANG;
! }
! public int startSkipLength() {
! return 0;
! }
! };
private static LanguageManager instance = null;
--- 60,67 ----
}
}.language();
! private static final LanguageEmbedding NO_LANG_EMBEDDING
! = LanguageEmbedding.create(NO_LANG, 0, 0);
private static LanguageManager instance = null;
***************
*** 86,93 ****
private List providers = Collections.emptyList();
private HashMap>> langCache
= new HashMap>>();
! private WeakHashMap