commit c4cf274cd3e033cd612cb4ccf074148f491d0b79
parent 1593aed02f8936ce2ed4d56a6d85bb7b79c62d4b
Author: simos.lists <simos.lists@70737e48-4f4a-0410-8df8-290828ad50c4>
Date: Fri, 9 May 2008 11:03:17 +0000
Updated grammar
git-svn-id: http://keyboardlayouteditor.googlecode.com/svn/trunk@12 70737e48-4f4a-0410-8df8-290828ad50c4
Diffstat:
6 files changed, 1127 insertions(+), 1234 deletions(-)
diff --git a/XKBGrammar/XKBGrammar.g b/XKBGrammar/XKBGrammar.g
@@ -1,3 +1,7 @@
+// XKB Grammar (X.org)
+// Written by Simos Xenitellis <simos.lists@googlemail.com>, 2008.
+// Version 0.2
+
grammar XKBGrammar;
options
@@ -8,7 +12,7 @@ options
tokens
{
- // attributes
+ // attributes [TODO: check terminology]
TOKEN_DEFAULT = 'default';
TOKEN_HIDDEN = 'hidden';
TOKEN_PARTIAL = 'partial';
@@ -17,12 +21,11 @@ tokens
TOKEN_ALTERNATE_GROUP = 'alternate_group';
TOKEN_XKB_SYMBOLS = 'xkb_symbols';
- // Keywords
+ // Keywords [TODO: check terminology]
TOKEN_INCLUDE = 'include';
TOKEN_KEY_TYPE = 'key.type';
TOKEN_NAME = 'name';
TOKEN_KEY = 'key';
- TOKEN_MODIFIER_MAP = 'modifier_map';
// Punctuators
LBRACKET = '[';
@@ -37,82 +40,85 @@ tokens
EQUAL = '=';
LOWERTHAN = '<';
GREATERTHAN = '>';
+ DOT = '.';
// HYPHEN = '-';
- // SPACE = ' ';
+ // SPACE = ' ';
// UNDERSCORE = '_';
- DOT = '.';
+ // Tokens used for the AST.
ATTRIBUTES;
ATTRIBUTE;
INCLUDE;
- NAME;
KEY;
KEYTYPE;
SECTION;
SECTIONNAME;
}
-layout : section* EOF!
- ;
+// We cover XKB files that look like
+//
+// // comments can appear here.
+// one or more modifiers "mysectionname"
+// {
+// // comments can appear here.
+// include "somename" // comments can also appear here.
+// name[somestring] = "sometext";
+// key.type[someotherstring] = "someothertext";
+// key <someotherstring> { [ somesymbol, someothersymbol, ... uptoEightSymbols ] };
+// modifier_map someothertext { somesymbol, someothersymbol, ... uptoEightSymbols };
+// // can also have multiples of the above.
+// };
+//
+// // can have several sections as above.
+
+layout : section* EOF!
+ ;
-section
- :
- preamble sectionmaterial
- { print '}' }
- -> ^(SECTION)
- ;
-
-preamble : attribute_xkb+ sectionname=quotedstring
- { print '%(sname)s {' % { "sname": $sectionname.text } }
- ;
+section : preamble sectionmaterial
+ { print '}' }
+ -> ^(SECTION)
+ ;
-quotedstring returns [value]
- : DQUOTE sectionname+=~(DQUOTE)+ DQUOTE
-{
-qstring = ['"']
-for elem in $sectionname:
- qstring.append(elem.getText())
-qstring.append('"')
-$value = "".join(qstring)
-}
- ;
+preamble : attribute_xkb+ sectionname=quotedstring
+ { print '\%(sname)s {' \% { "sname": $sectionname.text } }
+ ;
-sectionmaterial
- : LCURLY (line_include
- | line_name
- | line_keytype
- | line_key
-// | line_modifiermap
- | line_comment)+ RCURLY SEMICOLON
- ;
-
-line_comment
- : COMMENT { skip(); } ;
+quotedstring returns [value]
+ : DQUOTE sectionname+=~(DQUOTE)+ DQUOTE
+ {
+ qstring = ['"']
+ for elem in $sectionname:
+ qstring.append(elem.getText())
+ qstring.append('"')
+ $value = "".join(qstring)
+ }
+ ;
+
+sectionmaterial : LCURLY (line_include
+ | line_name
+ | line_keytype
+ | line_key
+ )+ RCURLY SEMICOLON
+ ;
line_include
- //: KEYWORD_INCLUDE DQUOTE NAME_INCLUDE DQUOTE COMMENT*
: TOKEN_INCLUDE include=quotedstring
- { print '\tinclude %(inc)s' % { "inc": $include.text } }
+ { print '\tinclude \%(inc)s' \% { "inc": $include.text } }
;
line_name
: TOKEN_NAME LBRACKET name=NAME RBRACKET EQUAL nameval=quotedstring SEMICOLON
- { print '\tname[\%(name)s] = %(nameval)s;' % { "name": $name.text, "nameval": $nameval.text } }
+ { print '\tname[\%(name)s] = \%(nameval)s;' \% { "name": $name.text, "nameval": $nameval.text } }
;
line_keytype
: TOKEN_KEY_TYPE LBRACKET keytype=NAME RBRACKET EQUAL DQUOTE keytypevalue=NAME DQUOTE SEMICOLON
- { print '\tkey.type[\%(kt)s] = \"%(ktv)s\";' % { "kt": $keytype.text, "ktv": $keytypevalue.text } }
+ { print '\tkey.type[\%(kt)s] = \"\%(ktv)s\";' \% { "kt": $keytype.text, "ktv": $keytypevalue.text } }
;
-// line_modifiermap
-// : TOKEN_MODIFIER_MAP mapname=NAME mapsyms SEMICOLON
-// { print "\tmodifier_map \%(mapname)s %(mapsyms)s ;" % { "mapname": $mapname.text, "mapsyms": $mapsyms.text } }
-// ;
-
line_key
: TOKEN_KEY keycode keysyms SEMICOLON
- { print "\tkey \%(keycode)s %(keysyms)s ;" % { "keycode": $keycode.text, "keysyms": $keysyms.text } }
+ { print "\tkey \%(keycode)s \%(keysyms)s ;" \% { "keycode": $keycode.text, "keysyms": $keysyms.text } }
;
keycode
@@ -121,7 +127,7 @@ keycode
;
keysyms
- : LCURLY LBRACKET (NAME|NAME_KEYSYM) (COMMA (NAME|NAME_KEYSYM))* RBRACKET RCURLY
+ : LCURLY LBRACKET NAME (COMMA NAME)* RBRACKET RCURLY
;
// mapsyms
@@ -133,40 +139,20 @@ attribute_xkb
| TOKEN_HIDDEN { print "hidden", }
| TOKEN_PARTIAL { print "partial", }
| TOKEN_ALPHANUMERIC_KEYS { print "alphanumeric_keys", }
-// | TOKEN_MODIFIER_KEYS { print "modifier_keys", }
| TOKEN_ALTERNATE_GROUP { print "alternate_group", }
| TOKEN_XKB_SYMBOLS { print "xkb_symbols", }
-> ^(ATTRIBUTES ATTRIBUTE)
;
-/*
-ATTRIBUTE_XKB
- : 'default'
- | 'hidden'
- | 'partial'
- | 'alphanumeric_keys'
- | 'modifier_keys'
- | 'alternate_group'
- | 'xkb_symbols'
- ;
-*/
-
-NAME
- : ('a'..'z'|'A'..'Z'|'_')('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
- ;
-
-NAME_INCLUDE
- : ('a'..'z'|'A'..'Z'|'_')('a'..'z'|'A'..'Z'|'_'|'('|')'|'0'..'9')*
+fragment GENERIC_NAME
+ : ('a'..'z'|'A'..'Z'|'_')('a'..'z'|'A'..'Z'|'_'|'0'..'9')
;
-NAME_KEYSYM
- : ('0'..'9'|'a'..'z'|'A'..'Z')('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
- ;
-
-NAME_GROUP
- : ('0'..'9'|'a'..'z'|'A'..'Z')('a'..'z'|'A'..'Z'|'_'|'-'|'.'|'0'..'9')*
+NAME
+ : ('a'..'z'|'A'..'Z'|'_'|'('|')'|'0'..'9')*
;
+// Comments are currently ignored.
COMMENT : '//' (~('\n'|'\r'))*
{ $channel = HIDDEN; }
;
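
For reference, this is the kind of input the updated grammar is meant to accept, following the shape sketched in the grammar's header comment. The fragment below is illustrative only and is not part of this commit; the section name, group name, and keysym names are made up. Note that modifier_map lines are not covered by this revision, since TOKEN_MODIFIER_MAP and line_modifiermap were dropped.

    partial alphanumeric_keys
    xkb_symbols "basic"
    {
        include "latin"
        name[Group1] = "Example";
        key.type[Group1] = "FOUR_LEVEL";
        key <AD01> { [ somesym, someothersym ] };
    };
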
diff --git a/XKBGrammar/XKBGrammar.tokens b/XKBGrammar/XKBGrammar.tokens
@@ -1,64 +1,60 @@
TOKEN_ALTERNATE_GROUP=9
-ATTRIBUTES=29
-SECTION=35
+ATTRIBUTES=28
+SECTION=33
TOKEN_INCLUDE=11
-KEY=33
-KEYTYPE=34
-ATTRIBUTE=30
+KEY=31
+KEYTYPE=32
+ATTRIBUTE=29
TOKEN_NAME=13
-DQUOTE=21
-LCURLY=18
-SEMICOLON=24
-TOKEN_MODIFIER_MAP=15
-NAME_INCLUDE=39
-MINUS=22
+DQUOTE=20
+LCURLY=17
+SEMICOLON=23
+MINUS=21
TOKEN_XKB_SYMBOLS=10
-SECTIONNAME=36
-NAME_KEYSYM=38
-NAME_GROUP=40
-LBRACKET=16
-NAME=32
+GENERIC_NAME=36
+SECTIONNAME=34
+LBRACKET=15
+NAME=35
TOKEN_PARTIAL=6
-WS=42
-NEWLINE=41
+WS=39
+NEWLINE=38
TOKEN_ALPHANUMERIC_KEYS=7
TOKEN_HIDDEN=5
-COMMA=20
-LOWERTHAN=26
-INCLUDE=31
-EQUAL=25
-RCURLY=19
+COMMA=19
+LOWERTHAN=25
+INCLUDE=30
+EQUAL=24
+RCURLY=18
TOKEN_MODIFIER_KEYS=8
-PLUS=23
+PLUS=22
TOKEN_KEY=14
-RBRACKET=17
+RBRACKET=16
COMMENT=37
-DOT=28
+DOT=27
TOKEN_DEFAULT=4
TOKEN_KEY_TYPE=12
-GREATERTHAN=27
+GREATERTHAN=26
'alphanumeric_keys'=7
+'"'=20
+'}'=18
'alternate_group'=9
-'>'=27
-'include'=11
-'hidden'=5
-';'=24
-'='=25
-'xkb_symbols'=10
-'+'=23
-'.'=28
-'"'=21
-'}'=19
'key'=14
'partial'=6
-'{'=18
+'>'=26
+'{'=17
+'include'=11
+'hidden'=5
'modifier_keys'=8
+';'=23
+'='=24
+'<'=25
'key.type'=12
-'<'=26
-'['=16
-'-'=22
+'xkb_symbols'=10
+'-'=21
+'['=15
+'+'=22
'name'=13
-','=20
-'modifier_map'=15
+','=19
+'.'=27
'default'=4
-']'=17
+']'=16
diff --git a/XKBGrammar/XKBGrammarLexer.py b/XKBGrammar/XKBGrammarLexer.py
@@ -1,4 +1,4 @@
-# $ANTLR 3.0.1 XKBGrammar.g 2008-05-08 01:14:04
+# $ANTLR 3.0.1 XKBGrammar.g 2008-05-09 12:01:09
from antlr3 import *
from antlr3.compat import set, frozenset
@@ -8,47 +8,44 @@ from antlr3.compat import set, frozenset
HIDDEN = BaseRecognizer.HIDDEN
# token types
-ATTRIBUTES=29
+TOKEN_ALTERNATE_GROUP=9
+ATTRIBUTES=28
+SECTION=33
TOKEN_INCLUDE=11
-ATTRIBUTE=30
-DQUOTE=21
-TOKEN_MODIFIER_MAP=15
+KEY=31
+KEYTYPE=32
+ATTRIBUTE=29
+TOKEN_NAME=13
+DQUOTE=20
+LCURLY=17
+SEMICOLON=23
+MINUS=21
TOKEN_XKB_SYMBOLS=10
-NAME_INCLUDE=39
+Tokens=40
EOF=-1
-SECTIONNAME=36
-NAME_KEYSYM=38
-NAME_GROUP=40
-LBRACKET=16
-NAME=32
+GENERIC_NAME=36
+SECTIONNAME=34
+LBRACKET=15
TOKEN_PARTIAL=6
-COMMA=20
-INCLUDE=31
-EQUAL=25
-PLUS=23
-RBRACKET=17
-COMMENT=37
-DOT=28
-TOKEN_DEFAULT=4
-GREATERTHAN=27
-TOKEN_ALTERNATE_GROUP=9
-SECTION=35
-KEY=33
-KEYTYPE=34
-TOKEN_NAME=13
-LCURLY=18
-SEMICOLON=24
-MINUS=22
-Tokens=43
-WS=42
-NEWLINE=41
-TOKEN_ALPHANUMERIC_KEYS=7
+NAME=35
+WS=39
TOKEN_HIDDEN=5
-LOWERTHAN=26
-RCURLY=19
+TOKEN_ALPHANUMERIC_KEYS=7
+NEWLINE=38
+COMMA=19
+LOWERTHAN=25
+EQUAL=24
+INCLUDE=30
+RCURLY=18
TOKEN_MODIFIER_KEYS=8
+PLUS=22
TOKEN_KEY=14
+RBRACKET=16
+DOT=27
+COMMENT=37
+TOKEN_DEFAULT=4
TOKEN_KEY_TYPE=12
+GREATERTHAN=26
class XKBGrammarLexer(Lexer):
@@ -56,17 +53,6 @@ class XKBGrammarLexer(Lexer):
def __init__(self, input=None):
Lexer.__init__(self, input)
- self.dfa7 = self.DFA7(
- self, 7,
- eot = self.DFA7_eot,
- eof = self.DFA7_eof,
- min = self.DFA7_min,
- max = self.DFA7_max,
- accept = self.DFA7_accept,
- special = self.DFA7_special,
- transition = self.DFA7_transition
- )
-
@@ -325,37 +311,14 @@ class XKBGrammarLexer(Lexer):
- # $ANTLR start TOKEN_MODIFIER_MAP
- def mTOKEN_MODIFIER_MAP(self, ):
-
- try:
- self.type = TOKEN_MODIFIER_MAP
-
- # XKBGrammar.g:18:20: ( 'modifier_map' )
- # XKBGrammar.g:18:22: 'modifier_map'
- self.match("modifier_map")
-
-
-
-
-
-
- finally:
-
- pass
-
- # $ANTLR end TOKEN_MODIFIER_MAP
-
-
-
# $ANTLR start LBRACKET
def mLBRACKET(self, ):
try:
self.type = LBRACKET
- # XKBGrammar.g:19:10: ( '[' )
- # XKBGrammar.g:19:12: '['
+ # XKBGrammar.g:18:10: ( '[' )
+ # XKBGrammar.g:18:12: '['
self.match(u'[')
@@ -376,8 +339,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = RBRACKET
- # XKBGrammar.g:20:10: ( ']' )
- # XKBGrammar.g:20:12: ']'
+ # XKBGrammar.g:19:10: ( ']' )
+ # XKBGrammar.g:19:12: ']'
self.match(u']')
@@ -398,8 +361,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = LCURLY
- # XKBGrammar.g:21:8: ( '{' )
- # XKBGrammar.g:21:10: '{'
+ # XKBGrammar.g:20:8: ( '{' )
+ # XKBGrammar.g:20:10: '{'
self.match(u'{')
@@ -420,8 +383,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = RCURLY
- # XKBGrammar.g:22:8: ( '}' )
- # XKBGrammar.g:22:10: '}'
+ # XKBGrammar.g:21:8: ( '}' )
+ # XKBGrammar.g:21:10: '}'
self.match(u'}')
@@ -442,8 +405,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = COMMA
- # XKBGrammar.g:23:7: ( ',' )
- # XKBGrammar.g:23:9: ','
+ # XKBGrammar.g:22:7: ( ',' )
+ # XKBGrammar.g:22:9: ','
self.match(u',')
@@ -464,8 +427,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = DQUOTE
- # XKBGrammar.g:24:8: ( '\"' )
- # XKBGrammar.g:24:10: '\"'
+ # XKBGrammar.g:23:8: ( '\"' )
+ # XKBGrammar.g:23:10: '\"'
self.match(u'"')
@@ -486,8 +449,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = MINUS
- # XKBGrammar.g:25:7: ( '-' )
- # XKBGrammar.g:25:9: '-'
+ # XKBGrammar.g:24:7: ( '-' )
+ # XKBGrammar.g:24:9: '-'
self.match(u'-')
@@ -508,8 +471,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = PLUS
- # XKBGrammar.g:26:6: ( '+' )
- # XKBGrammar.g:26:8: '+'
+ # XKBGrammar.g:25:6: ( '+' )
+ # XKBGrammar.g:25:8: '+'
self.match(u'+')
@@ -530,8 +493,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = SEMICOLON
- # XKBGrammar.g:27:11: ( ';' )
- # XKBGrammar.g:27:13: ';'
+ # XKBGrammar.g:26:11: ( ';' )
+ # XKBGrammar.g:26:13: ';'
self.match(u';')
@@ -552,8 +515,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = EQUAL
- # XKBGrammar.g:28:7: ( '=' )
- # XKBGrammar.g:28:9: '='
+ # XKBGrammar.g:27:7: ( '=' )
+ # XKBGrammar.g:27:9: '='
self.match(u'=')
@@ -574,8 +537,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = LOWERTHAN
- # XKBGrammar.g:29:11: ( '<' )
- # XKBGrammar.g:29:13: '<'
+ # XKBGrammar.g:28:11: ( '<' )
+ # XKBGrammar.g:28:13: '<'
self.match(u'<')
@@ -596,8 +559,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = GREATERTHAN
- # XKBGrammar.g:30:13: ( '>' )
- # XKBGrammar.g:30:15: '>'
+ # XKBGrammar.g:29:13: ( '>' )
+ # XKBGrammar.g:29:15: '>'
self.match(u'>')
@@ -618,8 +581,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = DOT
- # XKBGrammar.g:31:5: ( '.' )
- # XKBGrammar.g:31:7: '.'
+ # XKBGrammar.g:30:5: ( '.' )
+ # XKBGrammar.g:30:7: '.'
self.match(u'.')
@@ -634,14 +597,12 @@ class XKBGrammarLexer(Lexer):
- # $ANTLR start NAME
- def mNAME(self, ):
+ # $ANTLR start GENERIC_NAME
+ def mGENERIC_NAME(self, ):
try:
- self.type = NAME
-
- # XKBGrammar.g:155:2: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* )
- # XKBGrammar.g:155:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )*
+ # XKBGrammar.g:148:2: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' ) )
+ # XKBGrammar.g:148:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )
if (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
self.input.consume();
@@ -651,108 +612,7 @@ class XKBGrammarLexer(Lexer):
raise mse
- # XKBGrammar.g:155:27: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )*
- while True: #loop1
- alt1 = 2
- LA1_0 = self.input.LA(1)
-
- if ((u'0' <= LA1_0 <= u'9') or (u'A' <= LA1_0 <= u'Z') or LA1_0 == u'_' or (u'a' <= LA1_0 <= u'z')) :
- alt1 = 1
-
-
- if alt1 == 1:
- # XKBGrammar.g:
- if (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
- self.input.consume();
-
- else:
- mse = MismatchedSetException(None, self.input)
- self.recover(mse)
- raise mse
-
-
-
-
- else:
- break #loop1
-
-
-
-
-
-
- finally:
-
- pass
-
- # $ANTLR end NAME
-
-
-
- # $ANTLR start NAME_INCLUDE
- def mNAME_INCLUDE(self, ):
-
- try:
- self.type = NAME_INCLUDE
-
- # XKBGrammar.g:159:2: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' )* )
- # XKBGrammar.g:159:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' )*
- if (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
- self.input.consume();
-
- else:
- mse = MismatchedSetException(None, self.input)
- self.recover(mse)
- raise mse
-
-
- # XKBGrammar.g:159:27: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' )*
- while True: #loop2
- alt2 = 2
- LA2_0 = self.input.LA(1)
-
- if ((u'(' <= LA2_0 <= u')') or (u'0' <= LA2_0 <= u'9') or (u'A' <= LA2_0 <= u'Z') or LA2_0 == u'_' or (u'a' <= LA2_0 <= u'z')) :
- alt2 = 1
-
-
- if alt2 == 1:
- # XKBGrammar.g:
- if (u'(' <= self.input.LA(1) <= u')') or (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
- self.input.consume();
-
- else:
- mse = MismatchedSetException(None, self.input)
- self.recover(mse)
- raise mse
-
-
-
-
- else:
- break #loop2
-
-
-
-
-
-
- finally:
-
- pass
-
- # $ANTLR end NAME_INCLUDE
-
-
-
- # $ANTLR start NAME_KEYSYM
- def mNAME_KEYSYM(self, ):
-
- try:
- self.type = NAME_KEYSYM
-
- # XKBGrammar.g:163:2: ( ( '0' .. '9' | 'a' .. 'z' | 'A' .. 'Z' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* )
- # XKBGrammar.g:163:4: ( '0' .. '9' | 'a' .. 'z' | 'A' .. 'Z' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )*
- if (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or (u'a' <= self.input.LA(1) <= u'z'):
+ if (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
self.input.consume();
else:
@@ -761,32 +621,6 @@ class XKBGrammarLexer(Lexer):
raise mse
- # XKBGrammar.g:163:32: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )*
- while True: #loop3
- alt3 = 2
- LA3_0 = self.input.LA(1)
-
- if ((u'0' <= LA3_0 <= u'9') or (u'A' <= LA3_0 <= u'Z') or LA3_0 == u'_' or (u'a' <= LA3_0 <= u'z')) :
- alt3 = 1
-
-
- if alt3 == 1:
- # XKBGrammar.g:
- if (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
- self.input.consume();
-
- else:
- mse = MismatchedSetException(None, self.input)
- self.recover(mse)
- raise mse
-
-
-
-
- else:
- break #loop3
-
-
@@ -795,39 +629,30 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end NAME_KEYSYM
+ # $ANTLR end GENERIC_NAME
- # $ANTLR start NAME_GROUP
- def mNAME_GROUP(self, ):
+ # $ANTLR start NAME
+ def mNAME(self, ):
try:
- self.type = NAME_GROUP
-
- # XKBGrammar.g:167:2: ( ( '0' .. '9' | 'a' .. 'z' | 'A' .. 'Z' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '-' | '.' | '0' .. '9' )* )
- # XKBGrammar.g:167:4: ( '0' .. '9' | 'a' .. 'z' | 'A' .. 'Z' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '-' | '.' | '0' .. '9' )*
- if (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or (u'a' <= self.input.LA(1) <= u'z'):
- self.input.consume();
-
- else:
- mse = MismatchedSetException(None, self.input)
- self.recover(mse)
- raise mse
-
+ self.type = NAME
- # XKBGrammar.g:167:32: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '-' | '.' | '0' .. '9' )*
- while True: #loop4
- alt4 = 2
- LA4_0 = self.input.LA(1)
+ # XKBGrammar.g:152:2: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' )* )
+ # XKBGrammar.g:152:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' )*
+ # XKBGrammar.g:152:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' )*
+ while True: #loop1
+ alt1 = 2
+ LA1_0 = self.input.LA(1)
- if ((u'-' <= LA4_0 <= u'.') or (u'0' <= LA4_0 <= u'9') or (u'A' <= LA4_0 <= u'Z') or LA4_0 == u'_' or (u'a' <= LA4_0 <= u'z')) :
- alt4 = 1
+ if ((u'(' <= LA1_0 <= u')') or (u'0' <= LA1_0 <= u'9') or (u'A' <= LA1_0 <= u'Z') or LA1_0 == u'_' or (u'a' <= LA1_0 <= u'z')) :
+ alt1 = 1
- if alt4 == 1:
+ if alt1 == 1:
# XKBGrammar.g:
- if (u'-' <= self.input.LA(1) <= u'.') or (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
+ if (u'(' <= self.input.LA(1) <= u')') or (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
self.input.consume();
else:
@@ -839,7 +664,7 @@ class XKBGrammarLexer(Lexer):
else:
- break #loop4
+ break #loop1
@@ -850,7 +675,7 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end NAME_GROUP
+ # $ANTLR end NAME
@@ -860,22 +685,22 @@ class XKBGrammarLexer(Lexer):
try:
self.type = COMMENT
- # XKBGrammar.g:170:9: ( '//' (~ ( '\\n' | '\\r' ) )* )
- # XKBGrammar.g:170:11: '//' (~ ( '\\n' | '\\r' ) )*
+ # XKBGrammar.g:156:9: ( '//' (~ ( '\\n' | '\\r' ) )* )
+ # XKBGrammar.g:156:11: '//' (~ ( '\\n' | '\\r' ) )*
self.match("//")
- # XKBGrammar.g:170:16: (~ ( '\\n' | '\\r' ) )*
- while True: #loop5
- alt5 = 2
- LA5_0 = self.input.LA(1)
+ # XKBGrammar.g:156:16: (~ ( '\\n' | '\\r' ) )*
+ while True: #loop2
+ alt2 = 2
+ LA2_0 = self.input.LA(1)
- if ((u'\u0000' <= LA5_0 <= u'\t') or (u'\u000B' <= LA5_0 <= u'\f') or (u'\u000E' <= LA5_0 <= u'\uFFFE')) :
- alt5 = 1
+ if ((u'\u0000' <= LA2_0 <= u'\t') or (u'\u000B' <= LA2_0 <= u'\f') or (u'\u000E' <= LA2_0 <= u'\uFFFE')) :
+ alt2 = 1
- if alt5 == 1:
- # XKBGrammar.g:170:17: ~ ( '\\n' | '\\r' )
+ if alt2 == 1:
+ # XKBGrammar.g:156:17: ~ ( '\\n' | '\\r' )
if (u'\u0000' <= self.input.LA(1) <= u'\t') or (u'\u000B' <= self.input.LA(1) <= u'\f') or (u'\u000E' <= self.input.LA(1) <= u'\uFFFE'):
self.input.consume();
@@ -888,7 +713,7 @@ class XKBGrammarLexer(Lexer):
else:
- break #loop5
+ break #loop2
#action start
@@ -912,19 +737,19 @@ class XKBGrammarLexer(Lexer):
try:
self.type = WS
- # XKBGrammar.g:174:9: ( ( '\\t' | ' ' | NEWLINE )+ )
- # XKBGrammar.g:174:17: ( '\\t' | ' ' | NEWLINE )+
- # XKBGrammar.g:174:17: ( '\\t' | ' ' | NEWLINE )+
- cnt6 = 0
- while True: #loop6
- alt6 = 2
- LA6_0 = self.input.LA(1)
+ # XKBGrammar.g:160:9: ( ( '\\t' | ' ' | NEWLINE )+ )
+ # XKBGrammar.g:160:17: ( '\\t' | ' ' | NEWLINE )+
+ # XKBGrammar.g:160:17: ( '\\t' | ' ' | NEWLINE )+
+ cnt3 = 0
+ while True: #loop3
+ alt3 = 2
+ LA3_0 = self.input.LA(1)
- if ((u'\t' <= LA6_0 <= u'\n') or LA6_0 == u'\r' or LA6_0 == u' ') :
- alt6 = 1
+ if ((u'\t' <= LA3_0 <= u'\n') or LA3_0 == u'\r' or LA3_0 == u' ') :
+ alt3 = 1
- if alt6 == 1:
+ if alt3 == 1:
# XKBGrammar.g:
if (u'\t' <= self.input.LA(1) <= u'\n') or self.input.LA(1) == u'\r' or self.input.LA(1) == u' ':
self.input.consume();
@@ -938,13 +763,13 @@ class XKBGrammarLexer(Lexer):
else:
- if cnt6 >= 1:
- break #loop6
+ if cnt3 >= 1:
+ break #loop3
- eee = EarlyExitException(6, self.input)
+ eee = EarlyExitException(3, self.input)
raise eee
- cnt6 += 1
+ cnt3 += 1
#action start
@@ -966,7 +791,7 @@ class XKBGrammarLexer(Lexer):
def mNEWLINE(self, ):
try:
- # XKBGrammar.g:179:9: ( '\\r' | '\\n' )
+ # XKBGrammar.g:165:9: ( '\\r' | '\\n' )
# XKBGrammar.g:
if self.input.LA(1) == u'\n' or self.input.LA(1) == u'\r':
self.input.consume();
@@ -990,191 +815,659 @@ class XKBGrammarLexer(Lexer):
def mTokens(self):
- # XKBGrammar.g:1:8: ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_MODIFIER_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS | TOKEN_INCLUDE | TOKEN_KEY_TYPE | TOKEN_NAME | TOKEN_KEY | TOKEN_MODIFIER_MAP | LBRACKET | RBRACKET | LCURLY | RCURLY | COMMA | DQUOTE | MINUS | PLUS | SEMICOLON | EQUAL | LOWERTHAN | GREATERTHAN | DOT | NAME | NAME_INCLUDE | NAME_KEYSYM | NAME_GROUP | COMMENT | WS )
- alt7 = 31
- alt7 = self.dfa7.predict(self.input)
- if alt7 == 1:
+ # XKBGrammar.g:1:8: ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_MODIFIER_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS | TOKEN_INCLUDE | TOKEN_KEY_TYPE | TOKEN_NAME | TOKEN_KEY | LBRACKET | RBRACKET | LCURLY | RCURLY | COMMA | DQUOTE | MINUS | PLUS | SEMICOLON | EQUAL | LOWERTHAN | GREATERTHAN | DOT | NAME | COMMENT | WS )
+ alt4 = 27
+ LA4 = self.input.LA(1)
+ if LA4 == u'd':
+ LA4_1 = self.input.LA(2)
+
+ if (LA4_1 == u'e') :
+ LA4_26 = self.input.LA(3)
+
+ if (LA4_26 == u'f') :
+ LA4_35 = self.input.LA(4)
+
+ if (LA4_35 == u'a') :
+ LA4_45 = self.input.LA(5)
+
+ if (LA4_45 == u'u') :
+ LA4_56 = self.input.LA(6)
+
+ if (LA4_56 == u'l') :
+ LA4_65 = self.input.LA(7)
+
+ if (LA4_65 == u't') :
+ LA4_73 = self.input.LA(8)
+
+ if ((u'(' <= LA4_73 <= u')') or (u'0' <= LA4_73 <= u'9') or (u'A' <= LA4_73 <= u'Z') or LA4_73 == u'_' or (u'a' <= LA4_73 <= u'z')) :
+ alt4 = 25
+ else:
+ alt4 = 1
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u'h':
+ LA4_2 = self.input.LA(2)
+
+ if (LA4_2 == u'i') :
+ LA4_27 = self.input.LA(3)
+
+ if (LA4_27 == u'd') :
+ LA4_36 = self.input.LA(4)
+
+ if (LA4_36 == u'd') :
+ LA4_46 = self.input.LA(5)
+
+ if (LA4_46 == u'e') :
+ LA4_57 = self.input.LA(6)
+
+ if (LA4_57 == u'n') :
+ LA4_66 = self.input.LA(7)
+
+ if ((u'(' <= LA4_66 <= u')') or (u'0' <= LA4_66 <= u'9') or (u'A' <= LA4_66 <= u'Z') or LA4_66 == u'_' or (u'a' <= LA4_66 <= u'z')) :
+ alt4 = 25
+ else:
+ alt4 = 2
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u'p':
+ LA4_3 = self.input.LA(2)
+
+ if (LA4_3 == u'a') :
+ LA4_28 = self.input.LA(3)
+
+ if (LA4_28 == u'r') :
+ LA4_37 = self.input.LA(4)
+
+ if (LA4_37 == u't') :
+ LA4_47 = self.input.LA(5)
+
+ if (LA4_47 == u'i') :
+ LA4_58 = self.input.LA(6)
+
+ if (LA4_58 == u'a') :
+ LA4_67 = self.input.LA(7)
+
+ if (LA4_67 == u'l') :
+ LA4_75 = self.input.LA(8)
+
+ if ((u'(' <= LA4_75 <= u')') or (u'0' <= LA4_75 <= u'9') or (u'A' <= LA4_75 <= u'Z') or LA4_75 == u'_' or (u'a' <= LA4_75 <= u'z')) :
+ alt4 = 25
+ else:
+ alt4 = 3
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u'a':
+ LA4_4 = self.input.LA(2)
+
+ if (LA4_4 == u'l') :
+ LA4 = self.input.LA(3)
+ if LA4 == u'p':
+ LA4_38 = self.input.LA(4)
+
+ if (LA4_38 == u'h') :
+ LA4_48 = self.input.LA(5)
+
+ if (LA4_48 == u'a') :
+ LA4_59 = self.input.LA(6)
+
+ if (LA4_59 == u'n') :
+ LA4_68 = self.input.LA(7)
+
+ if (LA4_68 == u'u') :
+ LA4_76 = self.input.LA(8)
+
+ if (LA4_76 == u'm') :
+ LA4_83 = self.input.LA(9)
+
+ if (LA4_83 == u'e') :
+ LA4_88 = self.input.LA(10)
+
+ if (LA4_88 == u'r') :
+ LA4_92 = self.input.LA(11)
+
+ if (LA4_92 == u'i') :
+ LA4_96 = self.input.LA(12)
+
+ if (LA4_96 == u'c') :
+ LA4_100 = self.input.LA(13)
+
+ if (LA4_100 == u'_') :
+ LA4_104 = self.input.LA(14)
+
+ if (LA4_104 == u'k') :
+ LA4_107 = self.input.LA(15)
+
+ if (LA4_107 == u'e') :
+ LA4_110 = self.input.LA(16)
+
+ if (LA4_110 == u'y') :
+ LA4_112 = self.input.LA(17)
+
+ if (LA4_112 == u's') :
+ LA4_114 = self.input.LA(18)
+
+ if ((u'(' <= LA4_114 <= u')') or (u'0' <= LA4_114 <= u'9') or (u'A' <= LA4_114 <= u'Z') or LA4_114 == u'_' or (u'a' <= LA4_114 <= u'z')) :
+ alt4 = 25
+ else:
+ alt4 = 4
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u't':
+ LA4_39 = self.input.LA(4)
+
+ if (LA4_39 == u'e') :
+ LA4_49 = self.input.LA(5)
+
+ if (LA4_49 == u'r') :
+ LA4_60 = self.input.LA(6)
+
+ if (LA4_60 == u'n') :
+ LA4_69 = self.input.LA(7)
+
+ if (LA4_69 == u'a') :
+ LA4_77 = self.input.LA(8)
+
+ if (LA4_77 == u't') :
+ LA4_84 = self.input.LA(9)
+
+ if (LA4_84 == u'e') :
+ LA4_89 = self.input.LA(10)
+
+ if (LA4_89 == u'_') :
+ LA4_93 = self.input.LA(11)
+
+ if (LA4_93 == u'g') :
+ LA4_97 = self.input.LA(12)
+
+ if (LA4_97 == u'r') :
+ LA4_101 = self.input.LA(13)
+
+ if (LA4_101 == u'o') :
+ LA4_105 = self.input.LA(14)
+
+ if (LA4_105 == u'u') :
+ LA4_108 = self.input.LA(15)
+
+ if (LA4_108 == u'p') :
+ LA4_111 = self.input.LA(16)
+
+ if ((u'(' <= LA4_111 <= u')') or (u'0' <= LA4_111 <= u'9') or (u'A' <= LA4_111 <= u'Z') or LA4_111 == u'_' or (u'a' <= LA4_111 <= u'z')) :
+ alt4 = 25
+ else:
+ alt4 = 6
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u'm':
+ LA4_5 = self.input.LA(2)
+
+ if (LA4_5 == u'o') :
+ LA4_30 = self.input.LA(3)
+
+ if (LA4_30 == u'd') :
+ LA4_40 = self.input.LA(4)
+
+ if (LA4_40 == u'i') :
+ LA4_50 = self.input.LA(5)
+
+ if (LA4_50 == u'f') :
+ LA4_61 = self.input.LA(6)
+
+ if (LA4_61 == u'i') :
+ LA4_70 = self.input.LA(7)
+
+ if (LA4_70 == u'e') :
+ LA4_78 = self.input.LA(8)
+
+ if (LA4_78 == u'r') :
+ LA4_85 = self.input.LA(9)
+
+ if (LA4_85 == u'_') :
+ LA4_90 = self.input.LA(10)
+
+ if (LA4_90 == u'k') :
+ LA4_94 = self.input.LA(11)
+
+ if (LA4_94 == u'e') :
+ LA4_98 = self.input.LA(12)
+
+ if (LA4_98 == u'y') :
+ LA4_102 = self.input.LA(13)
+
+ if (LA4_102 == u's') :
+ LA4_106 = self.input.LA(14)
+
+ if ((u'(' <= LA4_106 <= u')') or (u'0' <= LA4_106 <= u'9') or (u'A' <= LA4_106 <= u'Z') or LA4_106 == u'_' or (u'a' <= LA4_106 <= u'z')) :
+ alt4 = 25
+ else:
+ alt4 = 5
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u'x':
+ LA4_6 = self.input.LA(2)
+
+ if (LA4_6 == u'k') :
+ LA4_31 = self.input.LA(3)
+
+ if (LA4_31 == u'b') :
+ LA4_41 = self.input.LA(4)
+
+ if (LA4_41 == u'_') :
+ LA4_51 = self.input.LA(5)
+
+ if (LA4_51 == u's') :
+ LA4_62 = self.input.LA(6)
+
+ if (LA4_62 == u'y') :
+ LA4_71 = self.input.LA(7)
+
+ if (LA4_71 == u'm') :
+ LA4_79 = self.input.LA(8)
+
+ if (LA4_79 == u'b') :
+ LA4_86 = self.input.LA(9)
+
+ if (LA4_86 == u'o') :
+ LA4_91 = self.input.LA(10)
+
+ if (LA4_91 == u'l') :
+ LA4_95 = self.input.LA(11)
+
+ if (LA4_95 == u's') :
+ LA4_99 = self.input.LA(12)
+
+ if ((u'(' <= LA4_99 <= u')') or (u'0' <= LA4_99 <= u'9') or (u'A' <= LA4_99 <= u'Z') or LA4_99 == u'_' or (u'a' <= LA4_99 <= u'z')) :
+ alt4 = 25
+ else:
+ alt4 = 7
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u'i':
+ LA4_7 = self.input.LA(2)
+
+ if (LA4_7 == u'n') :
+ LA4_32 = self.input.LA(3)
+
+ if (LA4_32 == u'c') :
+ LA4_42 = self.input.LA(4)
+
+ if (LA4_42 == u'l') :
+ LA4_52 = self.input.LA(5)
+
+ if (LA4_52 == u'u') :
+ LA4_63 = self.input.LA(6)
+
+ if (LA4_63 == u'd') :
+ LA4_72 = self.input.LA(7)
+
+ if (LA4_72 == u'e') :
+ LA4_80 = self.input.LA(8)
+
+ if ((u'(' <= LA4_80 <= u')') or (u'0' <= LA4_80 <= u'9') or (u'A' <= LA4_80 <= u'Z') or LA4_80 == u'_' or (u'a' <= LA4_80 <= u'z')) :
+ alt4 = 25
+ else:
+ alt4 = 8
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u'k':
+ LA4_8 = self.input.LA(2)
+
+ if (LA4_8 == u'e') :
+ LA4_33 = self.input.LA(3)
+
+ if (LA4_33 == u'y') :
+ LA4 = self.input.LA(4)
+ if LA4 == u'.':
+ alt4 = 9
+ elif LA4 == u'(' or LA4 == u')' or LA4 == u'0' or LA4 == u'1' or LA4 == u'2' or LA4 == u'3' or LA4 == u'4' or LA4 == u'5' or LA4 == u'6' or LA4 == u'7' or LA4 == u'8' or LA4 == u'9' or LA4 == u'A' or LA4 == u'B' or LA4 == u'C' or LA4 == u'D' or LA4 == u'E' or LA4 == u'F' or LA4 == u'G' or LA4 == u'H' or LA4 == u'I' or LA4 == u'J' or LA4 == u'K' or LA4 == u'L' or LA4 == u'M' or LA4 == u'N' or LA4 == u'O' or LA4 == u'P' or LA4 == u'Q' or LA4 == u'R' or LA4 == u'S' or LA4 == u'T' or LA4 == u'U' or LA4 == u'V' or LA4 == u'W' or LA4 == u'X' or LA4 == u'Y' or LA4 == u'Z' or LA4 == u'_' or LA4 == u'a' or LA4 == u'b' or LA4 == u'c' or LA4 == u'd' or LA4 == u'e' or LA4 == u'f' or LA4 == u'g' or LA4 == u'h' or LA4 == u'i' or LA4 == u'j' or LA4 == u'k' or LA4 == u'l' or LA4 == u'm' or LA4 == u'n' or LA4 == u'o' or LA4 == u'p' or LA4 == u'q' or LA4 == u'r' or LA4 == u's' or LA4 == u't' or LA4 == u'u' or LA4 == u'v' or LA4 == u'w' or LA4 == u'x' or LA4 == u'y' or LA4 == u'z':
+ alt4 = 25
+ else:
+ alt4 = 11
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u'n':
+ LA4_9 = self.input.LA(2)
+
+ if (LA4_9 == u'a') :
+ LA4_34 = self.input.LA(3)
+
+ if (LA4_34 == u'm') :
+ LA4_44 = self.input.LA(4)
+
+ if (LA4_44 == u'e') :
+ LA4_55 = self.input.LA(5)
+
+ if ((u'(' <= LA4_55 <= u')') or (u'0' <= LA4_55 <= u'9') or (u'A' <= LA4_55 <= u'Z') or LA4_55 == u'_' or (u'a' <= LA4_55 <= u'z')) :
+ alt4 = 25
+ else:
+ alt4 = 10
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ else:
+ alt4 = 25
+ elif LA4 == u'[':
+ alt4 = 12
+ elif LA4 == u']':
+ alt4 = 13
+ elif LA4 == u'{':
+ alt4 = 14
+ elif LA4 == u'}':
+ alt4 = 15
+ elif LA4 == u',':
+ alt4 = 16
+ elif LA4 == u'"':
+ alt4 = 17
+ elif LA4 == u'-':
+ alt4 = 18
+ elif LA4 == u'+':
+ alt4 = 19
+ elif LA4 == u';':
+ alt4 = 20
+ elif LA4 == u'=':
+ alt4 = 21
+ elif LA4 == u'<':
+ alt4 = 22
+ elif LA4 == u'>':
+ alt4 = 23
+ elif LA4 == u'.':
+ alt4 = 24
+ elif LA4 == u'/':
+ alt4 = 26
+ elif LA4 == u'\t' or LA4 == u'\n' or LA4 == u'\r' or LA4 == u' ':
+ alt4 = 27
+ else:
+ alt4 = 25
+ if alt4 == 1:
# XKBGrammar.g:1:10: TOKEN_DEFAULT
self.mTOKEN_DEFAULT()
- elif alt7 == 2:
+ elif alt4 == 2:
# XKBGrammar.g:1:24: TOKEN_HIDDEN
self.mTOKEN_HIDDEN()
- elif alt7 == 3:
+ elif alt4 == 3:
# XKBGrammar.g:1:37: TOKEN_PARTIAL
self.mTOKEN_PARTIAL()
- elif alt7 == 4:
+ elif alt4 == 4:
# XKBGrammar.g:1:51: TOKEN_ALPHANUMERIC_KEYS
self.mTOKEN_ALPHANUMERIC_KEYS()
- elif alt7 == 5:
+ elif alt4 == 5:
# XKBGrammar.g:1:75: TOKEN_MODIFIER_KEYS
self.mTOKEN_MODIFIER_KEYS()
- elif alt7 == 6:
+ elif alt4 == 6:
# XKBGrammar.g:1:95: TOKEN_ALTERNATE_GROUP
self.mTOKEN_ALTERNATE_GROUP()
- elif alt7 == 7:
+ elif alt4 == 7:
# XKBGrammar.g:1:117: TOKEN_XKB_SYMBOLS
self.mTOKEN_XKB_SYMBOLS()
- elif alt7 == 8:
+ elif alt4 == 8:
# XKBGrammar.g:1:135: TOKEN_INCLUDE
self.mTOKEN_INCLUDE()
- elif alt7 == 9:
+ elif alt4 == 9:
# XKBGrammar.g:1:149: TOKEN_KEY_TYPE
self.mTOKEN_KEY_TYPE()
- elif alt7 == 10:
+ elif alt4 == 10:
# XKBGrammar.g:1:164: TOKEN_NAME
self.mTOKEN_NAME()
- elif alt7 == 11:
+ elif alt4 == 11:
# XKBGrammar.g:1:175: TOKEN_KEY
self.mTOKEN_KEY()
- elif alt7 == 12:
- # XKBGrammar.g:1:185: TOKEN_MODIFIER_MAP
- self.mTOKEN_MODIFIER_MAP()
-
-
-
- elif alt7 == 13:
- # XKBGrammar.g:1:204: LBRACKET
+ elif alt4 == 12:
+ # XKBGrammar.g:1:185: LBRACKET
self.mLBRACKET()
- elif alt7 == 14:
- # XKBGrammar.g:1:213: RBRACKET
+ elif alt4 == 13:
+ # XKBGrammar.g:1:194: RBRACKET
self.mRBRACKET()
- elif alt7 == 15:
- # XKBGrammar.g:1:222: LCURLY
+ elif alt4 == 14:
+ # XKBGrammar.g:1:203: LCURLY
self.mLCURLY()
- elif alt7 == 16:
- # XKBGrammar.g:1:229: RCURLY
+ elif alt4 == 15:
+ # XKBGrammar.g:1:210: RCURLY
self.mRCURLY()
- elif alt7 == 17:
- # XKBGrammar.g:1:236: COMMA
+ elif alt4 == 16:
+ # XKBGrammar.g:1:217: COMMA
self.mCOMMA()
- elif alt7 == 18:
- # XKBGrammar.g:1:242: DQUOTE
+ elif alt4 == 17:
+ # XKBGrammar.g:1:223: DQUOTE
self.mDQUOTE()
- elif alt7 == 19:
- # XKBGrammar.g:1:249: MINUS
+ elif alt4 == 18:
+ # XKBGrammar.g:1:230: MINUS
self.mMINUS()
- elif alt7 == 20:
- # XKBGrammar.g:1:255: PLUS
+ elif alt4 == 19:
+ # XKBGrammar.g:1:236: PLUS
self.mPLUS()
- elif alt7 == 21:
- # XKBGrammar.g:1:260: SEMICOLON
+ elif alt4 == 20:
+ # XKBGrammar.g:1:241: SEMICOLON
self.mSEMICOLON()
- elif alt7 == 22:
- # XKBGrammar.g:1:270: EQUAL
+ elif alt4 == 21:
+ # XKBGrammar.g:1:251: EQUAL
self.mEQUAL()
- elif alt7 == 23:
- # XKBGrammar.g:1:276: LOWERTHAN
+ elif alt4 == 22:
+ # XKBGrammar.g:1:257: LOWERTHAN
self.mLOWERTHAN()
- elif alt7 == 24:
- # XKBGrammar.g:1:286: GREATERTHAN
+ elif alt4 == 23:
+ # XKBGrammar.g:1:267: GREATERTHAN
self.mGREATERTHAN()
- elif alt7 == 25:
- # XKBGrammar.g:1:298: DOT
+ elif alt4 == 24:
+ # XKBGrammar.g:1:279: DOT
self.mDOT()
- elif alt7 == 26:
- # XKBGrammar.g:1:302: NAME
+ elif alt4 == 25:
+ # XKBGrammar.g:1:283: NAME
self.mNAME()
- elif alt7 == 27:
- # XKBGrammar.g:1:307: NAME_INCLUDE
- self.mNAME_INCLUDE()
-
-
-
- elif alt7 == 28:
- # XKBGrammar.g:1:320: NAME_KEYSYM
- self.mNAME_KEYSYM()
-
-
-
- elif alt7 == 29:
- # XKBGrammar.g:1:332: NAME_GROUP
- self.mNAME_GROUP()
-
-
-
- elif alt7 == 30:
- # XKBGrammar.g:1:343: COMMENT
+ elif alt4 == 26:
+ # XKBGrammar.g:1:288: COMMENT
self.mCOMMENT()
- elif alt7 == 31:
- # XKBGrammar.g:1:351: WS
+ elif alt4 == 27:
+ # XKBGrammar.g:1:296: WS
self.mWS()
@@ -1184,289 +1477,5 @@ class XKBGrammarLexer(Lexer):
- # lookup tables for DFA #7
-
- DFA7_eot = DFA.unpack(
- u"\1\uffff\11\36\15\uffff\2\36\1\53\2\uffff\2\36\3\uffff\11\36\1"
- u"\53\1\uffff\10\36\1\77\11\36\1\40\1\uffff\1\112\10\36\1\40\1\uffff"
- u"\1\36\1\125\6\36\1\40\1\135\1\uffff\1\136\4\36\1\143\1\40\2\uffff"
- u"\4\36\1\uffff\1\151\4\36\1\uffff\11\36\1\170\2\36\1\173\1\36\1"
- u"\uffff\2\36\1\uffff\1\177\2\36\1\uffff\1\36\1\u0083\1\36\1\uffff"
- u"\1\u0085\1\uffff"
- )
-
- DFA7_eof = DFA.unpack(
- u"\u0086\uffff"
- )
-
- DFA7_min = DFA.unpack(
- u"\1\11\11\50\15\uffff\2\50\1\55\2\uffff\2\50\3\uffff\11\50\1\55"
- u"\1\uffff\22\50\1\164\1\uffff\11\50\1\171\1\uffff\10\50\1\160\1"
- u"\50\1\uffff\6\50\1\145\2\uffff\4\50\1\uffff\1\55\4\50\1\uffff\16"
- u"\50\1\uffff\2\50\1\uffff\3\50\1\uffff\3\50\1\uffff\1\50\1\uffff"
- )
-
- DFA7_max = DFA.unpack(
- u"\1\175\11\172\15\uffff\3\172\2\uffff\2\172\3\uffff\12\172\1\uffff"
- u"\22\172\1\164\1\uffff\11\172\1\171\1\uffff\10\172\1\160\1\172\1"
- u"\uffff\6\172\1\145\2\uffff\4\172\1\uffff\5\172\1\uffff\16\172\1"
- u"\uffff\2\172\1\uffff\3\172\1\uffff\3\172\1\uffff\1\172\1\uffff"
- )
-
- DFA7_accept = DFA.unpack(
- u"\12\uffff\1\15\1\16\1\17\1\20\1\21\1\22\1\23\1\24\1\25\1\26\1\27"
- u"\1\30\1\31\3\uffff\1\36\1\37\2\uffff\1\32\1\33\1\35\12\uffff\1"
- u"\34\23\uffff\1\13\12\uffff\1\12\12\uffff\1\2\7\uffff\1\1\1\3\4"
- u"\uffff\1\10\5\uffff\1\11\16\uffff\1\7\2\uffff\1\14\3\uffff\1\5"
- u"\3\uffff\1\6\1\uffff\1\4"
- )
-
- DFA7_special = DFA.unpack(
- u"\u0086\uffff"
- )
-
-
- DFA7_transition = [
- DFA.unpack(u"\2\33\2\uffff\1\33\22\uffff\1\33\1\uffff\1\17\10\uffff"
- u"\1\21\1\16\1\20\1\26\1\32\12\31\1\uffff\1\22\1\24\1\23\1\25\2\uffff"
- u"\32\27\1\12\1\uffff\1\13\1\uffff\1\30\1\uffff\1\4\2\27\1\1\3\27"
- u"\1\2\1\7\1\27\1\10\1\27\1\5\1\11\1\27\1\3\7\27\1\6\2\27\1\14\1"
- u"\uffff\1\15"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\34\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\10\35\1\41\21\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\1\42\31\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\13\35\1\43\16\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\16\35\1\44\13\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\12\35\1\45\17\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\15\35\1\46\14\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\47\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\1\50\31\35"),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"\2\37\6\uffff\12\51\7\uffff\32\51\4\uffff\1\51\1\uffff"
- u"\32\51"),
- DFA.unpack(u"\2\40\1\uffff\12\52\7\uffff\32\52\4\uffff\1\52\1\uffff"
- u"\32\52"),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\5\35\1\54\24\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\3\35\1\55\26\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\21\35\1\56\10\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\17\35\1\57\3\35\1\60\6\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\3\35\1\61\26\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\1\35\1\62\30\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\2\35\1\63\27\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\30\35\1\64\1\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\14\35\1\65\15\35"),
- DFA.unpack(u"\2\37\6\uffff\12\51\7\uffff\32\51\4\uffff\1\51\1\uffff"
- u"\32\51"),
- DFA.unpack(u"\2\40\1\uffff\12\52\7\uffff\32\52\4\uffff\1\52\1\uffff"
- u"\32\52"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\1\66\31\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\3\35\1\67\26\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\23\35\1\70\6\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\7\35\1\71\22\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\72\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\10\35\1\73\21\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\74\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\13\35\1\75\16\35"),
- DFA.unpack(u"\2\37\3\uffff\1\40\1\76\1\uffff\12\35\7\uffff\32\35"
- u"\4\uffff\1\35\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\100\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\24\35\1\101\5\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\102\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\10\35\1\103\21\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\1\104\31\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\21\35\1\105\10\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\5\35\1\106\24\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\22\35\1\107\7\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\24\35\1\110\5\35"),
- DFA.unpack(u"\1\111"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\13\35\1\113\16\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\15\35\1\114\14\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\1\115\31\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\15\35\1\116\14\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\15\35\1\117\14\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\10\35\1\120\21\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\30\35\1\121\1\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\3\35\1\122\26\35"),
- DFA.unpack(u"\1\123"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\23\35\1\124\6\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\13\35\1\126\16\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\24\35\1\127\5\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\1\130\31\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\131\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\14\35\1\132\15\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\133\25\35"),
- DFA.unpack(u"\1\134"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\14\35\1\137\15\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\23\35\1\140\6\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\21\35\1\141\10\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\1\35\1\142\30\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"\1\144"),
- DFA.unpack(u""),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\145\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\146\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\147\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\16\35\1\150\13\35"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\40\1\uffff\12\40\7\uffff\32\40\4\uffff\1\40\1\uffff"
- u"\32\40"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\21\35\1\152\10\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\153\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\12\35\1\155\1\35\1\154\15\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\13\35\1\156\16\35"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\10\35\1\157\21\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\6\35\1\160\23\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\1\161\31\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\162\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\22\35\1\163\7\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\2\35\1\164\27\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\21\35\1\165\10\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\17\35\1\166\12\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\30\35\1\167\1\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\171\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\16\35\1\172\13\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\22\35\1\174\7\35"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\12\35\1\175\17\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\24\35\1\176\5\35"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\4\35\1\u0080\25\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\17\35\1\u0081\12\35"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\30\35\1\u0082\1\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\22\35\1\u0084\7\35"),
- DFA.unpack(u""),
- DFA.unpack(u"\2\37\3\uffff\2\40\1\uffff\12\35\7\uffff\32\35\4\uffff"
- u"\1\35\1\uffff\32\35"),
- DFA.unpack(u"")
- ]
-
- # class definition for DFA #7
-
- DFA7 = DFA
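
To exercise the regenerated lexer and parser, a driver along the following lines can be used. This is an illustrative sketch, not part of the commit: it assumes the ANTLR 3.0.1 Python runtime (antlr3) is installed, that the generated modules are importable, and that 'gr' is an XKB symbols file such as the sample shown earlier. The start rule is layout, and the grammar's embedded actions print a normalised copy of the input while parsing.

    from antlr3 import ANTLRStringStream, CommonTokenStream
    from XKBGrammarLexer import XKBGrammarLexer
    from XKBGrammarParser import XKBGrammarParser

    data = open('gr').read()                             # hypothetical input file
    lexer = XKBGrammarLexer(ANTLRStringStream(data))
    parser = XKBGrammarParser(CommonTokenStream(lexer))
    result = parser.layout()                             # 'layout' is the start rule
    print result.tree.toStringTree()                     # dump the rewritten ^(SECTION) AST
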
diff --git a/XKBGrammar/XKBGrammarParser.py b/XKBGrammar/XKBGrammarParser.py
@@ -1,4 +1,4 @@
-# $ANTLR 3.0.1 XKBGrammar.g 2008-05-08 01:14:03
+# $ANTLR 3.0.1 XKBGrammar.g 2008-05-09 12:01:09
from antlr3 import *
from antlr3.compat import set, frozenset
@@ -12,56 +12,53 @@ HIDDEN = BaseRecognizer.HIDDEN
# token types
TOKEN_ALTERNATE_GROUP=9
-ATTRIBUTES=29
-SECTION=35
+ATTRIBUTES=28
+SECTION=33
TOKEN_INCLUDE=11
-KEY=33
-KEYTYPE=34
-ATTRIBUTE=30
+KEY=31
+KEYTYPE=32
+ATTRIBUTE=29
TOKEN_NAME=13
-DQUOTE=21
-LCURLY=18
-SEMICOLON=24
-TOKEN_MODIFIER_MAP=15
-NAME_INCLUDE=39
-MINUS=22
+DQUOTE=20
+LCURLY=17
+SEMICOLON=23
+MINUS=21
TOKEN_XKB_SYMBOLS=10
EOF=-1
-SECTIONNAME=36
-NAME_KEYSYM=38
-NAME_GROUP=40
-LBRACKET=16
-NAME=32
+GENERIC_NAME=36
+SECTIONNAME=34
+LBRACKET=15
+NAME=35
TOKEN_PARTIAL=6
-WS=42
-NEWLINE=41
+WS=39
+NEWLINE=38
TOKEN_ALPHANUMERIC_KEYS=7
TOKEN_HIDDEN=5
-COMMA=20
-LOWERTHAN=26
-INCLUDE=31
-EQUAL=25
-RCURLY=19
+COMMA=19
+LOWERTHAN=25
+INCLUDE=30
+EQUAL=24
+RCURLY=18
TOKEN_MODIFIER_KEYS=8
-PLUS=23
+PLUS=22
TOKEN_KEY=14
-RBRACKET=17
+RBRACKET=16
COMMENT=37
-DOT=28
+DOT=27
TOKEN_DEFAULT=4
TOKEN_KEY_TYPE=12
-GREATERTHAN=27
+GREATERTHAN=26
# token names
tokenNames = [
"<invalid>", "<EOR>", "<DOWN>", "<UP>",
"TOKEN_DEFAULT", "TOKEN_HIDDEN", "TOKEN_PARTIAL", "TOKEN_ALPHANUMERIC_KEYS",
"TOKEN_MODIFIER_KEYS", "TOKEN_ALTERNATE_GROUP", "TOKEN_XKB_SYMBOLS",
- "TOKEN_INCLUDE", "TOKEN_KEY_TYPE", "TOKEN_NAME", "TOKEN_KEY", "TOKEN_MODIFIER_MAP",
- "LBRACKET", "RBRACKET", "LCURLY", "RCURLY", "COMMA", "DQUOTE", "MINUS",
- "PLUS", "SEMICOLON", "EQUAL", "LOWERTHAN", "GREATERTHAN", "DOT", "ATTRIBUTES",
- "ATTRIBUTE", "INCLUDE", "NAME", "KEY", "KEYTYPE", "SECTION", "SECTIONNAME",
- "COMMENT", "NAME_KEYSYM", "NAME_INCLUDE", "NAME_GROUP", "NEWLINE", "WS"
+ "TOKEN_INCLUDE", "TOKEN_KEY_TYPE", "TOKEN_NAME", "TOKEN_KEY", "LBRACKET",
+ "RBRACKET", "LCURLY", "RCURLY", "COMMA", "DQUOTE", "MINUS", "PLUS",
+ "SEMICOLON", "EQUAL", "LOWERTHAN", "GREATERTHAN", "DOT", "ATTRIBUTES",
+ "ATTRIBUTE", "INCLUDE", "KEY", "KEYTYPE", "SECTION", "SECTIONNAME",
+ "NAME", "GENERIC_NAME", "COMMENT", "NEWLINE", "WS"
]
@@ -90,7 +87,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start layout
- # XKBGrammar.g:55:1: layout : ( section )* EOF ;
+ # XKBGrammar.g:74:1: layout : ( section )* EOF ;
def layout(self, ):
retval = self.layout_return()
@@ -106,11 +103,11 @@ class XKBGrammarParser(Parser):
try:
try:
- # XKBGrammar.g:55:9: ( ( section )* EOF )
- # XKBGrammar.g:55:11: ( section )* EOF
+ # XKBGrammar.g:74:10: ( ( section )* EOF )
+ # XKBGrammar.g:74:12: ( section )* EOF
root_0 = self.adaptor.nil()
- # XKBGrammar.g:55:11: ( section )*
+ # XKBGrammar.g:74:12: ( section )*
while True: #loop1
alt1 = 2
LA1_0 = self.input.LA(1)
@@ -120,8 +117,8 @@ class XKBGrammarParser(Parser):
if alt1 == 1:
- # XKBGrammar.g:55:11: section
- self.following.append(self.FOLLOW_section_in_layout349)
+ # XKBGrammar.g:74:12: section
+ self.following.append(self.FOLLOW_section_in_layout359)
section1 = self.section()
self.following.pop()
@@ -133,7 +130,7 @@ class XKBGrammarParser(Parser):
EOF2 = self.input.LT(1)
- self.match(self.input, EOF, self.FOLLOW_EOF_in_layout352)
+ self.match(self.input, EOF, self.FOLLOW_EOF_in_layout362)
@@ -164,7 +161,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start section
- # XKBGrammar.g:58:1: section : preamble sectionmaterial -> ^( SECTION ) ;
+ # XKBGrammar.g:77:1: section : preamble sectionmaterial -> ^( SECTION ) ;
def section(self, ):
retval = self.section_return()
@@ -181,14 +178,14 @@ class XKBGrammarParser(Parser):
stream_sectionmaterial = RewriteRuleSubtreeStream(self.adaptor, "rule sectionmaterial")
try:
try:
- # XKBGrammar.g:59:2: ( preamble sectionmaterial -> ^( SECTION ) )
- # XKBGrammar.g:60:2: preamble sectionmaterial
- self.following.append(self.FOLLOW_preamble_in_section368)
+ # XKBGrammar.g:77:10: ( preamble sectionmaterial -> ^( SECTION ) )
+ # XKBGrammar.g:77:12: preamble sectionmaterial
+ self.following.append(self.FOLLOW_preamble_in_section377)
preamble3 = self.preamble()
self.following.pop()
stream_preamble.add(preamble3.tree)
- self.following.append(self.FOLLOW_sectionmaterial_in_section370)
+ self.following.append(self.FOLLOW_sectionmaterial_in_section379)
sectionmaterial4 = self.sectionmaterial()
self.following.pop()
@@ -212,8 +209,8 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 62:2: -> ^( SECTION )
- # XKBGrammar.g:62:5: ^( SECTION )
+ # 79:3: -> ^( SECTION )
+ # XKBGrammar.g:79:6: ^( SECTION )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(SECTION, "SECTION"), root_1)
@@ -250,7 +247,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start preamble
- # XKBGrammar.g:65:1: preamble : ( attribute_xkb )+ sectionname= quotedstring ;
+ # XKBGrammar.g:82:1: preamble : ( attribute_xkb )+ sectionname= quotedstring ;
def preamble(self, ):
retval = self.preamble_return()
@@ -266,11 +263,11 @@ class XKBGrammarParser(Parser):
try:
try:
- # XKBGrammar.g:65:10: ( ( attribute_xkb )+ sectionname= quotedstring )
- # XKBGrammar.g:65:12: ( attribute_xkb )+ sectionname= quotedstring
+ # XKBGrammar.g:82:11: ( ( attribute_xkb )+ sectionname= quotedstring )
+ # XKBGrammar.g:82:13: ( attribute_xkb )+ sectionname= quotedstring
root_0 = self.adaptor.nil()
- # XKBGrammar.g:65:12: ( attribute_xkb )+
+ # XKBGrammar.g:82:13: ( attribute_xkb )+
cnt2 = 0
while True: #loop2
alt2 = 2
@@ -281,8 +278,8 @@ class XKBGrammarParser(Parser):
if alt2 == 1:
- # XKBGrammar.g:65:12: attribute_xkb
- self.following.append(self.FOLLOW_attribute_xkb_in_preamble391)
+ # XKBGrammar.g:82:13: attribute_xkb
+ self.following.append(self.FOLLOW_attribute_xkb_in_preamble405)
attribute_xkb5 = self.attribute_xkb()
self.following.pop()
@@ -299,7 +296,7 @@ class XKBGrammarParser(Parser):
cnt2 += 1
- self.following.append(self.FOLLOW_quotedstring_in_preamble396)
+ self.following.append(self.FOLLOW_quotedstring_in_preamble410)
sectionname = self.quotedstring()
self.following.pop()
@@ -337,7 +334,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start quotedstring
- # XKBGrammar.g:69:1: quotedstring returns [value] : DQUOTE (sectionname+=~ ( DQUOTE ) )+ DQUOTE ;
+ # XKBGrammar.g:86:1: quotedstring returns [value] : DQUOTE (sectionname+=~ ( DQUOTE ) )+ DQUOTE ;
def quotedstring(self, ):
retval = self.quotedstring_return()
@@ -356,18 +353,18 @@ class XKBGrammarParser(Parser):
try:
try:
- # XKBGrammar.g:70:2: ( DQUOTE (sectionname+=~ ( DQUOTE ) )+ DQUOTE )
- # XKBGrammar.g:70:4: DQUOTE (sectionname+=~ ( DQUOTE ) )+ DQUOTE
+ # XKBGrammar.g:87:10: ( DQUOTE (sectionname+=~ ( DQUOTE ) )+ DQUOTE )
+ # XKBGrammar.g:87:12: DQUOTE (sectionname+=~ ( DQUOTE ) )+ DQUOTE
root_0 = self.adaptor.nil()
DQUOTE6 = self.input.LT(1)
- self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_quotedstring414)
+ self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_quotedstring438)
DQUOTE6_tree = self.adaptor.createWithPayload(DQUOTE6)
self.adaptor.addChild(root_0, DQUOTE6_tree)
- # XKBGrammar.g:70:22: (sectionname+=~ ( DQUOTE ) )+
+ # XKBGrammar.g:87:30: (sectionname+=~ ( DQUOTE ) )+
cnt3 = 0
while True: #loop3
alt3 = 2
@@ -378,7 +375,7 @@ class XKBGrammarParser(Parser):
if alt3 == 1:
- # XKBGrammar.g:70:22: sectionname+=~ ( DQUOTE )
+ # XKBGrammar.g:87:30: sectionname+=~ ( DQUOTE )
sectionname = self.input.LT(1)
if (TOKEN_DEFAULT <= self.input.LA(1) <= COMMA) or (MINUS <= self.input.LA(1) <= WS):
self.input.consume();
@@ -388,7 +385,7 @@ class XKBGrammarParser(Parser):
else:
mse = MismatchedSetException(None, self.input)
self.recoverFromMismatchedSet(
- self.input, mse, self.FOLLOW_set_in_quotedstring418
+ self.input, mse, self.FOLLOW_set_in_quotedstring442
)
raise mse
@@ -410,20 +407,20 @@ class XKBGrammarParser(Parser):
DQUOTE7 = self.input.LT(1)
- self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_quotedstring424)
+ self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_quotedstring448)
DQUOTE7_tree = self.adaptor.createWithPayload(DQUOTE7)
self.adaptor.addChild(root_0, DQUOTE7_tree)
#action start
-
+
qstring = ['"']
for elem in list_sectionname:
- qstring.append(elem.getText())
+ qstring.append(elem.getText())
qstring.append('"')
retval.value = "".join(qstring)
-
+
#action end
@@ -454,7 +451,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start sectionmaterial
- # XKBGrammar.g:80:1: sectionmaterial : LCURLY ( line_include | line_name | line_keytype | line_key | line_comment )+ RCURLY SEMICOLON ;
+ # XKBGrammar.g:97:1: sectionmaterial : LCURLY ( line_include | line_name | line_keytype | line_key )+ RCURLY SEMICOLON ;
def sectionmaterial(self, ):
retval = self.sectionmaterial_return()
@@ -463,8 +460,8 @@ class XKBGrammarParser(Parser):
root_0 = None
LCURLY8 = None
- RCURLY14 = None
- SEMICOLON15 = None
+ RCURLY13 = None
+ SEMICOLON14 = None
line_include9 = None
line_name10 = None
@@ -473,30 +470,28 @@ class XKBGrammarParser(Parser):
line_key12 = None
- line_comment13 = None
-
LCURLY8_tree = None
- RCURLY14_tree = None
- SEMICOLON15_tree = None
+ RCURLY13_tree = None
+ SEMICOLON14_tree = None
try:
try:
- # XKBGrammar.g:81:2: ( LCURLY ( line_include | line_name | line_keytype | line_key | line_comment )+ RCURLY SEMICOLON )
- # XKBGrammar.g:81:4: LCURLY ( line_include | line_name | line_keytype | line_key | line_comment )+ RCURLY SEMICOLON
+ # XKBGrammar.g:97:17: ( LCURLY ( line_include | line_name | line_keytype | line_key )+ RCURLY SEMICOLON )
+ # XKBGrammar.g:97:19: LCURLY ( line_include | line_name | line_keytype | line_key )+ RCURLY SEMICOLON
root_0 = self.adaptor.nil()
LCURLY8 = self.input.LT(1)
- self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_sectionmaterial438)
+ self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_sectionmaterial485)
LCURLY8_tree = self.adaptor.createWithPayload(LCURLY8)
self.adaptor.addChild(root_0, LCURLY8_tree)
- # XKBGrammar.g:81:11: ( line_include | line_name | line_keytype | line_key | line_comment )+
+ # XKBGrammar.g:97:26: ( line_include | line_name | line_keytype | line_key )+
cnt4 = 0
while True: #loop4
- alt4 = 6
+ alt4 = 5
LA4 = self.input.LA(1)
if LA4 == TOKEN_INCLUDE:
alt4 = 1
@@ -506,12 +501,10 @@ class XKBGrammarParser(Parser):
alt4 = 3
elif LA4 == TOKEN_KEY:
alt4 = 4
- elif LA4 == COMMENT:
- alt4 = 5
if alt4 == 1:
- # XKBGrammar.g:81:12: line_include
- self.following.append(self.FOLLOW_line_include_in_sectionmaterial441)
+ # XKBGrammar.g:97:27: line_include
+ self.following.append(self.FOLLOW_line_include_in_sectionmaterial488)
line_include9 = self.line_include()
self.following.pop()
@@ -519,8 +512,8 @@ class XKBGrammarParser(Parser):
elif alt4 == 2:
- # XKBGrammar.g:82:4: line_name
- self.following.append(self.FOLLOW_line_name_in_sectionmaterial447)
+ # XKBGrammar.g:98:5: line_name
+ self.following.append(self.FOLLOW_line_name_in_sectionmaterial495)
line_name10 = self.line_name()
self.following.pop()
@@ -528,8 +521,8 @@ class XKBGrammarParser(Parser):
elif alt4 == 3:
- # XKBGrammar.g:83:4: line_keytype
- self.following.append(self.FOLLOW_line_keytype_in_sectionmaterial453)
+ # XKBGrammar.g:99:5: line_keytype
+ self.following.append(self.FOLLOW_line_keytype_in_sectionmaterial502)
line_keytype11 = self.line_keytype()
self.following.pop()
@@ -537,23 +530,14 @@ class XKBGrammarParser(Parser):
elif alt4 == 4:
- # XKBGrammar.g:84:4: line_key
- self.following.append(self.FOLLOW_line_key_in_sectionmaterial459)
+ # XKBGrammar.g:100:5: line_key
+ self.following.append(self.FOLLOW_line_key_in_sectionmaterial509)
line_key12 = self.line_key()
self.following.pop()
self.adaptor.addChild(root_0, line_key12.tree)
- elif alt4 == 5:
- # XKBGrammar.g:86:4: line_comment
- self.following.append(self.FOLLOW_line_comment_in_sectionmaterial466)
- line_comment13 = self.line_comment()
- self.following.pop()
-
- self.adaptor.addChild(root_0, line_comment13.tree)
-
-
else:
if cnt4 >= 1:
break #loop4
@@ -564,19 +548,19 @@ class XKBGrammarParser(Parser):
cnt4 += 1
- RCURLY14 = self.input.LT(1)
- self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_sectionmaterial470)
+ RCURLY13 = self.input.LT(1)
+ self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_sectionmaterial517)
- RCURLY14_tree = self.adaptor.createWithPayload(RCURLY14)
- self.adaptor.addChild(root_0, RCURLY14_tree)
+ RCURLY13_tree = self.adaptor.createWithPayload(RCURLY13)
+ self.adaptor.addChild(root_0, RCURLY13_tree)
- SEMICOLON15 = self.input.LT(1)
- self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_sectionmaterial472)
+ SEMICOLON14 = self.input.LT(1)
+ self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_sectionmaterial519)
- SEMICOLON15_tree = self.adaptor.createWithPayload(SEMICOLON15)
- self.adaptor.addChild(root_0, SEMICOLON15_tree)
+ SEMICOLON14_tree = self.adaptor.createWithPayload(SEMICOLON14)
+ self.adaptor.addChild(root_0, SEMICOLON14_tree)
@@ -598,63 +582,6 @@ class XKBGrammarParser(Parser):
# $ANTLR end sectionmaterial
- class line_comment_return(object):
- def __init__(self):
- self.start = None
- self.stop = None
-
- self.tree = None
-
-
- # $ANTLR start line_comment
- # XKBGrammar.g:89:1: line_comment : COMMENT ;
- def line_comment(self, ):
-
- retval = self.line_comment_return()
- retval.start = self.input.LT(1)
-
- root_0 = None
-
- COMMENT16 = None
-
- COMMENT16_tree = None
-
- try:
- try:
- # XKBGrammar.g:90:2: ( COMMENT )
- # XKBGrammar.g:90:4: COMMENT
- root_0 = self.adaptor.nil()
-
- COMMENT16 = self.input.LT(1)
- self.match(self.input, COMMENT, self.FOLLOW_COMMENT_in_line_comment483)
-
-
- COMMENT16_tree = self.adaptor.createWithPayload(COMMENT16)
- self.adaptor.addChild(root_0, COMMENT16_tree)
-
- #action start
- skip();
- #action end
-
-
-
- retval.stop = self.input.LT(-1)
-
-
- retval.tree = self.adaptor.rulePostProcessing(root_0)
- self.adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
-
- except RecognitionException, re:
- self.reportError(re)
- self.recover(self.input, re)
- finally:
-
- pass
-
- return retval
-
- # $ANTLR end line_comment
-
class line_include_return(object):
def __init__(self):
self.start = None
@@ -664,7 +591,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_include
- # XKBGrammar.g:92:1: line_include : TOKEN_INCLUDE include= quotedstring ;
+ # XKBGrammar.g:104:1: line_include : TOKEN_INCLUDE include= quotedstring ;
def line_include(self, ):
retval = self.line_include_return()
@@ -672,26 +599,26 @@ class XKBGrammarParser(Parser):
root_0 = None
- TOKEN_INCLUDE17 = None
+ TOKEN_INCLUDE15 = None
include = None
- TOKEN_INCLUDE17_tree = None
+ TOKEN_INCLUDE15_tree = None
try:
try:
- # XKBGrammar.g:94:2: ( TOKEN_INCLUDE include= quotedstring )
- # XKBGrammar.g:94:4: TOKEN_INCLUDE include= quotedstring
+ # XKBGrammar.g:105:2: ( TOKEN_INCLUDE include= quotedstring )
+ # XKBGrammar.g:105:4: TOKEN_INCLUDE include= quotedstring
root_0 = self.adaptor.nil()
- TOKEN_INCLUDE17 = self.input.LT(1)
- self.match(self.input, TOKEN_INCLUDE, self.FOLLOW_TOKEN_INCLUDE_in_line_include497)
+ TOKEN_INCLUDE15 = self.input.LT(1)
+ self.match(self.input, TOKEN_INCLUDE, self.FOLLOW_TOKEN_INCLUDE_in_line_include531)
- TOKEN_INCLUDE17_tree = self.adaptor.createWithPayload(TOKEN_INCLUDE17)
- self.adaptor.addChild(root_0, TOKEN_INCLUDE17_tree)
+ TOKEN_INCLUDE15_tree = self.adaptor.createWithPayload(TOKEN_INCLUDE15)
+ self.adaptor.addChild(root_0, TOKEN_INCLUDE15_tree)
- self.following.append(self.FOLLOW_quotedstring_in_line_include501)
+ self.following.append(self.FOLLOW_quotedstring_in_line_include535)
include = self.quotedstring()
self.following.pop()
@@ -728,7 +655,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_name
- # XKBGrammar.g:98:1: line_name : TOKEN_NAME LBRACKET name= NAME RBRACKET EQUAL nameval= quotedstring SEMICOLON ;
+ # XKBGrammar.g:109:1: line_name : TOKEN_NAME LBRACKET name= NAME RBRACKET EQUAL nameval= quotedstring SEMICOLON ;
def line_name(self, ):
retval = self.line_name_return()
@@ -737,73 +664,73 @@ class XKBGrammarParser(Parser):
root_0 = None
name = None
- TOKEN_NAME18 = None
- LBRACKET19 = None
- RBRACKET20 = None
- EQUAL21 = None
- SEMICOLON22 = None
+ TOKEN_NAME16 = None
+ LBRACKET17 = None
+ RBRACKET18 = None
+ EQUAL19 = None
+ SEMICOLON20 = None
nameval = None
name_tree = None
- TOKEN_NAME18_tree = None
- LBRACKET19_tree = None
- RBRACKET20_tree = None
- EQUAL21_tree = None
- SEMICOLON22_tree = None
+ TOKEN_NAME16_tree = None
+ LBRACKET17_tree = None
+ RBRACKET18_tree = None
+ EQUAL19_tree = None
+ SEMICOLON20_tree = None
try:
try:
- # XKBGrammar.g:99:2: ( TOKEN_NAME LBRACKET name= NAME RBRACKET EQUAL nameval= quotedstring SEMICOLON )
- # XKBGrammar.g:99:4: TOKEN_NAME LBRACKET name= NAME RBRACKET EQUAL nameval= quotedstring SEMICOLON
+ # XKBGrammar.g:110:2: ( TOKEN_NAME LBRACKET name= NAME RBRACKET EQUAL nameval= quotedstring SEMICOLON )
+ # XKBGrammar.g:110:4: TOKEN_NAME LBRACKET name= NAME RBRACKET EQUAL nameval= quotedstring SEMICOLON
root_0 = self.adaptor.nil()
- TOKEN_NAME18 = self.input.LT(1)
- self.match(self.input, TOKEN_NAME, self.FOLLOW_TOKEN_NAME_in_line_name516)
+ TOKEN_NAME16 = self.input.LT(1)
+ self.match(self.input, TOKEN_NAME, self.FOLLOW_TOKEN_NAME_in_line_name550)
- TOKEN_NAME18_tree = self.adaptor.createWithPayload(TOKEN_NAME18)
- self.adaptor.addChild(root_0, TOKEN_NAME18_tree)
+ TOKEN_NAME16_tree = self.adaptor.createWithPayload(TOKEN_NAME16)
+ self.adaptor.addChild(root_0, TOKEN_NAME16_tree)
- LBRACKET19 = self.input.LT(1)
- self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_line_name518)
+ LBRACKET17 = self.input.LT(1)
+ self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_line_name552)
- LBRACKET19_tree = self.adaptor.createWithPayload(LBRACKET19)
- self.adaptor.addChild(root_0, LBRACKET19_tree)
+ LBRACKET17_tree = self.adaptor.createWithPayload(LBRACKET17)
+ self.adaptor.addChild(root_0, LBRACKET17_tree)
name = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_line_name522)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_name556)
name_tree = self.adaptor.createWithPayload(name)
self.adaptor.addChild(root_0, name_tree)
- RBRACKET20 = self.input.LT(1)
- self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_line_name524)
+ RBRACKET18 = self.input.LT(1)
+ self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_line_name558)
- RBRACKET20_tree = self.adaptor.createWithPayload(RBRACKET20)
- self.adaptor.addChild(root_0, RBRACKET20_tree)
+ RBRACKET18_tree = self.adaptor.createWithPayload(RBRACKET18)
+ self.adaptor.addChild(root_0, RBRACKET18_tree)
- EQUAL21 = self.input.LT(1)
- self.match(self.input, EQUAL, self.FOLLOW_EQUAL_in_line_name526)
+ EQUAL19 = self.input.LT(1)
+ self.match(self.input, EQUAL, self.FOLLOW_EQUAL_in_line_name560)
- EQUAL21_tree = self.adaptor.createWithPayload(EQUAL21)
- self.adaptor.addChild(root_0, EQUAL21_tree)
+ EQUAL19_tree = self.adaptor.createWithPayload(EQUAL19)
+ self.adaptor.addChild(root_0, EQUAL19_tree)
- self.following.append(self.FOLLOW_quotedstring_in_line_name530)
+ self.following.append(self.FOLLOW_quotedstring_in_line_name564)
nameval = self.quotedstring()
self.following.pop()
self.adaptor.addChild(root_0, nameval.tree)
- SEMICOLON22 = self.input.LT(1)
- self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_line_name532)
+ SEMICOLON20 = self.input.LT(1)
+ self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_line_name566)
- SEMICOLON22_tree = self.adaptor.createWithPayload(SEMICOLON22)
- self.adaptor.addChild(root_0, SEMICOLON22_tree)
+ SEMICOLON20_tree = self.adaptor.createWithPayload(SEMICOLON20)
+ self.adaptor.addChild(root_0, SEMICOLON20_tree)
#action start
print '\tname[%(name)s] = %(nameval)s;' % { "name": name.text, "nameval": self.input.toString(nameval.start,nameval.stop) }
@@ -837,7 +764,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_keytype
- # XKBGrammar.g:103:1: line_keytype : TOKEN_KEY_TYPE LBRACKET keytype= NAME RBRACKET EQUAL DQUOTE keytypevalue= NAME DQUOTE SEMICOLON ;
+ # XKBGrammar.g:114:1: line_keytype : TOKEN_KEY_TYPE LBRACKET keytype= NAME RBRACKET EQUAL DQUOTE keytypevalue= NAME DQUOTE SEMICOLON ;
def line_keytype(self, ):
retval = self.line_keytype_return()
@@ -847,92 +774,92 @@ class XKBGrammarParser(Parser):
keytype = None
keytypevalue = None
- TOKEN_KEY_TYPE23 = None
- LBRACKET24 = None
- RBRACKET25 = None
- EQUAL26 = None
- DQUOTE27 = None
- DQUOTE28 = None
- SEMICOLON29 = None
+ TOKEN_KEY_TYPE21 = None
+ LBRACKET22 = None
+ RBRACKET23 = None
+ EQUAL24 = None
+ DQUOTE25 = None
+ DQUOTE26 = None
+ SEMICOLON27 = None
keytype_tree = None
keytypevalue_tree = None
- TOKEN_KEY_TYPE23_tree = None
- LBRACKET24_tree = None
- RBRACKET25_tree = None
- EQUAL26_tree = None
- DQUOTE27_tree = None
- DQUOTE28_tree = None
- SEMICOLON29_tree = None
+ TOKEN_KEY_TYPE21_tree = None
+ LBRACKET22_tree = None
+ RBRACKET23_tree = None
+ EQUAL24_tree = None
+ DQUOTE25_tree = None
+ DQUOTE26_tree = None
+ SEMICOLON27_tree = None
try:
try:
- # XKBGrammar.g:104:2: ( TOKEN_KEY_TYPE LBRACKET keytype= NAME RBRACKET EQUAL DQUOTE keytypevalue= NAME DQUOTE SEMICOLON )
- # XKBGrammar.g:104:4: TOKEN_KEY_TYPE LBRACKET keytype= NAME RBRACKET EQUAL DQUOTE keytypevalue= NAME DQUOTE SEMICOLON
+ # XKBGrammar.g:115:2: ( TOKEN_KEY_TYPE LBRACKET keytype= NAME RBRACKET EQUAL DQUOTE keytypevalue= NAME DQUOTE SEMICOLON )
+ # XKBGrammar.g:115:4: TOKEN_KEY_TYPE LBRACKET keytype= NAME RBRACKET EQUAL DQUOTE keytypevalue= NAME DQUOTE SEMICOLON
root_0 = self.adaptor.nil()
- TOKEN_KEY_TYPE23 = self.input.LT(1)
- self.match(self.input, TOKEN_KEY_TYPE, self.FOLLOW_TOKEN_KEY_TYPE_in_line_keytype546)
+ TOKEN_KEY_TYPE21 = self.input.LT(1)
+ self.match(self.input, TOKEN_KEY_TYPE, self.FOLLOW_TOKEN_KEY_TYPE_in_line_keytype580)
- TOKEN_KEY_TYPE23_tree = self.adaptor.createWithPayload(TOKEN_KEY_TYPE23)
- self.adaptor.addChild(root_0, TOKEN_KEY_TYPE23_tree)
+ TOKEN_KEY_TYPE21_tree = self.adaptor.createWithPayload(TOKEN_KEY_TYPE21)
+ self.adaptor.addChild(root_0, TOKEN_KEY_TYPE21_tree)
- LBRACKET24 = self.input.LT(1)
- self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_line_keytype548)
+ LBRACKET22 = self.input.LT(1)
+ self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_line_keytype582)
- LBRACKET24_tree = self.adaptor.createWithPayload(LBRACKET24)
- self.adaptor.addChild(root_0, LBRACKET24_tree)
+ LBRACKET22_tree = self.adaptor.createWithPayload(LBRACKET22)
+ self.adaptor.addChild(root_0, LBRACKET22_tree)
keytype = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype552)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype586)
keytype_tree = self.adaptor.createWithPayload(keytype)
self.adaptor.addChild(root_0, keytype_tree)
- RBRACKET25 = self.input.LT(1)
- self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_line_keytype554)
+ RBRACKET23 = self.input.LT(1)
+ self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_line_keytype588)
- RBRACKET25_tree = self.adaptor.createWithPayload(RBRACKET25)
- self.adaptor.addChild(root_0, RBRACKET25_tree)
+ RBRACKET23_tree = self.adaptor.createWithPayload(RBRACKET23)
+ self.adaptor.addChild(root_0, RBRACKET23_tree)
- EQUAL26 = self.input.LT(1)
- self.match(self.input, EQUAL, self.FOLLOW_EQUAL_in_line_keytype556)
+ EQUAL24 = self.input.LT(1)
+ self.match(self.input, EQUAL, self.FOLLOW_EQUAL_in_line_keytype590)
- EQUAL26_tree = self.adaptor.createWithPayload(EQUAL26)
- self.adaptor.addChild(root_0, EQUAL26_tree)
+ EQUAL24_tree = self.adaptor.createWithPayload(EQUAL24)
+ self.adaptor.addChild(root_0, EQUAL24_tree)
- DQUOTE27 = self.input.LT(1)
- self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_line_keytype558)
+ DQUOTE25 = self.input.LT(1)
+ self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_line_keytype592)
- DQUOTE27_tree = self.adaptor.createWithPayload(DQUOTE27)
- self.adaptor.addChild(root_0, DQUOTE27_tree)
+ DQUOTE25_tree = self.adaptor.createWithPayload(DQUOTE25)
+ self.adaptor.addChild(root_0, DQUOTE25_tree)
keytypevalue = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype562)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype596)
keytypevalue_tree = self.adaptor.createWithPayload(keytypevalue)
self.adaptor.addChild(root_0, keytypevalue_tree)
- DQUOTE28 = self.input.LT(1)
- self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_line_keytype564)
+ DQUOTE26 = self.input.LT(1)
+ self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_line_keytype598)
- DQUOTE28_tree = self.adaptor.createWithPayload(DQUOTE28)
- self.adaptor.addChild(root_0, DQUOTE28_tree)
+ DQUOTE26_tree = self.adaptor.createWithPayload(DQUOTE26)
+ self.adaptor.addChild(root_0, DQUOTE26_tree)
- SEMICOLON29 = self.input.LT(1)
- self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_line_keytype566)
+ SEMICOLON27 = self.input.LT(1)
+ self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_line_keytype600)
- SEMICOLON29_tree = self.adaptor.createWithPayload(SEMICOLON29)
- self.adaptor.addChild(root_0, SEMICOLON29_tree)
+ SEMICOLON27_tree = self.adaptor.createWithPayload(SEMICOLON27)
+ self.adaptor.addChild(root_0, SEMICOLON27_tree)
#action start
print '\tkey.type[%(kt)s] = \"%(ktv)s\";' % { "kt": keytype.text, "ktv": keytypevalue.text }
@@ -966,7 +893,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_key
- # XKBGrammar.g:113:1: line_key : TOKEN_KEY keycode keysyms SEMICOLON ;
+ # XKBGrammar.g:119:1: line_key : TOKEN_KEY keycode keysyms SEMICOLON ;
def line_key(self, ):
retval = self.line_key_return()
@@ -974,48 +901,48 @@ class XKBGrammarParser(Parser):
root_0 = None
- TOKEN_KEY30 = None
- SEMICOLON33 = None
- keycode31 = None
+ TOKEN_KEY28 = None
+ SEMICOLON31 = None
+ keycode29 = None
- keysyms32 = None
+ keysyms30 = None
- TOKEN_KEY30_tree = None
- SEMICOLON33_tree = None
+ TOKEN_KEY28_tree = None
+ SEMICOLON31_tree = None
try:
try:
- # XKBGrammar.g:114:2: ( TOKEN_KEY keycode keysyms SEMICOLON )
- # XKBGrammar.g:114:4: TOKEN_KEY keycode keysyms SEMICOLON
+ # XKBGrammar.g:120:2: ( TOKEN_KEY keycode keysyms SEMICOLON )
+ # XKBGrammar.g:120:4: TOKEN_KEY keycode keysyms SEMICOLON
root_0 = self.adaptor.nil()
- TOKEN_KEY30 = self.input.LT(1)
- self.match(self.input, TOKEN_KEY, self.FOLLOW_TOKEN_KEY_in_line_key586)
+ TOKEN_KEY28 = self.input.LT(1)
+ self.match(self.input, TOKEN_KEY, self.FOLLOW_TOKEN_KEY_in_line_key615)
- TOKEN_KEY30_tree = self.adaptor.createWithPayload(TOKEN_KEY30)
- self.adaptor.addChild(root_0, TOKEN_KEY30_tree)
+ TOKEN_KEY28_tree = self.adaptor.createWithPayload(TOKEN_KEY28)
+ self.adaptor.addChild(root_0, TOKEN_KEY28_tree)
- self.following.append(self.FOLLOW_keycode_in_line_key588)
- keycode31 = self.keycode()
+ self.following.append(self.FOLLOW_keycode_in_line_key617)
+ keycode29 = self.keycode()
self.following.pop()
- self.adaptor.addChild(root_0, keycode31.tree)
- self.following.append(self.FOLLOW_keysyms_in_line_key590)
- keysyms32 = self.keysyms()
+ self.adaptor.addChild(root_0, keycode29.tree)
+ self.following.append(self.FOLLOW_keysyms_in_line_key619)
+ keysyms30 = self.keysyms()
self.following.pop()
- self.adaptor.addChild(root_0, keysyms32.tree)
- SEMICOLON33 = self.input.LT(1)
- self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_line_key592)
+ self.adaptor.addChild(root_0, keysyms30.tree)
+ SEMICOLON31 = self.input.LT(1)
+ self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_line_key621)
- SEMICOLON33_tree = self.adaptor.createWithPayload(SEMICOLON33)
- self.adaptor.addChild(root_0, SEMICOLON33_tree)
+ SEMICOLON31_tree = self.adaptor.createWithPayload(SEMICOLON31)
+ self.adaptor.addChild(root_0, SEMICOLON31_tree)
#action start
- print "\tkey %(keycode)s %(keysyms)s ;" % { "keycode": self.input.toString(keycode31.start,keycode31.stop), "keysyms": self.input.toString(keysyms32.start,keysyms32.stop) }
+ print "\tkey %(keycode)s %(keysyms)s ;" % { "keycode": self.input.toString(keycode29.start,keycode29.stop), "keysyms": self.input.toString(keysyms30.start,keysyms30.stop) }
#action end
@@ -1046,7 +973,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start keycode
- # XKBGrammar.g:118:1: keycode : LOWERTHAN NAME GREATERTHAN -> ^( INCLUDE NAME ) ;
+ # XKBGrammar.g:124:1: keycode : LOWERTHAN NAME GREATERTHAN -> ^( INCLUDE NAME ) ;
def keycode(self, ):
retval = self.keycode_return()
@@ -1054,33 +981,33 @@ class XKBGrammarParser(Parser):
root_0 = None
- LOWERTHAN34 = None
- NAME35 = None
- GREATERTHAN36 = None
+ LOWERTHAN32 = None
+ NAME33 = None
+ GREATERTHAN34 = None
- LOWERTHAN34_tree = None
- NAME35_tree = None
- GREATERTHAN36_tree = None
+ LOWERTHAN32_tree = None
+ NAME33_tree = None
+ GREATERTHAN34_tree = None
stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
stream_LOWERTHAN = RewriteRuleTokenStream(self.adaptor, "token LOWERTHAN")
stream_GREATERTHAN = RewriteRuleTokenStream(self.adaptor, "token GREATERTHAN")
try:
try:
- # XKBGrammar.g:119:2: ( LOWERTHAN NAME GREATERTHAN -> ^( INCLUDE NAME ) )
- # XKBGrammar.g:119:4: LOWERTHAN NAME GREATERTHAN
- LOWERTHAN34 = self.input.LT(1)
- self.match(self.input, LOWERTHAN, self.FOLLOW_LOWERTHAN_in_keycode608)
+ # XKBGrammar.g:125:2: ( LOWERTHAN NAME GREATERTHAN -> ^( INCLUDE NAME ) )
+ # XKBGrammar.g:125:4: LOWERTHAN NAME GREATERTHAN
+ LOWERTHAN32 = self.input.LT(1)
+ self.match(self.input, LOWERTHAN, self.FOLLOW_LOWERTHAN_in_keycode637)
- stream_LOWERTHAN.add(LOWERTHAN34)
- NAME35 = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_keycode610)
+ stream_LOWERTHAN.add(LOWERTHAN32)
+ NAME33 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keycode639)
- stream_NAME.add(NAME35)
- GREATERTHAN36 = self.input.LT(1)
- self.match(self.input, GREATERTHAN, self.FOLLOW_GREATERTHAN_in_keycode612)
+ stream_NAME.add(NAME33)
+ GREATERTHAN34 = self.input.LT(1)
+ self.match(self.input, GREATERTHAN, self.FOLLOW_GREATERTHAN_in_keycode641)
- stream_GREATERTHAN.add(GREATERTHAN36)
+ stream_GREATERTHAN.add(GREATERTHAN34)
# AST Rewrite
# elements: NAME
# token labels:
@@ -1097,8 +1024,8 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 120:2: -> ^( INCLUDE NAME )
- # XKBGrammar.g:120:5: ^( INCLUDE NAME )
+ # 126:2: -> ^( INCLUDE NAME )
+ # XKBGrammar.g:126:5: ^( INCLUDE NAME )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(INCLUDE, "INCLUDE"), root_1)
@@ -1137,7 +1064,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start keysyms
- # XKBGrammar.g:123:1: keysyms : LCURLY LBRACKET ( NAME | NAME_KEYSYM ) ( COMMA ( NAME | NAME_KEYSYM ) )* RBRACKET RCURLY ;
+ # XKBGrammar.g:129:1: keysyms : LCURLY LBRACKET NAME ( COMMA NAME )* RBRACKET RCURLY ;
def keysyms(self, ):
retval = self.keysyms_return()
@@ -1145,57 +1072,50 @@ class XKBGrammarParser(Parser):
root_0 = None
- LCURLY37 = None
- LBRACKET38 = None
- set39 = None
- COMMA40 = None
- set41 = None
- RBRACKET42 = None
- RCURLY43 = None
-
- LCURLY37_tree = None
- LBRACKET38_tree = None
- set39_tree = None
- COMMA40_tree = None
- set41_tree = None
- RBRACKET42_tree = None
- RCURLY43_tree = None
+ LCURLY35 = None
+ LBRACKET36 = None
+ NAME37 = None
+ COMMA38 = None
+ NAME39 = None
+ RBRACKET40 = None
+ RCURLY41 = None
+
+ LCURLY35_tree = None
+ LBRACKET36_tree = None
+ NAME37_tree = None
+ COMMA38_tree = None
+ NAME39_tree = None
+ RBRACKET40_tree = None
+ RCURLY41_tree = None
try:
try:
- # XKBGrammar.g:124:2: ( LCURLY LBRACKET ( NAME | NAME_KEYSYM ) ( COMMA ( NAME | NAME_KEYSYM ) )* RBRACKET RCURLY )
- # XKBGrammar.g:124:4: LCURLY LBRACKET ( NAME | NAME_KEYSYM ) ( COMMA ( NAME | NAME_KEYSYM ) )* RBRACKET RCURLY
+ # XKBGrammar.g:130:2: ( LCURLY LBRACKET NAME ( COMMA NAME )* RBRACKET RCURLY )
+ # XKBGrammar.g:130:4: LCURLY LBRACKET NAME ( COMMA NAME )* RBRACKET RCURLY
root_0 = self.adaptor.nil()
- LCURLY37 = self.input.LT(1)
- self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_keysyms634)
+ LCURLY35 = self.input.LT(1)
+ self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_keysyms663)
- LCURLY37_tree = self.adaptor.createWithPayload(LCURLY37)
- self.adaptor.addChild(root_0, LCURLY37_tree)
+ LCURLY35_tree = self.adaptor.createWithPayload(LCURLY35)
+ self.adaptor.addChild(root_0, LCURLY35_tree)
- LBRACKET38 = self.input.LT(1)
- self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_keysyms636)
+ LBRACKET36 = self.input.LT(1)
+ self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_keysyms665)
- LBRACKET38_tree = self.adaptor.createWithPayload(LBRACKET38)
- self.adaptor.addChild(root_0, LBRACKET38_tree)
+ LBRACKET36_tree = self.adaptor.createWithPayload(LBRACKET36)
+ self.adaptor.addChild(root_0, LBRACKET36_tree)
- set39 = self.input.LT(1)
- if self.input.LA(1) == NAME or self.input.LA(1) == NAME_KEYSYM:
- self.input.consume();
- self.adaptor.addChild(root_0, self.adaptor.createWithPayload(set39))
- self.errorRecovery = False
+ NAME37 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms667)
- else:
- mse = MismatchedSetException(None, self.input)
- self.recoverFromMismatchedSet(
- self.input, mse, self.FOLLOW_set_in_keysyms638
- )
- raise mse
+ NAME37_tree = self.adaptor.createWithPayload(NAME37)
+ self.adaptor.addChild(root_0, NAME37_tree)
- # XKBGrammar.g:124:39: ( COMMA ( NAME | NAME_KEYSYM ) )*
+ # XKBGrammar.g:130:25: ( COMMA NAME )*
while True: #loop5
alt5 = 2
LA5_0 = self.input.LA(1)
@@ -1205,27 +1125,20 @@ class XKBGrammarParser(Parser):
if alt5 == 1:
- # XKBGrammar.g:124:40: COMMA ( NAME | NAME_KEYSYM )
- COMMA40 = self.input.LT(1)
- self.match(self.input, COMMA, self.FOLLOW_COMMA_in_keysyms645)
+ # XKBGrammar.g:130:26: COMMA NAME
+ COMMA38 = self.input.LT(1)
+ self.match(self.input, COMMA, self.FOLLOW_COMMA_in_keysyms670)
- COMMA40_tree = self.adaptor.createWithPayload(COMMA40)
- self.adaptor.addChild(root_0, COMMA40_tree)
+ COMMA38_tree = self.adaptor.createWithPayload(COMMA38)
+ self.adaptor.addChild(root_0, COMMA38_tree)
- set41 = self.input.LT(1)
- if self.input.LA(1) == NAME or self.input.LA(1) == NAME_KEYSYM:
- self.input.consume();
- self.adaptor.addChild(root_0, self.adaptor.createWithPayload(set41))
- self.errorRecovery = False
+ NAME39 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms672)
- else:
- mse = MismatchedSetException(None, self.input)
- self.recoverFromMismatchedSet(
- self.input, mse, self.FOLLOW_set_in_keysyms647
- )
- raise mse
+ NAME39_tree = self.adaptor.createWithPayload(NAME39)
+ self.adaptor.addChild(root_0, NAME39_tree)
@@ -1233,19 +1146,19 @@ class XKBGrammarParser(Parser):
break #loop5
- RBRACKET42 = self.input.LT(1)
- self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_keysyms655)
+ RBRACKET40 = self.input.LT(1)
+ self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_keysyms676)
- RBRACKET42_tree = self.adaptor.createWithPayload(RBRACKET42)
- self.adaptor.addChild(root_0, RBRACKET42_tree)
+ RBRACKET40_tree = self.adaptor.createWithPayload(RBRACKET40)
+ self.adaptor.addChild(root_0, RBRACKET40_tree)
- RCURLY43 = self.input.LT(1)
- self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_keysyms657)
+ RCURLY41 = self.input.LT(1)
+ self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_keysyms678)
- RCURLY43_tree = self.adaptor.createWithPayload(RCURLY43)
- self.adaptor.addChild(root_0, RCURLY43_tree)
+ RCURLY41_tree = self.adaptor.createWithPayload(RCURLY41)
+ self.adaptor.addChild(root_0, RCURLY41_tree)
@@ -1276,7 +1189,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start attribute_xkb
- # XKBGrammar.g:131:1: attribute_xkb : ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS -> ^( ATTRIBUTES ATTRIBUTE ) );
+ # XKBGrammar.g:137:1: attribute_xkb : ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS -> ^( ATTRIBUTES ATTRIBUTE ) );
def attribute_xkb(self, ):
retval = self.attribute_xkb_return()
@@ -1284,24 +1197,24 @@ class XKBGrammarParser(Parser):
root_0 = None
- TOKEN_DEFAULT44 = None
- TOKEN_HIDDEN45 = None
- TOKEN_PARTIAL46 = None
- TOKEN_ALPHANUMERIC_KEYS47 = None
- TOKEN_ALTERNATE_GROUP48 = None
- TOKEN_XKB_SYMBOLS49 = None
-
- TOKEN_DEFAULT44_tree = None
- TOKEN_HIDDEN45_tree = None
- TOKEN_PARTIAL46_tree = None
- TOKEN_ALPHANUMERIC_KEYS47_tree = None
- TOKEN_ALTERNATE_GROUP48_tree = None
- TOKEN_XKB_SYMBOLS49_tree = None
+ TOKEN_DEFAULT42 = None
+ TOKEN_HIDDEN43 = None
+ TOKEN_PARTIAL44 = None
+ TOKEN_ALPHANUMERIC_KEYS45 = None
+ TOKEN_ALTERNATE_GROUP46 = None
+ TOKEN_XKB_SYMBOLS47 = None
+
+ TOKEN_DEFAULT42_tree = None
+ TOKEN_HIDDEN43_tree = None
+ TOKEN_PARTIAL44_tree = None
+ TOKEN_ALPHANUMERIC_KEYS45_tree = None
+ TOKEN_ALTERNATE_GROUP46_tree = None
+ TOKEN_XKB_SYMBOLS47_tree = None
stream_TOKEN_XKB_SYMBOLS = RewriteRuleTokenStream(self.adaptor, "token TOKEN_XKB_SYMBOLS")
try:
try:
- # XKBGrammar.g:132:2: ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS -> ^( ATTRIBUTES ATTRIBUTE ) )
+ # XKBGrammar.g:138:2: ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS -> ^( ATTRIBUTES ATTRIBUTE ) )
alt6 = 6
LA6 = self.input.LA(1)
if LA6 == TOKEN_DEFAULT:
@@ -1317,20 +1230,20 @@ class XKBGrammarParser(Parser):
elif LA6 == TOKEN_XKB_SYMBOLS:
alt6 = 6
else:
- nvae = NoViableAltException("131:1: attribute_xkb : ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS -> ^( ATTRIBUTES ATTRIBUTE ) );", 6, 0, self.input)
+ nvae = NoViableAltException("137:1: attribute_xkb : ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS -> ^( ATTRIBUTES ATTRIBUTE ) );", 6, 0, self.input)
raise nvae
if alt6 == 1:
- # XKBGrammar.g:132:4: TOKEN_DEFAULT
+ # XKBGrammar.g:138:4: TOKEN_DEFAULT
root_0 = self.adaptor.nil()
- TOKEN_DEFAULT44 = self.input.LT(1)
- self.match(self.input, TOKEN_DEFAULT, self.FOLLOW_TOKEN_DEFAULT_in_attribute_xkb674)
+ TOKEN_DEFAULT42 = self.input.LT(1)
+ self.match(self.input, TOKEN_DEFAULT, self.FOLLOW_TOKEN_DEFAULT_in_attribute_xkb695)
- TOKEN_DEFAULT44_tree = self.adaptor.createWithPayload(TOKEN_DEFAULT44)
- self.adaptor.addChild(root_0, TOKEN_DEFAULT44_tree)
+ TOKEN_DEFAULT42_tree = self.adaptor.createWithPayload(TOKEN_DEFAULT42)
+ self.adaptor.addChild(root_0, TOKEN_DEFAULT42_tree)
#action start
print "default",
@@ -1338,15 +1251,15 @@ class XKBGrammarParser(Parser):
elif alt6 == 2:
- # XKBGrammar.g:133:4: TOKEN_HIDDEN
+ # XKBGrammar.g:139:4: TOKEN_HIDDEN
root_0 = self.adaptor.nil()
- TOKEN_HIDDEN45 = self.input.LT(1)
- self.match(self.input, TOKEN_HIDDEN, self.FOLLOW_TOKEN_HIDDEN_in_attribute_xkb682)
+ TOKEN_HIDDEN43 = self.input.LT(1)
+ self.match(self.input, TOKEN_HIDDEN, self.FOLLOW_TOKEN_HIDDEN_in_attribute_xkb703)
- TOKEN_HIDDEN45_tree = self.adaptor.createWithPayload(TOKEN_HIDDEN45)
- self.adaptor.addChild(root_0, TOKEN_HIDDEN45_tree)
+ TOKEN_HIDDEN43_tree = self.adaptor.createWithPayload(TOKEN_HIDDEN43)
+ self.adaptor.addChild(root_0, TOKEN_HIDDEN43_tree)
#action start
print "hidden",
@@ -1354,15 +1267,15 @@ class XKBGrammarParser(Parser):
elif alt6 == 3:
- # XKBGrammar.g:134:4: TOKEN_PARTIAL
+ # XKBGrammar.g:140:4: TOKEN_PARTIAL
root_0 = self.adaptor.nil()
- TOKEN_PARTIAL46 = self.input.LT(1)
- self.match(self.input, TOKEN_PARTIAL, self.FOLLOW_TOKEN_PARTIAL_in_attribute_xkb691)
+ TOKEN_PARTIAL44 = self.input.LT(1)
+ self.match(self.input, TOKEN_PARTIAL, self.FOLLOW_TOKEN_PARTIAL_in_attribute_xkb712)
- TOKEN_PARTIAL46_tree = self.adaptor.createWithPayload(TOKEN_PARTIAL46)
- self.adaptor.addChild(root_0, TOKEN_PARTIAL46_tree)
+ TOKEN_PARTIAL44_tree = self.adaptor.createWithPayload(TOKEN_PARTIAL44)
+ self.adaptor.addChild(root_0, TOKEN_PARTIAL44_tree)
#action start
print "partial",
@@ -1370,15 +1283,15 @@ class XKBGrammarParser(Parser):
elif alt6 == 4:
- # XKBGrammar.g:135:4: TOKEN_ALPHANUMERIC_KEYS
+ # XKBGrammar.g:141:4: TOKEN_ALPHANUMERIC_KEYS
root_0 = self.adaptor.nil()
- TOKEN_ALPHANUMERIC_KEYS47 = self.input.LT(1)
- self.match(self.input, TOKEN_ALPHANUMERIC_KEYS, self.FOLLOW_TOKEN_ALPHANUMERIC_KEYS_in_attribute_xkb700)
+ TOKEN_ALPHANUMERIC_KEYS45 = self.input.LT(1)
+ self.match(self.input, TOKEN_ALPHANUMERIC_KEYS, self.FOLLOW_TOKEN_ALPHANUMERIC_KEYS_in_attribute_xkb721)
- TOKEN_ALPHANUMERIC_KEYS47_tree = self.adaptor.createWithPayload(TOKEN_ALPHANUMERIC_KEYS47)
- self.adaptor.addChild(root_0, TOKEN_ALPHANUMERIC_KEYS47_tree)
+ TOKEN_ALPHANUMERIC_KEYS45_tree = self.adaptor.createWithPayload(TOKEN_ALPHANUMERIC_KEYS45)
+ self.adaptor.addChild(root_0, TOKEN_ALPHANUMERIC_KEYS45_tree)
#action start
print "alphanumeric_keys",
@@ -1386,15 +1299,15 @@ class XKBGrammarParser(Parser):
elif alt6 == 5:
- # XKBGrammar.g:137:4: TOKEN_ALTERNATE_GROUP
+ # XKBGrammar.g:142:4: TOKEN_ALTERNATE_GROUP
root_0 = self.adaptor.nil()
- TOKEN_ALTERNATE_GROUP48 = self.input.LT(1)
- self.match(self.input, TOKEN_ALTERNATE_GROUP, self.FOLLOW_TOKEN_ALTERNATE_GROUP_in_attribute_xkb710)
+ TOKEN_ALTERNATE_GROUP46 = self.input.LT(1)
+ self.match(self.input, TOKEN_ALTERNATE_GROUP, self.FOLLOW_TOKEN_ALTERNATE_GROUP_in_attribute_xkb730)
- TOKEN_ALTERNATE_GROUP48_tree = self.adaptor.createWithPayload(TOKEN_ALTERNATE_GROUP48)
- self.adaptor.addChild(root_0, TOKEN_ALTERNATE_GROUP48_tree)
+ TOKEN_ALTERNATE_GROUP46_tree = self.adaptor.createWithPayload(TOKEN_ALTERNATE_GROUP46)
+ self.adaptor.addChild(root_0, TOKEN_ALTERNATE_GROUP46_tree)
#action start
print "alternate_group",
@@ -1402,11 +1315,11 @@ class XKBGrammarParser(Parser):
elif alt6 == 6:
- # XKBGrammar.g:138:4: TOKEN_XKB_SYMBOLS
- TOKEN_XKB_SYMBOLS49 = self.input.LT(1)
- self.match(self.input, TOKEN_XKB_SYMBOLS, self.FOLLOW_TOKEN_XKB_SYMBOLS_in_attribute_xkb717)
+ # XKBGrammar.g:143:4: TOKEN_XKB_SYMBOLS
+ TOKEN_XKB_SYMBOLS47 = self.input.LT(1)
+ self.match(self.input, TOKEN_XKB_SYMBOLS, self.FOLLOW_TOKEN_XKB_SYMBOLS_in_attribute_xkb737)
- stream_TOKEN_XKB_SYMBOLS.add(TOKEN_XKB_SYMBOLS49)
+ stream_TOKEN_XKB_SYMBOLS.add(TOKEN_XKB_SYMBOLS47)
#action start
print "xkb_symbols",
#action end
@@ -1426,8 +1339,8 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 139:2: -> ^( ATTRIBUTES ATTRIBUTE )
- # XKBGrammar.g:139:5: ^( ATTRIBUTES ATTRIBUTE )
+ # 144:2: -> ^( ATTRIBUTES ATTRIBUTE )
+ # XKBGrammar.g:144:5: ^( ATTRIBUTES ATTRIBUTE )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(ATTRIBUTES, "ATTRIBUTES"), root_1)
@@ -1459,60 +1372,58 @@ class XKBGrammarParser(Parser):
- FOLLOW_section_in_layout349 = frozenset([4, 5, 6, 7, 9, 10])
- FOLLOW_EOF_in_layout352 = frozenset([1])
- FOLLOW_preamble_in_section368 = frozenset([18])
- FOLLOW_sectionmaterial_in_section370 = frozenset([1])
- FOLLOW_attribute_xkb_in_preamble391 = frozenset([4, 5, 6, 7, 9, 10, 21])
- FOLLOW_quotedstring_in_preamble396 = frozenset([1])
- FOLLOW_DQUOTE_in_quotedstring414 = frozenset([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42])
- FOLLOW_set_in_quotedstring418 = frozenset([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42])
- FOLLOW_DQUOTE_in_quotedstring424 = frozenset([1])
- FOLLOW_LCURLY_in_sectionmaterial438 = frozenset([11, 12, 13, 14, 37])
- FOLLOW_line_include_in_sectionmaterial441 = frozenset([11, 12, 13, 14, 19, 37])
- FOLLOW_line_name_in_sectionmaterial447 = frozenset([11, 12, 13, 14, 19, 37])
- FOLLOW_line_keytype_in_sectionmaterial453 = frozenset([11, 12, 13, 14, 19, 37])
- FOLLOW_line_key_in_sectionmaterial459 = frozenset([11, 12, 13, 14, 19, 37])
- FOLLOW_line_comment_in_sectionmaterial466 = frozenset([11, 12, 13, 14, 19, 37])
- FOLLOW_RCURLY_in_sectionmaterial470 = frozenset([24])
- FOLLOW_SEMICOLON_in_sectionmaterial472 = frozenset([1])
- FOLLOW_COMMENT_in_line_comment483 = frozenset([1])
- FOLLOW_TOKEN_INCLUDE_in_line_include497 = frozenset([21])
- FOLLOW_quotedstring_in_line_include501 = frozenset([1])
- FOLLOW_TOKEN_NAME_in_line_name516 = frozenset([16])
- FOLLOW_LBRACKET_in_line_name518 = frozenset([32])
- FOLLOW_NAME_in_line_name522 = frozenset([17])
- FOLLOW_RBRACKET_in_line_name524 = frozenset([25])
- FOLLOW_EQUAL_in_line_name526 = frozenset([21])
- FOLLOW_quotedstring_in_line_name530 = frozenset([24])
- FOLLOW_SEMICOLON_in_line_name532 = frozenset([1])
- FOLLOW_TOKEN_KEY_TYPE_in_line_keytype546 = frozenset([16])
- FOLLOW_LBRACKET_in_line_keytype548 = frozenset([32])
- FOLLOW_NAME_in_line_keytype552 = frozenset([17])
- FOLLOW_RBRACKET_in_line_keytype554 = frozenset([25])
- FOLLOW_EQUAL_in_line_keytype556 = frozenset([21])
- FOLLOW_DQUOTE_in_line_keytype558 = frozenset([32])
- FOLLOW_NAME_in_line_keytype562 = frozenset([21])
- FOLLOW_DQUOTE_in_line_keytype564 = frozenset([24])
- FOLLOW_SEMICOLON_in_line_keytype566 = frozenset([1])
- FOLLOW_TOKEN_KEY_in_line_key586 = frozenset([26])
- FOLLOW_keycode_in_line_key588 = frozenset([18])
- FOLLOW_keysyms_in_line_key590 = frozenset([24])
- FOLLOW_SEMICOLON_in_line_key592 = frozenset([1])
- FOLLOW_LOWERTHAN_in_keycode608 = frozenset([32])
- FOLLOW_NAME_in_keycode610 = frozenset([27])
- FOLLOW_GREATERTHAN_in_keycode612 = frozenset([1])
- FOLLOW_LCURLY_in_keysyms634 = frozenset([16])
- FOLLOW_LBRACKET_in_keysyms636 = frozenset([32, 38])
- FOLLOW_set_in_keysyms638 = frozenset([17, 20])
- FOLLOW_COMMA_in_keysyms645 = frozenset([32, 38])
- FOLLOW_set_in_keysyms647 = frozenset([17, 20])
- FOLLOW_RBRACKET_in_keysyms655 = frozenset([19])
- FOLLOW_RCURLY_in_keysyms657 = frozenset([1])
- FOLLOW_TOKEN_DEFAULT_in_attribute_xkb674 = frozenset([1])
- FOLLOW_TOKEN_HIDDEN_in_attribute_xkb682 = frozenset([1])
- FOLLOW_TOKEN_PARTIAL_in_attribute_xkb691 = frozenset([1])
- FOLLOW_TOKEN_ALPHANUMERIC_KEYS_in_attribute_xkb700 = frozenset([1])
- FOLLOW_TOKEN_ALTERNATE_GROUP_in_attribute_xkb710 = frozenset([1])
- FOLLOW_TOKEN_XKB_SYMBOLS_in_attribute_xkb717 = frozenset([1])
+ FOLLOW_section_in_layout359 = frozenset([4, 5, 6, 7, 9, 10])
+ FOLLOW_EOF_in_layout362 = frozenset([1])
+ FOLLOW_preamble_in_section377 = frozenset([17])
+ FOLLOW_sectionmaterial_in_section379 = frozenset([1])
+ FOLLOW_attribute_xkb_in_preamble405 = frozenset([4, 5, 6, 7, 9, 10, 20])
+ FOLLOW_quotedstring_in_preamble410 = frozenset([1])
+ FOLLOW_DQUOTE_in_quotedstring438 = frozenset([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39])
+ FOLLOW_set_in_quotedstring442 = frozenset([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39])
+ FOLLOW_DQUOTE_in_quotedstring448 = frozenset([1])
+ FOLLOW_LCURLY_in_sectionmaterial485 = frozenset([11, 12, 13, 14])
+ FOLLOW_line_include_in_sectionmaterial488 = frozenset([11, 12, 13, 14, 18])
+ FOLLOW_line_name_in_sectionmaterial495 = frozenset([11, 12, 13, 14, 18])
+ FOLLOW_line_keytype_in_sectionmaterial502 = frozenset([11, 12, 13, 14, 18])
+ FOLLOW_line_key_in_sectionmaterial509 = frozenset([11, 12, 13, 14, 18])
+ FOLLOW_RCURLY_in_sectionmaterial517 = frozenset([23])
+ FOLLOW_SEMICOLON_in_sectionmaterial519 = frozenset([1])
+ FOLLOW_TOKEN_INCLUDE_in_line_include531 = frozenset([20])
+ FOLLOW_quotedstring_in_line_include535 = frozenset([1])
+ FOLLOW_TOKEN_NAME_in_line_name550 = frozenset([15])
+ FOLLOW_LBRACKET_in_line_name552 = frozenset([35])
+ FOLLOW_NAME_in_line_name556 = frozenset([16])
+ FOLLOW_RBRACKET_in_line_name558 = frozenset([24])
+ FOLLOW_EQUAL_in_line_name560 = frozenset([20])
+ FOLLOW_quotedstring_in_line_name564 = frozenset([23])
+ FOLLOW_SEMICOLON_in_line_name566 = frozenset([1])
+ FOLLOW_TOKEN_KEY_TYPE_in_line_keytype580 = frozenset([15])
+ FOLLOW_LBRACKET_in_line_keytype582 = frozenset([35])
+ FOLLOW_NAME_in_line_keytype586 = frozenset([16])
+ FOLLOW_RBRACKET_in_line_keytype588 = frozenset([24])
+ FOLLOW_EQUAL_in_line_keytype590 = frozenset([20])
+ FOLLOW_DQUOTE_in_line_keytype592 = frozenset([35])
+ FOLLOW_NAME_in_line_keytype596 = frozenset([20])
+ FOLLOW_DQUOTE_in_line_keytype598 = frozenset([23])
+ FOLLOW_SEMICOLON_in_line_keytype600 = frozenset([1])
+ FOLLOW_TOKEN_KEY_in_line_key615 = frozenset([25])
+ FOLLOW_keycode_in_line_key617 = frozenset([17])
+ FOLLOW_keysyms_in_line_key619 = frozenset([23])
+ FOLLOW_SEMICOLON_in_line_key621 = frozenset([1])
+ FOLLOW_LOWERTHAN_in_keycode637 = frozenset([35])
+ FOLLOW_NAME_in_keycode639 = frozenset([26])
+ FOLLOW_GREATERTHAN_in_keycode641 = frozenset([1])
+ FOLLOW_LCURLY_in_keysyms663 = frozenset([15])
+ FOLLOW_LBRACKET_in_keysyms665 = frozenset([35])
+ FOLLOW_NAME_in_keysyms667 = frozenset([16, 19])
+ FOLLOW_COMMA_in_keysyms670 = frozenset([35])
+ FOLLOW_NAME_in_keysyms672 = frozenset([16, 19])
+ FOLLOW_RBRACKET_in_keysyms676 = frozenset([18])
+ FOLLOW_RCURLY_in_keysyms678 = frozenset([1])
+ FOLLOW_TOKEN_DEFAULT_in_attribute_xkb695 = frozenset([1])
+ FOLLOW_TOKEN_HIDDEN_in_attribute_xkb703 = frozenset([1])
+ FOLLOW_TOKEN_PARTIAL_in_attribute_xkb712 = frozenset([1])
+ FOLLOW_TOKEN_ALPHANUMERIC_KEYS_in_attribute_xkb721 = frozenset([1])
+ FOLLOW_TOKEN_ALTERNATE_GROUP_in_attribute_xkb730 = frozenset([1])
+ FOLLOW_TOKEN_XKB_SYMBOLS_in_attribute_xkb737 = frozenset([1])
diff --git a/XKBGrammar/XKBGrammar__.g b/XKBGrammar/XKBGrammar__.g
@@ -15,7 +15,6 @@ TOKEN_INCLUDE : 'include' ;
TOKEN_KEY_TYPE : 'key.type' ;
TOKEN_NAME : 'name' ;
TOKEN_KEY : 'key' ;
-TOKEN_MODIFIER_MAP : 'modifier_map' ;
LBRACKET : '[' ;
RBRACKET : ']' ;
LCURLY : '{' ;
@@ -30,37 +29,28 @@ LOWERTHAN : '<' ;
GREATERTHAN : '>' ;
DOT : '.' ;
-// $ANTLR src "XKBGrammar.g" 154
-NAME
- : ('a'..'z'|'A'..'Z'|'_')('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
- ;
-
-// $ANTLR src "XKBGrammar.g" 158
-NAME_INCLUDE
- : ('a'..'z'|'A'..'Z'|'_')('a'..'z'|'A'..'Z'|'_'|'('|')'|'0'..'9')*
+// $ANTLR src "XKBGrammar.g" 147
+fragment GENERIC_NAME
+ : ('a'..'z'|'A'..'Z'|'_')('a'..'z'|'A'..'Z'|'_'|'0'..'9')
;
-// $ANTLR src "XKBGrammar.g" 162
-NAME_KEYSYM
- : ('0'..'9'|'a'..'z'|'A'..'Z')('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
- ;
-
-// $ANTLR src "XKBGrammar.g" 166
-NAME_GROUP
- : ('0'..'9'|'a'..'z'|'A'..'Z')('a'..'z'|'A'..'Z'|'_'|'-'|'.'|'0'..'9')*
+// $ANTLR src "XKBGrammar.g" 151
+NAME
+ : ('a'..'z'|'A'..'Z'|'_'|'('|')'|'0'..'9')*
;
-// $ANTLR src "XKBGrammar.g" 170
+// Comments are currently ignored.
+// $ANTLR src "XKBGrammar.g" 156
COMMENT : '//' (~('\n'|'\r'))*
{ $channel = HIDDEN; }
;
-// $ANTLR src "XKBGrammar.g" 174
+// $ANTLR src "XKBGrammar.g" 160
WS : ('\t'|' '|NEWLINE)+
{ $channel=HIDDEN; }
;
-// $ANTLR src "XKBGrammar.g" 178
+// $ANTLR src "XKBGrammar.g" 164
fragment NEWLINE
: '\r'|'\n'
;
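With COMMENT tokens now sent to the hidden channel in the lexer rules above, the parser no longer needs a dedicated comment rule: CommonTokenStream only feeds default-channel tokens to the parser, so comments simply disappear before parsing. A minimal driver sketch for the generated ANTLR 3 Python classes is shown below; the module names XKBGrammarLexer/XKBGrammarParser and the input file name are assumptions based on ANTLR's usual naming conventions, not files touched by this commit.

    # Minimal sketch (assumed module/file names; not part of this commit).
    # Comments in the input are dropped by the lexer ($channel = HIDDEN),
    # so the parser rules never see COMMENT tokens.
    from antlr3 import ANTLRFileStream, CommonTokenStream
    from XKBGrammarLexer import XKBGrammarLexer
    from XKBGrammarParser import XKBGrammarParser

    char_stream = ANTLRFileStream('gr')   # e.g. the sample symbols file below
    lexer = XKBGrammarLexer(char_stream)
    tokens = CommonTokenStream(lexer)     # hidden-channel tokens are skipped here
    parser = XKBGrammarParser(tokens)
    parser.layout()                       # entry rule; the embedded actions print the sections

Handling comments in the lexer keeps the parser rules (and the generated FOLLOW sets above) smaller than threading a line_comment alternative through every section body.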
diff --git a/XKBGrammar/gr b/XKBGrammar/gr
@@ -2,6 +2,7 @@
partial alphanumeric_keys alternate_group
xkb_symbols "extended" {
// my comment
+ // more of these comments
include "gr(basic)"
name[Group1] = "Greece - Extended";
key.type[Group1] = "THREE_LEVEL"; // yeah, comment