commit a2e15702a3d8f3eaf6e6530240fcabe6ceacb951
parent 1fc73cd2002c4dc092a5513aa9f8dd524092b9f9
Author: simos.lists <simos.lists@70737e48-4f4a-0410-8df8-290828ad50c4>
Date: Wed, 14 May 2008 21:04:49 +0000
Initial tree generation
git-svn-id: http://keyboardlayouteditor.googlecode.com/svn/trunk@20 70737e48-4f4a-0410-8df8-290828ad50c4
Diffstat:
10 files changed, 1296 insertions(+), 1732 deletions(-)
diff --git a/XKBGrammar/XKBGrammar.g b/XKBGrammar/XKBGrammar.g
@@ -13,40 +13,23 @@ options
tokens
{
// Map options
- TOKEN_DEFAULT = 'default';
- TOKEN_HIDDEN = 'hidden';
- TOKEN_PARTIAL = 'partial';
- TOKEN_ALPHANUMERIC_KEYS = 'alphanumeric_keys';
- TOKEN_MODIFIER_KEYS = 'modifier_keys';
- TOKEN_ALTERNATE_GROUP = 'alternate_group';
- TOKEN_XKB_SYMBOLS = 'xkb_symbols';
+ TOKEN_DEFAULT;
+ TOKEN_HIDDEN;
+ TOKEN_PARTIAL;
+ TOKEN_ALPHANUMERIC_KEYS;
+ TOKEN_MODIFIER_KEYS;
+ TOKEN_ALTERNATE_GROUP;
+ TOKEN_XKB_SYMBOLS;
// Keywords [TODO: check terminology]
- TOKEN_INCLUDE = 'include';
- TOKEN_KEY_TYPE = 'key.type';
- TOKEN_NAME = 'name';
- TOKEN_KEY = 'key';
-
- // Punctuators
- LBRACKET = '[';
- RBRACKET = ']';
- LCURLY = '{';
- RCURLY = '}';
- COMMA = ',';
- DQUOTE = '"';
- MINUS = '-';
- PLUS = '+';
- SEMICOLON = ';';
- EQUAL = '=';
- LOWERTHAN = '<';
- GREATERTHAN = '>';
- DOT = '.';
- // HYPHEN = '-';
- // SPACE = ' ';
- // UNDERSCORE = '_';
-
+ TOKEN_INCLUDE;
+ TOKEN_KEY_TYPE;
+ TOKEN_NAME;
+ TOKEN_KEY;
+
// Tokens for tree.
MAPTYPE;
+ MAPMATERIAL;
ATTRIBUTES;
ATTRIBUTE;
INCLUDE;
@@ -55,6 +38,8 @@ tokens
KEYCODE;
SECTION;
SECTIONNAME;
+ QUOTEDSTRING;
+ KEYSYMS;
}
// We cover XKB symbol files that look like
@@ -74,112 +59,76 @@ tokens
// // can have several sections as above.
layout
- : section+
+ : section+ EOF!
;
section
- : mapType sectionmaterial
- { print '}' }
+ : mapType mapMaterial
+ -> ^(SECTION mapType mapMaterial)
;
mapType
- : mapOptions+ sectionname=quotedstring
- { print '\%(sectionname)s {' \% { "sectionname": $sectionname.text } }
- -> ^(MAPTYPE mapOptions+ $sectionname)
+ : mapOptions+ '"' NAME '"'
+ -> ^(MAPTYPE mapOptions+ NAME)
;
-quotedstring returns [value]
- : DQUOTE sectionname+=~(DQUOTE)+ DQUOTE
- {
- qstring = ['"']
- for elem in $sectionname:
- qstring.append(elem.getText())
- qstring.append('"')
- $value = "".join(qstring)
- }
- ;
-
-sectionmaterial
- : lc=LCURLY (line_include
- | line_name
- | line_keytype
- | line_key
- )+ RCURLY SEMICOLON
- -> ^(SECTION)
+mapMaterial
+ : '{'
+ ( line_include
+ | line_name ';'!
+ | line_keytype ';'!
+ | line_key ';'!
+ )+ '}' ';'
;
line_include
- : TOKEN_INCLUDE include=quotedstring
- { print '\tinclude \%(inc)s' \% { "inc": $include.text } }
- -> ^(TOKEN_INCLUDE $include)
+ : 'include' '"' NAME '"'
+ -> ^(TOKEN_INCLUDE NAME)
;
line_name
- : TOKEN_NAME LBRACKET name=NAME RBRACKET EQUAL nameval=quotedstring SEMICOLON
- { print '\tname[\%(name)s] = \%(nameval)s;' \% { "name": $name.text, "nameval": $nameval.text } }
- -> ^(TOKEN_NAME $name $nameval)
+ : 'name' '[' n1=NAME ']' '=' '"' n2=NAME '"'
+ -> ^(TOKEN_NAME $n1 $n2)
;
line_keytype
- : TOKEN_KEY_TYPE LBRACKET keytype=NAME RBRACKET EQUAL DQUOTE keytypevalue=NAME DQUOTE SEMICOLON
- { print '\tkey.type[\%(kt)s] = \"\%(ktv)s\";' \% { "kt": $keytype.text, "ktv": $keytypevalue.text } }
- -> ^(TOKEN_KEY_TYPE $keytype $keytypevalue)
+ : 'key.type' '[' n1=NAME ']' '=' '"' n2=NAME '"'
+ -> ^(TOKEN_KEY_TYPE $n1 $n2)
;
line_key
- : TOKEN_KEY keycode keysyms SEMICOLON
- { print '\tkey \%(keycode)s \%(keysyms)s;' \% { "keycode": $keycode.text, "keysyms": $keysyms.value } }
+ : 'key' keycode keysyms
-> ^(TOKEN_KEY keycode keysyms)
;
keycode
- : LOWERTHAN NAME GREATERTHAN
+ : '<' NAME '>'
-> ^(KEYCODE NAME)
;
-keysyms returns [value]
- : LCURLY LBRACKET keysym+=NAME (COMMA keysym+=NAME)* RBRACKET RCURLY
- {
- qstring = ["{ [ "]
- first_elem = $keysym[0].getText()
- qstring.append(first_elem)
- for elem in $keysym:
- if first_elem != "":
- first_elem = ""
- continue
- qstring.append(", ")
- qstring.append(elem.getText())
- qstring.append(" ] }")
- $value = "".join(qstring)
- }
+keysyms
+ : '{' '[' keysym+=NAME (',' keysym+=NAME)* ']' '}'
+ -> ^(KEYSYMS $keysym+)
;
-// mapsyms
-// : LCURLY LBRACKET (NAME|keycode) (COMMA (NAME|keycode))* RBRACKET RCURLY
-// ;
-
mapOptions
- : TOKEN_DEFAULT { print "default", }
- | TOKEN_HIDDEN { print "hidden", }
- | TOKEN_PARTIAL { print "partial", }
- | TOKEN_ALPHANUMERIC_KEYS { print "alphanumeric_keys", }
- | TOKEN_ALTERNATE_GROUP { print "alternate_group", }
- | TOKEN_XKB_SYMBOLS { print "xkb_symbols", }
+ : 'default'
+ | 'hidden'
+ | 'partial'
+ | 'alphanumeric_keys'
+ | 'alternate_group'
+ | 'xkb_symbols'
;
-fragment GENERIC_NAME
- : ('a'..'z'|'A'..'Z'|'_')('a'..'z'|'A'..'Z'|'_'|'0'..'9')
- ;
-
NAME
- : ('a'..'z'|'A'..'Z'|'_'|'('|')'|'0'..'9')*
+ : ('a'..'z' | 'A'..'Z' | '_' | '-' | '(' | ')' | '0'..'9')*
;
// Comments are currently ignored.
WS
:
- (' '|'\r'|'\t'|'\u000C'|'\n')
- {$channel=HIDDEN;}
+ ( ' ' | '\r' | '\t' | '\u000C' | '\n')
+ { $channel=HIDDEN; }
;
COMMENT
@@ -189,7 +138,7 @@ COMMENT
LINE_COMMENT
:
- '//' ~('\n'|'\r')* '\r'? '\n'
- {$channel=HIDDEN;}
+ '//' ~('\n' | '\r')* '\r'? '\n'
+ { $channel=HIDDEN; }
;
diff --git a/XKBGrammar/XKBGrammar.tokens b/XKBGrammar/XKBGrammar.tokens
@@ -1,62 +1,47 @@
+QUOTEDSTRING=25
TOKEN_ALTERNATE_GROUP=9
-ATTRIBUTES=29
-SECTION=35
-LINE_COMMENT=41
-KEYCODE=34
+ATTRIBUTES=17
+SECTION=23
+LINE_COMMENT=30
+KEYCODE=22
TOKEN_INCLUDE=11
-KEY=32
-KEYTYPE=33
-ATTRIBUTE=30
+KEY=20
+KEYTYPE=21
+ATTRIBUTE=18
TOKEN_NAME=13
-DQUOTE=20
-LCURLY=17
-SEMICOLON=23
-MINUS=21
TOKEN_XKB_SYMBOLS=10
-GENERIC_NAME=38
-SECTIONNAME=36
-MAPTYPE=28
-LBRACKET=15
-NAME=37
+SECTIONNAME=24
+MAPTYPE=15
+NAME=27
TOKEN_PARTIAL=6
-WS=39
+WS=28
TOKEN_ALPHANUMERIC_KEYS=7
TOKEN_HIDDEN=5
-COMMA=19
-LOWERTHAN=25
-INCLUDE=31
-EQUAL=24
-RCURLY=18
+MAPMATERIAL=16
+INCLUDE=19
TOKEN_MODIFIER_KEYS=8
-PLUS=22
+KEYSYMS=26
TOKEN_KEY=14
-RBRACKET=16
-COMMENT=40
-DOT=27
+COMMENT=29
TOKEN_DEFAULT=4
TOKEN_KEY_TYPE=12
-GREATERTHAN=26
-'alphanumeric_keys'=7
-'"'=20
-'}'=18
-'alternate_group'=9
-'key'=14
-'partial'=6
-'>'=26
-'{'=17
-'include'=11
-'hidden'=5
-'modifier_keys'=8
-';'=23
-'='=24
-'<'=25
-'key.type'=12
-'xkb_symbols'=10
-'-'=21
-'['=15
-'+'=22
-'name'=13
-','=19
-'.'=27
-'default'=4
-']'=16
+'alphanumeric_keys'=48
+'"'=31
+'}'=34
+'alternate_group'=49
+'key'=41
+'partial'=47
+'{'=32
+'>'=43
+'include'=35
+'hidden'=46
+';'=33
+'='=39
+'key.type'=40
+'<'=42
+'xkb_symbols'=50
+'['=37
+'name'=36
+','=44
+'default'=45
+']'=38
diff --git a/XKBGrammar/XKBGrammarLexer.py b/XKBGrammar/XKBGrammarLexer.py
@@ -1,4 +1,4 @@
-# $ANTLR 3.0.1 XKBGrammar.g 2008-05-09 21:53:06
+# $ANTLR 3.0.1 XKBGrammar.g 2008-05-14 21:55:39
from antlr3 import *
from antlr3.compat import set, frozenset
@@ -8,46 +8,55 @@ from antlr3.compat import set, frozenset
HIDDEN = BaseRecognizer.HIDDEN
# token types
-TOKEN_ALTERNATE_GROUP=9
-ATTRIBUTES=29
-SECTION=35
-LINE_COMMENT=41
-KEYCODE=34
+ATTRIBUTES=17
TOKEN_INCLUDE=11
-KEY=32
-KEYTYPE=33
-ATTRIBUTE=30
-TOKEN_NAME=13
-DQUOTE=20
-LCURLY=17
-SEMICOLON=23
-MINUS=21
+ATTRIBUTE=18
TOKEN_XKB_SYMBOLS=10
-Tokens=42
EOF=-1
-SECTIONNAME=36
-GENERIC_NAME=38
-MAPTYPE=28
-LBRACKET=15
+SECTIONNAME=24
+MAPTYPE=15
TOKEN_PARTIAL=6
-NAME=37
-WS=39
-TOKEN_HIDDEN=5
+NAME=27
+MAPMATERIAL=16
+INCLUDE=19
+T38=38
+KEYSYMS=26
+T37=37
+T39=39
+COMMENT=29
+T34=34
+TOKEN_DEFAULT=4
+T33=33
+T36=36
+T35=35
+T32=32
+T31=31
+QUOTEDSTRING=25
+TOKEN_ALTERNATE_GROUP=9
+SECTION=23
+LINE_COMMENT=30
+KEYCODE=22
+KEY=20
+KEYTYPE=21
+TOKEN_NAME=13
+T49=49
+T48=48
+T43=43
+Tokens=51
+T42=42
+T41=41
+T40=40
+T47=47
+T46=46
+T45=45
+T44=44
+WS=28
TOKEN_ALPHANUMERIC_KEYS=7
-COMMA=19
-LOWERTHAN=25
-EQUAL=24
-INCLUDE=31
-RCURLY=18
+TOKEN_HIDDEN=5
+T50=50
TOKEN_MODIFIER_KEYS=8
-PLUS=22
TOKEN_KEY=14
-RBRACKET=16
-DOT=27
-COMMENT=40
-TOKEN_DEFAULT=4
TOKEN_KEY_TYPE=12
-GREATERTHAN=26
class XKBGrammarLexer(Lexer):
@@ -60,16 +69,15 @@ class XKBGrammarLexer(Lexer):
- # $ANTLR start TOKEN_DEFAULT
- def mTOKEN_DEFAULT(self, ):
+ # $ANTLR start T31
+ def mT31(self, ):
try:
- self.type = TOKEN_DEFAULT
-
- # XKBGrammar.g:7:15: ( 'default' )
- # XKBGrammar.g:7:17: 'default'
- self.match("default")
+ self.type = T31
+ # XKBGrammar.g:7:5: ( '\"' )
+ # XKBGrammar.g:7:7: '\"'
+ self.match(u'"')
@@ -79,20 +87,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_DEFAULT
+ # $ANTLR end T31
- # $ANTLR start TOKEN_HIDDEN
- def mTOKEN_HIDDEN(self, ):
+ # $ANTLR start T32
+ def mT32(self, ):
try:
- self.type = TOKEN_HIDDEN
-
- # XKBGrammar.g:8:14: ( 'hidden' )
- # XKBGrammar.g:8:16: 'hidden'
- self.match("hidden")
+ self.type = T32
+ # XKBGrammar.g:8:5: ( '{' )
+ # XKBGrammar.g:8:7: '{'
+ self.match(u'{')
@@ -102,20 +109,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_HIDDEN
+ # $ANTLR end T32
- # $ANTLR start TOKEN_PARTIAL
- def mTOKEN_PARTIAL(self, ):
+ # $ANTLR start T33
+ def mT33(self, ):
try:
- self.type = TOKEN_PARTIAL
-
- # XKBGrammar.g:9:15: ( 'partial' )
- # XKBGrammar.g:9:17: 'partial'
- self.match("partial")
+ self.type = T33
+ # XKBGrammar.g:9:5: ( ';' )
+ # XKBGrammar.g:9:7: ';'
+ self.match(u';')
@@ -125,20 +131,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_PARTIAL
+ # $ANTLR end T33
- # $ANTLR start TOKEN_ALPHANUMERIC_KEYS
- def mTOKEN_ALPHANUMERIC_KEYS(self, ):
+ # $ANTLR start T34
+ def mT34(self, ):
try:
- self.type = TOKEN_ALPHANUMERIC_KEYS
-
- # XKBGrammar.g:10:25: ( 'alphanumeric_keys' )
- # XKBGrammar.g:10:27: 'alphanumeric_keys'
- self.match("alphanumeric_keys")
+ self.type = T34
+ # XKBGrammar.g:10:5: ( '}' )
+ # XKBGrammar.g:10:7: '}'
+ self.match(u'}')
@@ -148,19 +153,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_ALPHANUMERIC_KEYS
+ # $ANTLR end T34
- # $ANTLR start TOKEN_MODIFIER_KEYS
- def mTOKEN_MODIFIER_KEYS(self, ):
+ # $ANTLR start T35
+ def mT35(self, ):
try:
- self.type = TOKEN_MODIFIER_KEYS
+ self.type = T35
- # XKBGrammar.g:11:21: ( 'modifier_keys' )
- # XKBGrammar.g:11:23: 'modifier_keys'
- self.match("modifier_keys")
+ # XKBGrammar.g:11:5: ( 'include' )
+ # XKBGrammar.g:11:7: 'include'
+ self.match("include")
@@ -171,19 +176,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_MODIFIER_KEYS
+ # $ANTLR end T35
- # $ANTLR start TOKEN_ALTERNATE_GROUP
- def mTOKEN_ALTERNATE_GROUP(self, ):
+ # $ANTLR start T36
+ def mT36(self, ):
try:
- self.type = TOKEN_ALTERNATE_GROUP
+ self.type = T36
- # XKBGrammar.g:12:23: ( 'alternate_group' )
- # XKBGrammar.g:12:25: 'alternate_group'
- self.match("alternate_group")
+ # XKBGrammar.g:12:5: ( 'name' )
+ # XKBGrammar.g:12:7: 'name'
+ self.match("name")
@@ -194,20 +199,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_ALTERNATE_GROUP
+ # $ANTLR end T36
- # $ANTLR start TOKEN_XKB_SYMBOLS
- def mTOKEN_XKB_SYMBOLS(self, ):
+ # $ANTLR start T37
+ def mT37(self, ):
try:
- self.type = TOKEN_XKB_SYMBOLS
-
- # XKBGrammar.g:13:19: ( 'xkb_symbols' )
- # XKBGrammar.g:13:21: 'xkb_symbols'
- self.match("xkb_symbols")
+ self.type = T37
+ # XKBGrammar.g:13:5: ( '[' )
+ # XKBGrammar.g:13:7: '['
+ self.match(u'[')
@@ -217,20 +221,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_XKB_SYMBOLS
+ # $ANTLR end T37
- # $ANTLR start TOKEN_INCLUDE
- def mTOKEN_INCLUDE(self, ):
+ # $ANTLR start T38
+ def mT38(self, ):
try:
- self.type = TOKEN_INCLUDE
-
- # XKBGrammar.g:14:15: ( 'include' )
- # XKBGrammar.g:14:17: 'include'
- self.match("include")
+ self.type = T38
+ # XKBGrammar.g:14:5: ( ']' )
+ # XKBGrammar.g:14:7: ']'
+ self.match(u']')
@@ -240,20 +243,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_INCLUDE
+ # $ANTLR end T38
- # $ANTLR start TOKEN_KEY_TYPE
- def mTOKEN_KEY_TYPE(self, ):
+ # $ANTLR start T39
+ def mT39(self, ):
try:
- self.type = TOKEN_KEY_TYPE
-
- # XKBGrammar.g:15:16: ( 'key.type' )
- # XKBGrammar.g:15:18: 'key.type'
- self.match("key.type")
+ self.type = T39
+ # XKBGrammar.g:15:5: ( '=' )
+ # XKBGrammar.g:15:7: '='
+ self.match(u'=')
@@ -263,19 +265,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_KEY_TYPE
+ # $ANTLR end T39
- # $ANTLR start TOKEN_NAME
- def mTOKEN_NAME(self, ):
+ # $ANTLR start T40
+ def mT40(self, ):
try:
- self.type = TOKEN_NAME
+ self.type = T40
- # XKBGrammar.g:16:12: ( 'name' )
- # XKBGrammar.g:16:14: 'name'
- self.match("name")
+ # XKBGrammar.g:16:5: ( 'key.type' )
+ # XKBGrammar.g:16:7: 'key.type'
+ self.match("key.type")
@@ -286,18 +288,18 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_NAME
+ # $ANTLR end T40
- # $ANTLR start TOKEN_KEY
- def mTOKEN_KEY(self, ):
+ # $ANTLR start T41
+ def mT41(self, ):
try:
- self.type = TOKEN_KEY
+ self.type = T41
- # XKBGrammar.g:17:11: ( 'key' )
- # XKBGrammar.g:17:13: 'key'
+ # XKBGrammar.g:17:5: ( 'key' )
+ # XKBGrammar.g:17:7: 'key'
self.match("key")
@@ -309,19 +311,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end TOKEN_KEY
+ # $ANTLR end T41
- # $ANTLR start LBRACKET
- def mLBRACKET(self, ):
+ # $ANTLR start T42
+ def mT42(self, ):
try:
- self.type = LBRACKET
+ self.type = T42
- # XKBGrammar.g:18:10: ( '[' )
- # XKBGrammar.g:18:12: '['
- self.match(u'[')
+ # XKBGrammar.g:18:5: ( '<' )
+ # XKBGrammar.g:18:7: '<'
+ self.match(u'<')
@@ -331,19 +333,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end LBRACKET
+ # $ANTLR end T42
- # $ANTLR start RBRACKET
- def mRBRACKET(self, ):
+ # $ANTLR start T43
+ def mT43(self, ):
try:
- self.type = RBRACKET
+ self.type = T43
- # XKBGrammar.g:19:10: ( ']' )
- # XKBGrammar.g:19:12: ']'
- self.match(u']')
+ # XKBGrammar.g:19:5: ( '>' )
+ # XKBGrammar.g:19:7: '>'
+ self.match(u'>')
@@ -353,19 +355,19 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end RBRACKET
+ # $ANTLR end T43
- # $ANTLR start LCURLY
- def mLCURLY(self, ):
+ # $ANTLR start T44
+ def mT44(self, ):
try:
- self.type = LCURLY
+ self.type = T44
- # XKBGrammar.g:20:8: ( '{' )
- # XKBGrammar.g:20:10: '{'
- self.match(u'{')
+ # XKBGrammar.g:20:5: ( ',' )
+ # XKBGrammar.g:20:7: ','
+ self.match(u',')
@@ -375,41 +377,20 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end LCURLY
+ # $ANTLR end T44
- # $ANTLR start RCURLY
- def mRCURLY(self, ):
+ # $ANTLR start T45
+ def mT45(self, ):
try:
- self.type = RCURLY
-
- # XKBGrammar.g:21:8: ( '}' )
- # XKBGrammar.g:21:10: '}'
- self.match(u'}')
-
-
-
-
-
- finally:
-
- pass
-
- # $ANTLR end RCURLY
-
-
-
- # $ANTLR start COMMA
- def mCOMMA(self, ):
+ self.type = T45
- try:
- self.type = COMMA
+ # XKBGrammar.g:21:5: ( 'default' )
+ # XKBGrammar.g:21:7: 'default'
+ self.match("default")
- # XKBGrammar.g:22:7: ( ',' )
- # XKBGrammar.g:22:9: ','
- self.match(u',')
@@ -419,41 +400,20 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end COMMA
+ # $ANTLR end T45
- # $ANTLR start DQUOTE
- def mDQUOTE(self, ):
+ # $ANTLR start T46
+ def mT46(self, ):
try:
- self.type = DQUOTE
-
- # XKBGrammar.g:23:8: ( '\"' )
- # XKBGrammar.g:23:10: '\"'
- self.match(u'"')
-
-
-
-
+ self.type = T46
- finally:
-
- pass
-
- # $ANTLR end DQUOTE
-
-
-
- # $ANTLR start MINUS
- def mMINUS(self, ):
-
- try:
- self.type = MINUS
+ # XKBGrammar.g:22:5: ( 'hidden' )
+ # XKBGrammar.g:22:7: 'hidden'
+ self.match("hidden")
- # XKBGrammar.g:24:7: ( '-' )
- # XKBGrammar.g:24:9: '-'
- self.match(u'-')
@@ -463,41 +423,20 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end MINUS
+ # $ANTLR end T46
- # $ANTLR start PLUS
- def mPLUS(self, ):
+ # $ANTLR start T47
+ def mT47(self, ):
try:
- self.type = PLUS
-
- # XKBGrammar.g:25:6: ( '+' )
- # XKBGrammar.g:25:8: '+'
- self.match(u'+')
-
-
-
-
-
- finally:
-
- pass
-
- # $ANTLR end PLUS
-
-
-
- # $ANTLR start SEMICOLON
- def mSEMICOLON(self, ):
+ self.type = T47
- try:
- self.type = SEMICOLON
+ # XKBGrammar.g:23:5: ( 'partial' )
+ # XKBGrammar.g:23:7: 'partial'
+ self.match("partial")
- # XKBGrammar.g:26:11: ( ';' )
- # XKBGrammar.g:26:13: ';'
- self.match(u';')
@@ -507,41 +446,20 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end SEMICOLON
+ # $ANTLR end T47
- # $ANTLR start EQUAL
- def mEQUAL(self, ):
+ # $ANTLR start T48
+ def mT48(self, ):
try:
- self.type = EQUAL
-
- # XKBGrammar.g:27:7: ( '=' )
- # XKBGrammar.g:27:9: '='
- self.match(u'=')
-
-
-
-
-
- finally:
-
- pass
+ self.type = T48
- # $ANTLR end EQUAL
-
-
-
- # $ANTLR start LOWERTHAN
- def mLOWERTHAN(self, ):
-
- try:
- self.type = LOWERTHAN
+ # XKBGrammar.g:24:5: ( 'alphanumeric_keys' )
+ # XKBGrammar.g:24:7: 'alphanumeric_keys'
+ self.match("alphanumeric_keys")
- # XKBGrammar.g:28:11: ( '<' )
- # XKBGrammar.g:28:13: '<'
- self.match(u'<')
@@ -551,76 +469,42 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end LOWERTHAN
+ # $ANTLR end T48
- # $ANTLR start GREATERTHAN
- def mGREATERTHAN(self, ):
+ # $ANTLR start T49
+ def mT49(self, ):
try:
- self.type = GREATERTHAN
-
- # XKBGrammar.g:29:13: ( '>' )
- # XKBGrammar.g:29:15: '>'
- self.match(u'>')
+ self.type = T49
+ # XKBGrammar.g:25:5: ( 'alternate_group' )
+ # XKBGrammar.g:25:7: 'alternate_group'
+ self.match("alternate_group")
- finally:
-
- pass
-
- # $ANTLR end GREATERTHAN
-
-
-
- # $ANTLR start DOT
- def mDOT(self, ):
-
- try:
- self.type = DOT
-
- # XKBGrammar.g:30:5: ( '.' )
- # XKBGrammar.g:30:7: '.'
- self.match(u'.')
-
-
-
finally:
pass
- # $ANTLR end DOT
+ # $ANTLR end T49
- # $ANTLR start GENERIC_NAME
- def mGENERIC_NAME(self, ):
+ # $ANTLR start T50
+ def mT50(self, ):
try:
- # XKBGrammar.g:171:2: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' ) )
- # XKBGrammar.g:171:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )
- if (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
- self.input.consume();
+ self.type = T50
- else:
- mse = MismatchedSetException(None, self.input)
- self.recover(mse)
- raise mse
-
-
- if (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
- self.input.consume();
-
- else:
- mse = MismatchedSetException(None, self.input)
- self.recover(mse)
- raise mse
+ # XKBGrammar.g:26:5: ( 'xkb_symbols' )
+ # XKBGrammar.g:26:7: 'xkb_symbols'
+ self.match("xkb_symbols")
@@ -631,7 +515,7 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end GENERIC_NAME
+ # $ANTLR end T50
@@ -641,20 +525,20 @@ class XKBGrammarLexer(Lexer):
try:
self.type = NAME
- # XKBGrammar.g:175:2: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' )* )
- # XKBGrammar.g:175:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' )*
- # XKBGrammar.g:175:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' )*
+ # XKBGrammar.g:124:2: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '-' | '(' | ')' | '0' .. '9' )* )
+ # XKBGrammar.g:124:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '-' | '(' | ')' | '0' .. '9' )*
+ # XKBGrammar.g:124:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '-' | '(' | ')' | '0' .. '9' )*
while True: #loop1
alt1 = 2
LA1_0 = self.input.LA(1)
- if ((u'(' <= LA1_0 <= u')') or (u'0' <= LA1_0 <= u'9') or (u'A' <= LA1_0 <= u'Z') or LA1_0 == u'_' or (u'a' <= LA1_0 <= u'z')) :
+ if ((u'(' <= LA1_0 <= u')') or LA1_0 == u'-' or (u'0' <= LA1_0 <= u'9') or (u'A' <= LA1_0 <= u'Z') or LA1_0 == u'_' or (u'a' <= LA1_0 <= u'z')) :
alt1 = 1
if alt1 == 1:
# XKBGrammar.g:
- if (u'(' <= self.input.LA(1) <= u')') or (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
+ if (u'(' <= self.input.LA(1) <= u')') or self.input.LA(1) == u'-' or (u'0' <= self.input.LA(1) <= u'9') or (u'A' <= self.input.LA(1) <= u'Z') or self.input.LA(1) == u'_' or (u'a' <= self.input.LA(1) <= u'z'):
self.input.consume();
else:
@@ -687,8 +571,8 @@ class XKBGrammarLexer(Lexer):
try:
self.type = WS
- # XKBGrammar.g:180:2: ( ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' ) )
- # XKBGrammar.g:181:2: ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' )
+ # XKBGrammar.g:129:2: ( ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' ) )
+ # XKBGrammar.g:130:2: ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' )
if (u'\t' <= self.input.LA(1) <= u'\n') or (u'\f' <= self.input.LA(1) <= u'\r') or self.input.LA(1) == u' ':
self.input.consume();
@@ -699,7 +583,7 @@ class XKBGrammarLexer(Lexer):
#action start
- self.channel=HIDDEN;
+ self.channel=HIDDEN;
#action end
@@ -719,12 +603,12 @@ class XKBGrammarLexer(Lexer):
try:
self.type = COMMENT
- # XKBGrammar.g:186:6: ( '/*' ( . )* '*/' )
- # XKBGrammar.g:187:2: '/*' ( . )* '*/'
+ # XKBGrammar.g:135:6: ( '/*' ( . )* '*/' )
+ # XKBGrammar.g:136:2: '/*' ( . )* '*/'
self.match("/*")
- # XKBGrammar.g:187:7: ( . )*
+ # XKBGrammar.g:136:7: ( . )*
while True: #loop2
alt2 = 2
LA2_0 = self.input.LA(1)
@@ -743,7 +627,7 @@ class XKBGrammarLexer(Lexer):
if alt2 == 1:
- # XKBGrammar.g:187:7: .
+ # XKBGrammar.g:136:7: .
self.matchAny()
@@ -776,12 +660,12 @@ class XKBGrammarLexer(Lexer):
try:
self.type = LINE_COMMENT
- # XKBGrammar.g:191:6: ( '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' )
- # XKBGrammar.g:192:2: '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n'
+ # XKBGrammar.g:140:6: ( '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' )
+ # XKBGrammar.g:141:2: '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n'
self.match("//")
- # XKBGrammar.g:192:7: (~ ( '\\n' | '\\r' ) )*
+ # XKBGrammar.g:141:7: (~ ( '\\n' | '\\r' ) )*
while True: #loop3
alt3 = 2
LA3_0 = self.input.LA(1)
@@ -791,7 +675,7 @@ class XKBGrammarLexer(Lexer):
if alt3 == 1:
- # XKBGrammar.g:192:7: ~ ( '\\n' | '\\r' )
+ # XKBGrammar.g:141:7: ~ ( '\\n' | '\\r' )
if (u'\u0000' <= self.input.LA(1) <= u'\t') or (u'\u000B' <= self.input.LA(1) <= u'\f') or (u'\u000E' <= self.input.LA(1) <= u'\uFFFE'):
self.input.consume();
@@ -807,14 +691,14 @@ class XKBGrammarLexer(Lexer):
break #loop3
- # XKBGrammar.g:192:21: ( '\\r' )?
+ # XKBGrammar.g:141:23: ( '\\r' )?
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == u'\r') :
alt4 = 1
if alt4 == 1:
- # XKBGrammar.g:192:21: '\\r'
+ # XKBGrammar.g:141:23: '\\r'
self.match(u'\r')
@@ -823,7 +707,7 @@ class XKBGrammarLexer(Lexer):
self.match(u'\n')
#action start
- self.channel=HIDDEN;
+ self.channel=HIDDEN;
#action end
@@ -838,675 +722,578 @@ class XKBGrammarLexer(Lexer):
def mTokens(self):
- # XKBGrammar.g:1:8: ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_MODIFIER_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS | TOKEN_INCLUDE | TOKEN_KEY_TYPE | TOKEN_NAME | TOKEN_KEY | LBRACKET | RBRACKET | LCURLY | RCURLY | COMMA | DQUOTE | MINUS | PLUS | SEMICOLON | EQUAL | LOWERTHAN | GREATERTHAN | DOT | NAME | WS | COMMENT | LINE_COMMENT )
- alt5 = 28
+ # XKBGrammar.g:1:8: ( T31 | T32 | T33 | T34 | T35 | T36 | T37 | T38 | T39 | T40 | T41 | T42 | T43 | T44 | T45 | T46 | T47 | T48 | T49 | T50 | NAME | WS | COMMENT | LINE_COMMENT )
+ alt5 = 24
LA5 = self.input.LA(1)
- if LA5 == u'd':
- LA5_1 = self.input.LA(2)
+ if LA5 == u'"':
+ alt5 = 1
+ elif LA5 == u'{':
+ alt5 = 2
+ elif LA5 == u';':
+ alt5 = 3
+ elif LA5 == u'}':
+ alt5 = 4
+ elif LA5 == u'i':
+ LA5_5 = self.input.LA(2)
- if (LA5_1 == u'e') :
- LA5_26 = self.input.LA(3)
+ if (LA5_5 == u'n') :
+ LA5_22 = self.input.LA(3)
- if (LA5_26 == u'f') :
- LA5_37 = self.input.LA(4)
+ if (LA5_22 == u'c') :
+ LA5_32 = self.input.LA(4)
- if (LA5_37 == u'a') :
- LA5_47 = self.input.LA(5)
+ if (LA5_32 == u'l') :
+ LA5_41 = self.input.LA(5)
- if (LA5_47 == u'u') :
- LA5_58 = self.input.LA(6)
+ if (LA5_41 == u'u') :
+ LA5_51 = self.input.LA(6)
- if (LA5_58 == u'l') :
- LA5_67 = self.input.LA(7)
+ if (LA5_51 == u'd') :
+ LA5_59 = self.input.LA(7)
- if (LA5_67 == u't') :
- LA5_75 = self.input.LA(8)
+ if (LA5_59 == u'e') :
+ LA5_66 = self.input.LA(8)
- if ((u'(' <= LA5_75 <= u')') or (u'0' <= LA5_75 <= u'9') or (u'A' <= LA5_75 <= u'Z') or LA5_75 == u'_' or (u'a' <= LA5_75 <= u'z')) :
- alt5 = 25
+ if ((u'(' <= LA5_66 <= u')') or LA5_66 == u'-' or (u'0' <= LA5_66 <= u'9') or (u'A' <= LA5_66 <= u'Z') or LA5_66 == u'_' or (u'a' <= LA5_66 <= u'z')) :
+ alt5 = 21
else:
- alt5 = 1
+ alt5 = 5
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
- elif LA5 == u'h':
- LA5_2 = self.input.LA(2)
+ alt5 = 21
+ elif LA5 == u'n':
+ LA5_6 = self.input.LA(2)
- if (LA5_2 == u'i') :
- LA5_27 = self.input.LA(3)
+ if (LA5_6 == u'a') :
+ LA5_23 = self.input.LA(3)
- if (LA5_27 == u'd') :
- LA5_38 = self.input.LA(4)
+ if (LA5_23 == u'm') :
+ LA5_33 = self.input.LA(4)
- if (LA5_38 == u'd') :
- LA5_48 = self.input.LA(5)
+ if (LA5_33 == u'e') :
+ LA5_42 = self.input.LA(5)
- if (LA5_48 == u'e') :
- LA5_59 = self.input.LA(6)
+ if ((u'(' <= LA5_42 <= u')') or LA5_42 == u'-' or (u'0' <= LA5_42 <= u'9') or (u'A' <= LA5_42 <= u'Z') or LA5_42 == u'_' or (u'a' <= LA5_42 <= u'z')) :
+ alt5 = 21
+ else:
+ alt5 = 6
+ else:
+ alt5 = 21
+ else:
+ alt5 = 21
+ else:
+ alt5 = 21
+ elif LA5 == u'[':
+ alt5 = 7
+ elif LA5 == u']':
+ alt5 = 8
+ elif LA5 == u'=':
+ alt5 = 9
+ elif LA5 == u'k':
+ LA5_10 = self.input.LA(2)
- if (LA5_59 == u'n') :
- LA5_68 = self.input.LA(7)
+ if (LA5_10 == u'e') :
+ LA5_24 = self.input.LA(3)
- if ((u'(' <= LA5_68 <= u')') or (u'0' <= LA5_68 <= u'9') or (u'A' <= LA5_68 <= u'Z') or LA5_68 == u'_' or (u'a' <= LA5_68 <= u'z')) :
- alt5 = 25
- else:
- alt5 = 2
- else:
- alt5 = 25
- else:
- alt5 = 25
+ if (LA5_24 == u'y') :
+ LA5 = self.input.LA(4)
+ if LA5 == u'.':
+ alt5 = 10
+ elif LA5 == u'(' or LA5 == u')' or LA5 == u'-' or LA5 == u'0' or LA5 == u'1' or LA5 == u'2' or LA5 == u'3' or LA5 == u'4' or LA5 == u'5' or LA5 == u'6' or LA5 == u'7' or LA5 == u'8' or LA5 == u'9' or LA5 == u'A' or LA5 == u'B' or LA5 == u'C' or LA5 == u'D' or LA5 == u'E' or LA5 == u'F' or LA5 == u'G' or LA5 == u'H' or LA5 == u'I' or LA5 == u'J' or LA5 == u'K' or LA5 == u'L' or LA5 == u'M' or LA5 == u'N' or LA5 == u'O' or LA5 == u'P' or LA5 == u'Q' or LA5 == u'R' or LA5 == u'S' or LA5 == u'T' or LA5 == u'U' or LA5 == u'V' or LA5 == u'W' or LA5 == u'X' or LA5 == u'Y' or LA5 == u'Z' or LA5 == u'_' or LA5 == u'a' or LA5 == u'b' or LA5 == u'c' or LA5 == u'd' or LA5 == u'e' or LA5 == u'f' or LA5 == u'g' or LA5 == u'h' or LA5 == u'i' or LA5 == u'j' or LA5 == u'k' or LA5 == u'l' or LA5 == u'm' or LA5 == u'n' or LA5 == u'o' or LA5 == u'p' or LA5 == u'q' or LA5 == u'r' or LA5 == u's' or LA5 == u't' or LA5 == u'u' or LA5 == u'v' or LA5 == u'w' or LA5 == u'x' or LA5 == u'y' or LA5 == u'z':
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 11
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
- elif LA5 == u'p':
- LA5_3 = self.input.LA(2)
+ alt5 = 21
+ elif LA5 == u'<':
+ alt5 = 12
+ elif LA5 == u'>':
+ alt5 = 13
+ elif LA5 == u',':
+ alt5 = 14
+ elif LA5 == u'd':
+ LA5_14 = self.input.LA(2)
- if (LA5_3 == u'a') :
- LA5_28 = self.input.LA(3)
+ if (LA5_14 == u'e') :
+ LA5_25 = self.input.LA(3)
- if (LA5_28 == u'r') :
- LA5_39 = self.input.LA(4)
+ if (LA5_25 == u'f') :
+ LA5_35 = self.input.LA(4)
- if (LA5_39 == u't') :
- LA5_49 = self.input.LA(5)
+ if (LA5_35 == u'a') :
+ LA5_45 = self.input.LA(5)
- if (LA5_49 == u'i') :
- LA5_60 = self.input.LA(6)
+ if (LA5_45 == u'u') :
+ LA5_53 = self.input.LA(6)
- if (LA5_60 == u'a') :
- LA5_69 = self.input.LA(7)
+ if (LA5_53 == u'l') :
+ LA5_60 = self.input.LA(7)
- if (LA5_69 == u'l') :
- LA5_77 = self.input.LA(8)
+ if (LA5_60 == u't') :
+ LA5_67 = self.input.LA(8)
- if ((u'(' <= LA5_77 <= u')') or (u'0' <= LA5_77 <= u'9') or (u'A' <= LA5_77 <= u'Z') or LA5_77 == u'_' or (u'a' <= LA5_77 <= u'z')) :
- alt5 = 25
+ if ((u'(' <= LA5_67 <= u')') or LA5_67 == u'-' or (u'0' <= LA5_67 <= u'9') or (u'A' <= LA5_67 <= u'Z') or LA5_67 == u'_' or (u'a' <= LA5_67 <= u'z')) :
+ alt5 = 21
else:
- alt5 = 3
+ alt5 = 15
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
- elif LA5 == u'a':
- LA5_4 = self.input.LA(2)
-
- if (LA5_4 == u'l') :
- LA5 = self.input.LA(3)
- if LA5 == u't':
- LA5_40 = self.input.LA(4)
+ alt5 = 21
+ elif LA5 == u'h':
+ LA5_15 = self.input.LA(2)
- if (LA5_40 == u'e') :
- LA5_50 = self.input.LA(5)
+ if (LA5_15 == u'i') :
+ LA5_26 = self.input.LA(3)
- if (LA5_50 == u'r') :
- LA5_61 = self.input.LA(6)
+ if (LA5_26 == u'd') :
+ LA5_36 = self.input.LA(4)
- if (LA5_61 == u'n') :
- LA5_70 = self.input.LA(7)
+ if (LA5_36 == u'd') :
+ LA5_46 = self.input.LA(5)
- if (LA5_70 == u'a') :
- LA5_78 = self.input.LA(8)
+ if (LA5_46 == u'e') :
+ LA5_54 = self.input.LA(6)
- if (LA5_78 == u't') :
- LA5_85 = self.input.LA(9)
+ if (LA5_54 == u'n') :
+ LA5_61 = self.input.LA(7)
- if (LA5_85 == u'e') :
- LA5_90 = self.input.LA(10)
+ if ((u'(' <= LA5_61 <= u')') or LA5_61 == u'-' or (u'0' <= LA5_61 <= u'9') or (u'A' <= LA5_61 <= u'Z') or LA5_61 == u'_' or (u'a' <= LA5_61 <= u'z')) :
+ alt5 = 21
+ else:
+ alt5 = 16
+ else:
+ alt5 = 21
+ else:
+ alt5 = 21
+ else:
+ alt5 = 21
+ else:
+ alt5 = 21
+ else:
+ alt5 = 21
+ elif LA5 == u'p':
+ LA5_16 = self.input.LA(2)
- if (LA5_90 == u'_') :
- LA5_94 = self.input.LA(11)
+ if (LA5_16 == u'a') :
+ LA5_27 = self.input.LA(3)
- if (LA5_94 == u'g') :
- LA5_98 = self.input.LA(12)
+ if (LA5_27 == u'r') :
+ LA5_37 = self.input.LA(4)
- if (LA5_98 == u'r') :
- LA5_102 = self.input.LA(13)
+ if (LA5_37 == u't') :
+ LA5_47 = self.input.LA(5)
- if (LA5_102 == u'o') :
- LA5_106 = self.input.LA(14)
+ if (LA5_47 == u'i') :
+ LA5_55 = self.input.LA(6)
- if (LA5_106 == u'u') :
- LA5_109 = self.input.LA(15)
+ if (LA5_55 == u'a') :
+ LA5_62 = self.input.LA(7)
- if (LA5_109 == u'p') :
- LA5_112 = self.input.LA(16)
+ if (LA5_62 == u'l') :
+ LA5_69 = self.input.LA(8)
- if ((u'(' <= LA5_112 <= u')') or (u'0' <= LA5_112 <= u'9') or (u'A' <= LA5_112 <= u'Z') or LA5_112 == u'_' or (u'a' <= LA5_112 <= u'z')) :
- alt5 = 25
- else:
- alt5 = 6
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
+ if ((u'(' <= LA5_69 <= u')') or LA5_69 == u'-' or (u'0' <= LA5_69 <= u'9') or (u'A' <= LA5_69 <= u'Z') or LA5_69 == u'_' or (u'a' <= LA5_69 <= u'z')) :
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 17
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
- elif LA5 == u'p':
- LA5_41 = self.input.LA(4)
+ alt5 = 21
+ else:
+ alt5 = 21
+ else:
+ alt5 = 21
+ elif LA5 == u'a':
+ LA5_17 = self.input.LA(2)
+
+ if (LA5_17 == u'l') :
+ LA5 = self.input.LA(3)
+ if LA5 == u'p':
+ LA5_38 = self.input.LA(4)
- if (LA5_41 == u'h') :
- LA5_51 = self.input.LA(5)
+ if (LA5_38 == u'h') :
+ LA5_48 = self.input.LA(5)
- if (LA5_51 == u'a') :
- LA5_62 = self.input.LA(6)
+ if (LA5_48 == u'a') :
+ LA5_56 = self.input.LA(6)
- if (LA5_62 == u'n') :
- LA5_71 = self.input.LA(7)
+ if (LA5_56 == u'n') :
+ LA5_63 = self.input.LA(7)
- if (LA5_71 == u'u') :
- LA5_79 = self.input.LA(8)
+ if (LA5_63 == u'u') :
+ LA5_70 = self.input.LA(8)
- if (LA5_79 == u'm') :
- LA5_86 = self.input.LA(9)
+ if (LA5_70 == u'm') :
+ LA5_76 = self.input.LA(9)
- if (LA5_86 == u'e') :
- LA5_91 = self.input.LA(10)
+ if (LA5_76 == u'e') :
+ LA5_79 = self.input.LA(10)
- if (LA5_91 == u'r') :
- LA5_95 = self.input.LA(11)
+ if (LA5_79 == u'r') :
+ LA5_82 = self.input.LA(11)
- if (LA5_95 == u'i') :
- LA5_99 = self.input.LA(12)
+ if (LA5_82 == u'i') :
+ LA5_85 = self.input.LA(12)
- if (LA5_99 == u'c') :
- LA5_103 = self.input.LA(13)
+ if (LA5_85 == u'c') :
+ LA5_88 = self.input.LA(13)
- if (LA5_103 == u'_') :
- LA5_107 = self.input.LA(14)
+ if (LA5_88 == u'_') :
+ LA5_91 = self.input.LA(14)
- if (LA5_107 == u'k') :
- LA5_110 = self.input.LA(15)
+ if (LA5_91 == u'k') :
+ LA5_93 = self.input.LA(15)
- if (LA5_110 == u'e') :
- LA5_113 = self.input.LA(16)
+ if (LA5_93 == u'e') :
+ LA5_95 = self.input.LA(16)
- if (LA5_113 == u'y') :
- LA5_115 = self.input.LA(17)
+ if (LA5_95 == u'y') :
+ LA5_97 = self.input.LA(17)
- if (LA5_115 == u's') :
- LA5_116 = self.input.LA(18)
+ if (LA5_97 == u's') :
+ LA5_99 = self.input.LA(18)
- if ((u'(' <= LA5_116 <= u')') or (u'0' <= LA5_116 <= u'9') or (u'A' <= LA5_116 <= u'Z') or LA5_116 == u'_' or (u'a' <= LA5_116 <= u'z')) :
- alt5 = 25
+ if ((u'(' <= LA5_99 <= u')') or LA5_99 == u'-' or (u'0' <= LA5_99 <= u'9') or (u'A' <= LA5_99 <= u'Z') or LA5_99 == u'_' or (u'a' <= LA5_99 <= u'z')) :
+ alt5 = 21
else:
- alt5 = 4
+ alt5 = 18
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- elif LA5 == u'm':
- LA5_5 = self.input.LA(2)
+ alt5 = 21
+ elif LA5 == u't':
+ LA5_39 = self.input.LA(4)
- if (LA5_5 == u'o') :
- LA5_30 = self.input.LA(3)
+ if (LA5_39 == u'e') :
+ LA5_49 = self.input.LA(5)
- if (LA5_30 == u'd') :
- LA5_42 = self.input.LA(4)
+ if (LA5_49 == u'r') :
+ LA5_57 = self.input.LA(6)
- if (LA5_42 == u'i') :
- LA5_52 = self.input.LA(5)
+ if (LA5_57 == u'n') :
+ LA5_64 = self.input.LA(7)
- if (LA5_52 == u'f') :
- LA5_63 = self.input.LA(6)
+ if (LA5_64 == u'a') :
+ LA5_71 = self.input.LA(8)
- if (LA5_63 == u'i') :
- LA5_72 = self.input.LA(7)
+ if (LA5_71 == u't') :
+ LA5_77 = self.input.LA(9)
- if (LA5_72 == u'e') :
- LA5_80 = self.input.LA(8)
+ if (LA5_77 == u'e') :
+ LA5_80 = self.input.LA(10)
- if (LA5_80 == u'r') :
- LA5_87 = self.input.LA(9)
+ if (LA5_80 == u'_') :
+ LA5_83 = self.input.LA(11)
- if (LA5_87 == u'_') :
- LA5_92 = self.input.LA(10)
+ if (LA5_83 == u'g') :
+ LA5_86 = self.input.LA(12)
- if (LA5_92 == u'k') :
- LA5_96 = self.input.LA(11)
+ if (LA5_86 == u'r') :
+ LA5_89 = self.input.LA(13)
- if (LA5_96 == u'e') :
- LA5_100 = self.input.LA(12)
+ if (LA5_89 == u'o') :
+ LA5_92 = self.input.LA(14)
- if (LA5_100 == u'y') :
- LA5_104 = self.input.LA(13)
+ if (LA5_92 == u'u') :
+ LA5_94 = self.input.LA(15)
- if (LA5_104 == u's') :
- LA5_108 = self.input.LA(14)
+ if (LA5_94 == u'p') :
+ LA5_96 = self.input.LA(16)
- if ((u'(' <= LA5_108 <= u')') or (u'0' <= LA5_108 <= u'9') or (u'A' <= LA5_108 <= u'Z') or LA5_108 == u'_' or (u'a' <= LA5_108 <= u'z')) :
- alt5 = 25
+ if ((u'(' <= LA5_96 <= u')') or LA5_96 == u'-' or (u'0' <= LA5_96 <= u'9') or (u'A' <= LA5_96 <= u'Z') or LA5_96 == u'_' or (u'a' <= LA5_96 <= u'z')) :
+ alt5 = 21
+ else:
+ alt5 = 19
+ else:
+ alt5 = 21
else:
- alt5 = 5
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
elif LA5 == u'x':
- LA5_6 = self.input.LA(2)
+ LA5_18 = self.input.LA(2)
- if (LA5_6 == u'k') :
- LA5_31 = self.input.LA(3)
+ if (LA5_18 == u'k') :
+ LA5_29 = self.input.LA(3)
- if (LA5_31 == u'b') :
- LA5_43 = self.input.LA(4)
+ if (LA5_29 == u'b') :
+ LA5_40 = self.input.LA(4)
- if (LA5_43 == u'_') :
- LA5_53 = self.input.LA(5)
+ if (LA5_40 == u'_') :
+ LA5_50 = self.input.LA(5)
- if (LA5_53 == u's') :
- LA5_64 = self.input.LA(6)
+ if (LA5_50 == u's') :
+ LA5_58 = self.input.LA(6)
- if (LA5_64 == u'y') :
- LA5_73 = self.input.LA(7)
+ if (LA5_58 == u'y') :
+ LA5_65 = self.input.LA(7)
- if (LA5_73 == u'm') :
- LA5_81 = self.input.LA(8)
+ if (LA5_65 == u'm') :
+ LA5_72 = self.input.LA(8)
- if (LA5_81 == u'b') :
- LA5_88 = self.input.LA(9)
+ if (LA5_72 == u'b') :
+ LA5_78 = self.input.LA(9)
- if (LA5_88 == u'o') :
- LA5_93 = self.input.LA(10)
+ if (LA5_78 == u'o') :
+ LA5_81 = self.input.LA(10)
- if (LA5_93 == u'l') :
- LA5_97 = self.input.LA(11)
+ if (LA5_81 == u'l') :
+ LA5_84 = self.input.LA(11)
- if (LA5_97 == u's') :
- LA5_101 = self.input.LA(12)
+ if (LA5_84 == u's') :
+ LA5_87 = self.input.LA(12)
- if ((u'(' <= LA5_101 <= u')') or (u'0' <= LA5_101 <= u'9') or (u'A' <= LA5_101 <= u'Z') or LA5_101 == u'_' or (u'a' <= LA5_101 <= u'z')) :
- alt5 = 25
+ if ((u'(' <= LA5_87 <= u')') or LA5_87 == u'-' or (u'0' <= LA5_87 <= u'9') or (u'A' <= LA5_87 <= u'Z') or LA5_87 == u'_' or (u'a' <= LA5_87 <= u'z')) :
+ alt5 = 21
else:
- alt5 = 7
+ alt5 = 20
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- elif LA5 == u'i':
- LA5_7 = self.input.LA(2)
-
- if (LA5_7 == u'n') :
- LA5_32 = self.input.LA(3)
-
- if (LA5_32 == u'c') :
- LA5_44 = self.input.LA(4)
-
- if (LA5_44 == u'l') :
- LA5_54 = self.input.LA(5)
-
- if (LA5_54 == u'u') :
- LA5_65 = self.input.LA(6)
-
- if (LA5_65 == u'd') :
- LA5_74 = self.input.LA(7)
-
- if (LA5_74 == u'e') :
- LA5_82 = self.input.LA(8)
-
- if ((u'(' <= LA5_82 <= u')') or (u'0' <= LA5_82 <= u'9') or (u'A' <= LA5_82 <= u'Z') or LA5_82 == u'_' or (u'a' <= LA5_82 <= u'z')) :
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 8
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- elif LA5 == u'k':
- LA5_8 = self.input.LA(2)
-
- if (LA5_8 == u'e') :
- LA5_33 = self.input.LA(3)
-
- if (LA5_33 == u'y') :
- LA5 = self.input.LA(4)
- if LA5 == u'.':
- alt5 = 9
- elif LA5 == u'(' or LA5 == u')' or LA5 == u'0' or LA5 == u'1' or LA5 == u'2' or LA5 == u'3' or LA5 == u'4' or LA5 == u'5' or LA5 == u'6' or LA5 == u'7' or LA5 == u'8' or LA5 == u'9' or LA5 == u'A' or LA5 == u'B' or LA5 == u'C' or LA5 == u'D' or LA5 == u'E' or LA5 == u'F' or LA5 == u'G' or LA5 == u'H' or LA5 == u'I' or LA5 == u'J' or LA5 == u'K' or LA5 == u'L' or LA5 == u'M' or LA5 == u'N' or LA5 == u'O' or LA5 == u'P' or LA5 == u'Q' or LA5 == u'R' or LA5 == u'S' or LA5 == u'T' or LA5 == u'U' or LA5 == u'V' or LA5 == u'W' or LA5 == u'X' or LA5 == u'Y' or LA5 == u'Z' or LA5 == u'_' or LA5 == u'a' or LA5 == u'b' or LA5 == u'c' or LA5 == u'd' or LA5 == u'e' or LA5 == u'f' or LA5 == u'g' or LA5 == u'h' or LA5 == u'i' or LA5 == u'j' or LA5 == u'k' or LA5 == u'l' or LA5 == u'm' or LA5 == u'n' or LA5 == u'o' or LA5 == u'p' or LA5 == u'q' or LA5 == u'r' or LA5 == u's' or LA5 == u't' or LA5 == u'u' or LA5 == u'v' or LA5 == u'w' or LA5 == u'x' or LA5 == u'y' or LA5 == u'z':
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 11
+ alt5 = 21
else:
- alt5 = 25
+ alt5 = 21
else:
- alt5 = 25
- elif LA5 == u'n':
- LA5_9 = self.input.LA(2)
-
- if (LA5_9 == u'a') :
- LA5_34 = self.input.LA(3)
-
- if (LA5_34 == u'm') :
- LA5_46 = self.input.LA(4)
-
- if (LA5_46 == u'e') :
- LA5_57 = self.input.LA(5)
-
- if ((u'(' <= LA5_57 <= u')') or (u'0' <= LA5_57 <= u'9') or (u'A' <= LA5_57 <= u'Z') or LA5_57 == u'_' or (u'a' <= LA5_57 <= u'z')) :
- alt5 = 25
- else:
- alt5 = 10
- else:
- alt5 = 25
- else:
- alt5 = 25
- else:
- alt5 = 25
- elif LA5 == u'[':
- alt5 = 12
- elif LA5 == u']':
- alt5 = 13
- elif LA5 == u'{':
- alt5 = 14
- elif LA5 == u'}':
- alt5 = 15
- elif LA5 == u',':
- alt5 = 16
- elif LA5 == u'"':
- alt5 = 17
- elif LA5 == u'-':
- alt5 = 18
- elif LA5 == u'+':
- alt5 = 19
- elif LA5 == u';':
- alt5 = 20
- elif LA5 == u'=':
- alt5 = 21
- elif LA5 == u'<':
- alt5 = 22
- elif LA5 == u'>':
- alt5 = 23
- elif LA5 == u'.':
- alt5 = 24
+ alt5 = 21
elif LA5 == u'\t' or LA5 == u'\n' or LA5 == u'\f' or LA5 == u'\r' or LA5 == u' ':
- alt5 = 26
+ alt5 = 22
elif LA5 == u'/':
- LA5_25 = self.input.LA(2)
+ LA5_21 = self.input.LA(2)
- if (LA5_25 == u'*') :
- alt5 = 27
- elif (LA5_25 == u'/') :
- alt5 = 28
+ if (LA5_21 == u'/') :
+ alt5 = 24
+ elif (LA5_21 == u'*') :
+ alt5 = 23
else:
- nvae = NoViableAltException("1:1: Tokens : ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_MODIFIER_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS | TOKEN_INCLUDE | TOKEN_KEY_TYPE | TOKEN_NAME | TOKEN_KEY | LBRACKET | RBRACKET | LCURLY | RCURLY | COMMA | DQUOTE | MINUS | PLUS | SEMICOLON | EQUAL | LOWERTHAN | GREATERTHAN | DOT | NAME | WS | COMMENT | LINE_COMMENT );", 5, 25, self.input)
+ nvae = NoViableAltException("1:1: Tokens : ( T31 | T32 | T33 | T34 | T35 | T36 | T37 | T38 | T39 | T40 | T41 | T42 | T43 | T44 | T45 | T46 | T47 | T48 | T49 | T50 | NAME | WS | COMMENT | LINE_COMMENT );", 5, 21, self.input)
raise nvae
else:
- alt5 = 25
+ alt5 = 21
if alt5 == 1:
- # XKBGrammar.g:1:10: TOKEN_DEFAULT
- self.mTOKEN_DEFAULT()
+ # XKBGrammar.g:1:10: T31
+ self.mT31()
elif alt5 == 2:
- # XKBGrammar.g:1:24: TOKEN_HIDDEN
- self.mTOKEN_HIDDEN()
+ # XKBGrammar.g:1:14: T32
+ self.mT32()
elif alt5 == 3:
- # XKBGrammar.g:1:37: TOKEN_PARTIAL
- self.mTOKEN_PARTIAL()
+ # XKBGrammar.g:1:18: T33
+ self.mT33()
elif alt5 == 4:
- # XKBGrammar.g:1:51: TOKEN_ALPHANUMERIC_KEYS
- self.mTOKEN_ALPHANUMERIC_KEYS()
+ # XKBGrammar.g:1:22: T34
+ self.mT34()
elif alt5 == 5:
- # XKBGrammar.g:1:75: TOKEN_MODIFIER_KEYS
- self.mTOKEN_MODIFIER_KEYS()
+ # XKBGrammar.g:1:26: T35
+ self.mT35()
elif alt5 == 6:
- # XKBGrammar.g:1:95: TOKEN_ALTERNATE_GROUP
- self.mTOKEN_ALTERNATE_GROUP()
+ # XKBGrammar.g:1:30: T36
+ self.mT36()
elif alt5 == 7:
- # XKBGrammar.g:1:117: TOKEN_XKB_SYMBOLS
- self.mTOKEN_XKB_SYMBOLS()
+ # XKBGrammar.g:1:34: T37
+ self.mT37()
elif alt5 == 8:
- # XKBGrammar.g:1:135: TOKEN_INCLUDE
- self.mTOKEN_INCLUDE()
+ # XKBGrammar.g:1:38: T38
+ self.mT38()
elif alt5 == 9:
- # XKBGrammar.g:1:149: TOKEN_KEY_TYPE
- self.mTOKEN_KEY_TYPE()
+ # XKBGrammar.g:1:42: T39
+ self.mT39()
elif alt5 == 10:
- # XKBGrammar.g:1:164: TOKEN_NAME
- self.mTOKEN_NAME()
+ # XKBGrammar.g:1:46: T40
+ self.mT40()
elif alt5 == 11:
- # XKBGrammar.g:1:175: TOKEN_KEY
- self.mTOKEN_KEY()
+ # XKBGrammar.g:1:50: T41
+ self.mT41()
elif alt5 == 12:
- # XKBGrammar.g:1:185: LBRACKET
- self.mLBRACKET()
+ # XKBGrammar.g:1:54: T42
+ self.mT42()
elif alt5 == 13:
- # XKBGrammar.g:1:194: RBRACKET
- self.mRBRACKET()
+ # XKBGrammar.g:1:58: T43
+ self.mT43()
elif alt5 == 14:
- # XKBGrammar.g:1:203: LCURLY
- self.mLCURLY()
+ # XKBGrammar.g:1:62: T44
+ self.mT44()
elif alt5 == 15:
- # XKBGrammar.g:1:210: RCURLY
- self.mRCURLY()
+ # XKBGrammar.g:1:66: T45
+ self.mT45()
elif alt5 == 16:
- # XKBGrammar.g:1:217: COMMA
- self.mCOMMA()
+ # XKBGrammar.g:1:70: T46
+ self.mT46()
elif alt5 == 17:
- # XKBGrammar.g:1:223: DQUOTE
- self.mDQUOTE()
+ # XKBGrammar.g:1:74: T47
+ self.mT47()
elif alt5 == 18:
- # XKBGrammar.g:1:230: MINUS
- self.mMINUS()
+ # XKBGrammar.g:1:78: T48
+ self.mT48()
elif alt5 == 19:
- # XKBGrammar.g:1:236: PLUS
- self.mPLUS()
+ # XKBGrammar.g:1:82: T49
+ self.mT49()
elif alt5 == 20:
- # XKBGrammar.g:1:241: SEMICOLON
- self.mSEMICOLON()
+ # XKBGrammar.g:1:86: T50
+ self.mT50()
elif alt5 == 21:
- # XKBGrammar.g:1:251: EQUAL
- self.mEQUAL()
-
-
-
- elif alt5 == 22:
- # XKBGrammar.g:1:257: LOWERTHAN
- self.mLOWERTHAN()
-
-
-
- elif alt5 == 23:
- # XKBGrammar.g:1:267: GREATERTHAN
- self.mGREATERTHAN()
-
-
-
- elif alt5 == 24:
- # XKBGrammar.g:1:279: DOT
- self.mDOT()
-
-
-
- elif alt5 == 25:
- # XKBGrammar.g:1:283: NAME
+ # XKBGrammar.g:1:90: NAME
self.mNAME()
- elif alt5 == 26:
- # XKBGrammar.g:1:288: WS
+ elif alt5 == 22:
+ # XKBGrammar.g:1:95: WS
self.mWS()
- elif alt5 == 27:
- # XKBGrammar.g:1:291: COMMENT
+ elif alt5 == 23:
+ # XKBGrammar.g:1:98: COMMENT
self.mCOMMENT()
- elif alt5 == 28:
- # XKBGrammar.g:1:299: LINE_COMMENT
+ elif alt5 == 24:
+ # XKBGrammar.g:1:106: LINE_COMMENT
self.mLINE_COMMENT()
diff --git a/XKBGrammar/XKBGrammarLexer.pyc b/XKBGrammar/XKBGrammarLexer.pyc
Binary files differ.
diff --git a/XKBGrammar/XKBGrammarParser.py b/XKBGrammar/XKBGrammarParser.py
@@ -1,4 +1,4 @@
-# $ANTLR 3.0.1 XKBGrammar.g 2008-05-09 21:53:06
+# $ANTLR 3.0.1 XKBGrammar.g 2008-05-14 21:55:38
from antlr3 import *
from antlr3.compat import set, frozenset
@@ -11,56 +11,47 @@ from antlr3.tree import *
HIDDEN = BaseRecognizer.HIDDEN
# token types
+QUOTEDSTRING=25
TOKEN_ALTERNATE_GROUP=9
-ATTRIBUTES=29
-SECTION=35
-LINE_COMMENT=41
-KEYCODE=34
+ATTRIBUTES=17
+SECTION=23
+LINE_COMMENT=30
+KEYCODE=22
TOKEN_INCLUDE=11
-KEY=32
-KEYTYPE=33
-ATTRIBUTE=30
+KEY=20
+KEYTYPE=21
+ATTRIBUTE=18
TOKEN_NAME=13
-DQUOTE=20
-LCURLY=17
-SEMICOLON=23
-MINUS=21
TOKEN_XKB_SYMBOLS=10
EOF=-1
-GENERIC_NAME=38
-SECTIONNAME=36
-MAPTYPE=28
-LBRACKET=15
-NAME=37
+SECTIONNAME=24
+MAPTYPE=15
+NAME=27
TOKEN_PARTIAL=6
-WS=39
+WS=28
TOKEN_ALPHANUMERIC_KEYS=7
TOKEN_HIDDEN=5
-COMMA=19
-LOWERTHAN=25
-INCLUDE=31
-EQUAL=24
-RCURLY=18
+MAPMATERIAL=16
+INCLUDE=19
TOKEN_MODIFIER_KEYS=8
-PLUS=22
+KEYSYMS=26
TOKEN_KEY=14
-RBRACKET=16
-COMMENT=40
-DOT=27
+COMMENT=29
TOKEN_DEFAULT=4
TOKEN_KEY_TYPE=12
-GREATERTHAN=26
# token names
tokenNames = [
"<invalid>", "<EOR>", "<DOWN>", "<UP>",
"TOKEN_DEFAULT", "TOKEN_HIDDEN", "TOKEN_PARTIAL", "TOKEN_ALPHANUMERIC_KEYS",
"TOKEN_MODIFIER_KEYS", "TOKEN_ALTERNATE_GROUP", "TOKEN_XKB_SYMBOLS",
- "TOKEN_INCLUDE", "TOKEN_KEY_TYPE", "TOKEN_NAME", "TOKEN_KEY", "LBRACKET",
- "RBRACKET", "LCURLY", "RCURLY", "COMMA", "DQUOTE", "MINUS", "PLUS",
- "SEMICOLON", "EQUAL", "LOWERTHAN", "GREATERTHAN", "DOT", "MAPTYPE",
- "ATTRIBUTES", "ATTRIBUTE", "INCLUDE", "KEY", "KEYTYPE", "KEYCODE", "SECTION",
- "SECTIONNAME", "NAME", "GENERIC_NAME", "WS", "COMMENT", "LINE_COMMENT"
+ "TOKEN_INCLUDE", "TOKEN_KEY_TYPE", "TOKEN_NAME", "TOKEN_KEY", "MAPTYPE",
+ "MAPMATERIAL", "ATTRIBUTES", "ATTRIBUTE", "INCLUDE", "KEY", "KEYTYPE",
+ "KEYCODE", "SECTION", "SECTIONNAME", "QUOTEDSTRING", "KEYSYMS", "NAME",
+ "WS", "COMMENT", "LINE_COMMENT", "'\"'", "'{'", "';'", "'}'", "'include'",
+ "'name'", "'['", "']'", "'='", "'key.type'", "'key'", "'<'", "'>'",
+ "','", "'default'", "'hidden'", "'partial'", "'alphanumeric_keys'",
+ "'alternate_group'", "'xkb_symbols'"
]
@@ -89,7 +80,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start layout
- # XKBGrammar.g:76:1: layout : ( section )+ ;
+ # XKBGrammar.g:61:1: layout : ( section )+ EOF ;
def layout(self, ):
retval = self.layout_return()
@@ -97,29 +88,31 @@ class XKBGrammarParser(Parser):
root_0 = None
+ EOF2 = None
section1 = None
+ EOF2_tree = None
try:
try:
- # XKBGrammar.g:77:2: ( ( section )+ )
- # XKBGrammar.g:77:4: ( section )+
+ # XKBGrammar.g:62:2: ( ( section )+ EOF )
+ # XKBGrammar.g:62:4: ( section )+ EOF
root_0 = self.adaptor.nil()
- # XKBGrammar.g:77:4: ( section )+
+ # XKBGrammar.g:62:4: ( section )+
cnt1 = 0
while True: #loop1
alt1 = 2
LA1_0 = self.input.LA(1)
- if ((TOKEN_DEFAULT <= LA1_0 <= TOKEN_ALPHANUMERIC_KEYS) or (TOKEN_ALTERNATE_GROUP <= LA1_0 <= TOKEN_XKB_SYMBOLS)) :
+ if ((45 <= LA1_0 <= 50)) :
alt1 = 1
if alt1 == 1:
- # XKBGrammar.g:77:4: section
- self.following.append(self.FOLLOW_section_in_layout369)
+ # XKBGrammar.g:62:4: section
+ self.following.append(self.FOLLOW_section_in_layout160)
section1 = self.section()
self.following.pop()
@@ -136,6 +129,9 @@ class XKBGrammarParser(Parser):
cnt1 += 1
+ EOF2 = self.input.LT(1)
+ self.match(self.input, EOF, self.FOLLOW_EOF_in_layout163)
+
@@ -165,7 +161,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start section
- # XKBGrammar.g:80:1: section : mapType sectionmaterial ;
+ # XKBGrammar.g:65:1: section : mapType mapMaterial -> ^( SECTION mapType mapMaterial ) ;
def section(self, ):
retval = self.section_return()
@@ -173,31 +169,55 @@ class XKBGrammarParser(Parser):
root_0 = None
- mapType2 = None
-
- sectionmaterial3 = None
+ mapType3 = None
+ mapMaterial4 = None
+ stream_mapMaterial = RewriteRuleSubtreeStream(self.adaptor, "rule mapMaterial")
+ stream_mapType = RewriteRuleSubtreeStream(self.adaptor, "rule mapType")
try:
try:
- # XKBGrammar.g:81:2: ( mapType sectionmaterial )
- # XKBGrammar.g:81:4: mapType sectionmaterial
- root_0 = self.adaptor.nil()
-
- self.following.append(self.FOLLOW_mapType_in_section383)
- mapType2 = self.mapType()
+ # XKBGrammar.g:66:2: ( mapType mapMaterial -> ^( SECTION mapType mapMaterial ) )
+ # XKBGrammar.g:66:4: mapType mapMaterial
+ self.following.append(self.FOLLOW_mapType_in_section177)
+ mapType3 = self.mapType()
self.following.pop()
- self.adaptor.addChild(root_0, mapType2.tree)
- self.following.append(self.FOLLOW_sectionmaterial_in_section385)
- sectionmaterial3 = self.sectionmaterial()
+ stream_mapType.add(mapType3.tree)
+ self.following.append(self.FOLLOW_mapMaterial_in_section179)
+ mapMaterial4 = self.mapMaterial()
self.following.pop()
- self.adaptor.addChild(root_0, sectionmaterial3.tree)
- #action start
- print '}'
- #action end
+ stream_mapMaterial.add(mapMaterial4.tree)
+ # AST Rewrite
+ # elements: mapType, mapMaterial
+ # token labels:
+ # rule labels: retval
+ # token list labels:
+ # rule list labels:
+
+ retval.tree = root_0
+
+ if retval is not None:
+ stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", retval.tree)
+ else:
+ stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", None)
+
+
+ root_0 = self.adaptor.nil()
+ # 67:2: -> ^( SECTION mapType mapMaterial )
+ # XKBGrammar.g:67:5: ^( SECTION mapType mapMaterial )
+ root_1 = self.adaptor.nil()
+ root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(SECTION, "SECTION"), root_1)
+
+ self.adaptor.addChild(root_1, stream_mapType.next())
+ self.adaptor.addChild(root_1, stream_mapMaterial.next())
+
+ self.adaptor.addChild(root_0, root_1)
+
+
+
@@ -227,7 +247,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start mapType
- # XKBGrammar.g:85:1: mapType : ( mapOptions )+ sectionname= quotedstring -> ^( MAPTYPE ( mapOptions )+ $sectionname) ;
+ # XKBGrammar.g:70:1: mapType : ( mapOptions )+ '\"' NAME '\"' -> ^( MAPTYPE ( mapOptions )+ NAME ) ;
def mapType(self, ):
retval = self.mapType_return()
@@ -235,34 +255,39 @@ class XKBGrammarParser(Parser):
root_0 = None
- sectionname = None
-
- mapOptions4 = None
+ char_literal6 = None
+ NAME7 = None
+ char_literal8 = None
+ mapOptions5 = None
- stream_quotedstring = RewriteRuleSubtreeStream(self.adaptor, "rule quotedstring")
+ char_literal6_tree = None
+ NAME7_tree = None
+ char_literal8_tree = None
+ stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
+ stream_31 = RewriteRuleTokenStream(self.adaptor, "token 31")
stream_mapOptions = RewriteRuleSubtreeStream(self.adaptor, "rule mapOptions")
try:
try:
- # XKBGrammar.g:86:2: ( ( mapOptions )+ sectionname= quotedstring -> ^( MAPTYPE ( mapOptions )+ $sectionname) )
- # XKBGrammar.g:86:4: ( mapOptions )+ sectionname= quotedstring
- # XKBGrammar.g:86:4: ( mapOptions )+
+ # XKBGrammar.g:71:2: ( ( mapOptions )+ '\"' NAME '\"' -> ^( MAPTYPE ( mapOptions )+ NAME ) )
+ # XKBGrammar.g:71:4: ( mapOptions )+ '\"' NAME '\"'
+ # XKBGrammar.g:71:4: ( mapOptions )+
cnt2 = 0
while True: #loop2
alt2 = 2
LA2_0 = self.input.LA(1)
- if ((TOKEN_DEFAULT <= LA2_0 <= TOKEN_ALPHANUMERIC_KEYS) or (TOKEN_ALTERNATE_GROUP <= LA2_0 <= TOKEN_XKB_SYMBOLS)) :
+ if ((45 <= LA2_0 <= 50)) :
alt2 = 1
if alt2 == 1:
- # XKBGrammar.g:86:4: mapOptions
- self.following.append(self.FOLLOW_mapOptions_in_mapType401)
- mapOptions4 = self.mapOptions()
+ # XKBGrammar.g:71:4: mapOptions
+ self.following.append(self.FOLLOW_mapOptions_in_mapType202)
+ mapOptions5 = self.mapOptions()
self.following.pop()
- stream_mapOptions.add(mapOptions4.tree)
+ stream_mapOptions.add(mapOptions5.tree)
else:
@@ -275,18 +300,22 @@ class XKBGrammarParser(Parser):
cnt2 += 1
- self.following.append(self.FOLLOW_quotedstring_in_mapType406)
- sectionname = self.quotedstring()
- self.following.pop()
+ char_literal6 = self.input.LT(1)
+ self.match(self.input, 31, self.FOLLOW_31_in_mapType205)
+
+ stream_31.add(char_literal6)
+ NAME7 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_mapType207)
- stream_quotedstring.add(sectionname.tree)
- #action start
- print '%(sectionname)s {' % { "sectionname": self.input.toString(sectionname.start,sectionname.stop) }
- #action end
+ stream_NAME.add(NAME7)
+ char_literal8 = self.input.LT(1)
+ self.match(self.input, 31, self.FOLLOW_31_in_mapType209)
+
+ stream_31.add(char_literal8)
# AST Rewrite
- # elements: mapOptions, sectionname
+ # elements: mapOptions, NAME
# token labels:
- # rule labels: retval, sectionname
+ # rule labels: retval
# token list labels:
# rule list labels:
@@ -298,19 +327,13 @@ class XKBGrammarParser(Parser):
stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", None)
- if sectionname is not None:
- stream_sectionname = RewriteRuleSubtreeStream(self.adaptor, "token sectionname", sectionname.tree)
- else:
- stream_sectionname = RewriteRuleSubtreeStream(self.adaptor, "token sectionname", None)
-
-
root_0 = self.adaptor.nil()
- # 88:2: -> ^( MAPTYPE ( mapOptions )+ $sectionname)
- # XKBGrammar.g:88:5: ^( MAPTYPE ( mapOptions )+ $sectionname)
+ # 72:2: -> ^( MAPTYPE ( mapOptions )+ NAME )
+ # XKBGrammar.g:72:5: ^( MAPTYPE ( mapOptions )+ NAME )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(MAPTYPE, "MAPTYPE"), root_1)
- # XKBGrammar.g:88:15: ( mapOptions )+
+ # XKBGrammar.g:72:15: ( mapOptions )+
if not (stream_mapOptions.hasNext()):
raise RewriteEarlyExitException()
@@ -319,7 +342,7 @@ class XKBGrammarParser(Parser):
stream_mapOptions.reset()
- self.adaptor.addChild(root_1, stream_sectionname.next())
+ self.adaptor.addChild(root_1, stream_NAME.next())
self.adaptor.addChild(root_0, root_1)
@@ -345,263 +368,140 @@ class XKBGrammarParser(Parser):
# $ANTLR end mapType
- class quotedstring_return(object):
+ class mapMaterial_return(object):
def __init__(self):
self.start = None
self.stop = None
- self.value = None
self.tree = None
- # $ANTLR start quotedstring
- # XKBGrammar.g:91:1: quotedstring returns [value] : DQUOTE (sectionname+=~ ( DQUOTE ) )+ DQUOTE ;
- def quotedstring(self, ):
+ # $ANTLR start mapMaterial
+ # XKBGrammar.g:75:1: mapMaterial : '{' ( line_include | line_name ';' | line_keytype ';' | line_key ';' )+ '}' ';' ;
+ def mapMaterial(self, ):
- retval = self.quotedstring_return()
+ retval = self.mapMaterial_return()
retval.start = self.input.LT(1)
root_0 = None
- DQUOTE5 = None
- DQUOTE6 = None
- sectionname = None
- list_sectionname = None
+ char_literal9 = None
+ char_literal12 = None
+ char_literal14 = None
+ char_literal16 = None
+ char_literal17 = None
+ char_literal18 = None
+ line_include10 = None
- DQUOTE5_tree = None
- DQUOTE6_tree = None
- sectionname_tree = None
+ line_name11 = None
+
+ line_keytype13 = None
+
+ line_key15 = None
+
+
+ char_literal9_tree = None
+ char_literal12_tree = None
+ char_literal14_tree = None
+ char_literal16_tree = None
+ char_literal17_tree = None
+ char_literal18_tree = None
try:
try:
- # XKBGrammar.g:92:9: ( DQUOTE (sectionname+=~ ( DQUOTE ) )+ DQUOTE )
- # XKBGrammar.g:92:11: DQUOTE (sectionname+=~ ( DQUOTE ) )+ DQUOTE
+ # XKBGrammar.g:76:2: ( '{' ( line_include | line_name ';' | line_keytype ';' | line_key ';' )+ '}' ';' )
+ # XKBGrammar.g:76:4: '{' ( line_include | line_name ';' | line_keytype ';' | line_key ';' )+ '}' ';'
root_0 = self.adaptor.nil()
- DQUOTE5 = self.input.LT(1)
- self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_quotedstring444)
+ char_literal9 = self.input.LT(1)
+ self.match(self.input, 32, self.FOLLOW_32_in_mapMaterial233)
- DQUOTE5_tree = self.adaptor.createWithPayload(DQUOTE5)
- self.adaptor.addChild(root_0, DQUOTE5_tree)
+ char_literal9_tree = self.adaptor.createWithPayload(char_literal9)
+ self.adaptor.addChild(root_0, char_literal9_tree)
- # XKBGrammar.g:92:29: (sectionname+=~ ( DQUOTE ) )+
+ # XKBGrammar.g:77:2: ( line_include | line_name ';' | line_keytype ';' | line_key ';' )+
cnt3 = 0
while True: #loop3
- alt3 = 2
- LA3_0 = self.input.LA(1)
-
- if ((TOKEN_DEFAULT <= LA3_0 <= COMMA) or (MINUS <= LA3_0 <= LINE_COMMENT)) :
+ alt3 = 5
+ LA3 = self.input.LA(1)
+ if LA3 == 35:
alt3 = 1
-
+ elif LA3 == 36:
+ alt3 = 2
+ elif LA3 == 40:
+ alt3 = 3
+ elif LA3 == 41:
+ alt3 = 4
if alt3 == 1:
- # XKBGrammar.g:92:29: sectionname+=~ ( DQUOTE )
- sectionname = self.input.LT(1)
- if (TOKEN_DEFAULT <= self.input.LA(1) <= COMMA) or (MINUS <= self.input.LA(1) <= LINE_COMMENT):
- self.input.consume();
- self.adaptor.addChild(root_0, self.adaptor.createWithPayload(sectionname))
- self.errorRecovery = False
-
- else:
- mse = MismatchedSetException(None, self.input)
- self.recoverFromMismatchedSet(
- self.input, mse, self.FOLLOW_set_in_quotedstring448
- )
- raise mse
-
-
- if list_sectionname is None:
- list_sectionname = []
- list_sectionname.append(sectionname)
-
-
-
- else:
- if cnt3 >= 1:
- break #loop3
-
- eee = EarlyExitException(3, self.input)
- raise eee
-
- cnt3 += 1
-
-
- DQUOTE6 = self.input.LT(1)
- self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_quotedstring454)
-
-
- DQUOTE6_tree = self.adaptor.createWithPayload(DQUOTE6)
- self.adaptor.addChild(root_0, DQUOTE6_tree)
-
- #action start
-
- qstring = ['"']
- for elem in list_sectionname:
- qstring.append(elem.getText())
- qstring.append('"')
- retval.value = "".join(qstring)
-
- #action end
-
-
-
- retval.stop = self.input.LT(-1)
-
-
- retval.tree = self.adaptor.rulePostProcessing(root_0)
- self.adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
-
- except RecognitionException, re:
- self.reportError(re)
- self.recover(self.input, re)
- finally:
-
- pass
-
- return retval
-
- # $ANTLR end quotedstring
-
- class sectionmaterial_return(object):
- def __init__(self):
- self.start = None
- self.stop = None
-
- self.tree = None
-
-
- # $ANTLR start sectionmaterial
- # XKBGrammar.g:102:1: sectionmaterial : lc= LCURLY ( line_include | line_name | line_keytype | line_key )+ RCURLY SEMICOLON -> ^( SECTION ) ;
- def sectionmaterial(self, ):
-
- retval = self.sectionmaterial_return()
- retval.start = self.input.LT(1)
-
- root_0 = None
+ # XKBGrammar.g:77:4: line_include
+ self.following.append(self.FOLLOW_line_include_in_mapMaterial239)
+ line_include10 = self.line_include()
+ self.following.pop()
- lc = None
- RCURLY11 = None
- SEMICOLON12 = None
- line_include7 = None
+ self.adaptor.addChild(root_0, line_include10.tree)
- line_name8 = None
- line_keytype9 = None
+ elif alt3 == 2:
+ # XKBGrammar.g:78:4: line_name ';'
+ self.following.append(self.FOLLOW_line_name_in_mapMaterial245)
+ line_name11 = self.line_name()
+ self.following.pop()
- line_key10 = None
+ self.adaptor.addChild(root_0, line_name11.tree)
+ char_literal12 = self.input.LT(1)
+ self.match(self.input, 33, self.FOLLOW_33_in_mapMaterial247)
- lc_tree = None
- RCURLY11_tree = None
- SEMICOLON12_tree = None
- stream_LCURLY = RewriteRuleTokenStream(self.adaptor, "token LCURLY")
- stream_SEMICOLON = RewriteRuleTokenStream(self.adaptor, "token SEMICOLON")
- stream_RCURLY = RewriteRuleTokenStream(self.adaptor, "token RCURLY")
- stream_line_name = RewriteRuleSubtreeStream(self.adaptor, "rule line_name")
- stream_line_include = RewriteRuleSubtreeStream(self.adaptor, "rule line_include")
- stream_line_keytype = RewriteRuleSubtreeStream(self.adaptor, "rule line_keytype")
- stream_line_key = RewriteRuleSubtreeStream(self.adaptor, "rule line_key")
- try:
- try:
- # XKBGrammar.g:103:2: (lc= LCURLY ( line_include | line_name | line_keytype | line_key )+ RCURLY SEMICOLON -> ^( SECTION ) )
- # XKBGrammar.g:103:4: lc= LCURLY ( line_include | line_name | line_keytype | line_key )+ RCURLY SEMICOLON
- lc = self.input.LT(1)
- self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_sectionmaterial486)
-
- stream_LCURLY.add(lc)
- # XKBGrammar.g:103:14: ( line_include | line_name | line_keytype | line_key )+
- cnt4 = 0
- while True: #loop4
- alt4 = 5
- LA4 = self.input.LA(1)
- if LA4 == TOKEN_INCLUDE:
- alt4 = 1
- elif LA4 == TOKEN_NAME:
- alt4 = 2
- elif LA4 == TOKEN_KEY_TYPE:
- alt4 = 3
- elif LA4 == TOKEN_KEY:
- alt4 = 4
- if alt4 == 1:
- # XKBGrammar.g:103:15: line_include
- self.following.append(self.FOLLOW_line_include_in_sectionmaterial489)
- line_include7 = self.line_include()
+ elif alt3 == 3:
+ # XKBGrammar.g:79:4: line_keytype ';'
+ self.following.append(self.FOLLOW_line_keytype_in_mapMaterial253)
+ line_keytype13 = self.line_keytype()
self.following.pop()
- stream_line_include.add(line_include7.tree)
-
-
- elif alt4 == 2:
- # XKBGrammar.g:104:4: line_name
- self.following.append(self.FOLLOW_line_name_in_sectionmaterial495)
- line_name8 = self.line_name()
- self.following.pop()
+ self.adaptor.addChild(root_0, line_keytype13.tree)
+ char_literal14 = self.input.LT(1)
+ self.match(self.input, 33, self.FOLLOW_33_in_mapMaterial255)
- stream_line_name.add(line_name8.tree)
- elif alt4 == 3:
- # XKBGrammar.g:105:4: line_keytype
- self.following.append(self.FOLLOW_line_keytype_in_sectionmaterial501)
- line_keytype9 = self.line_keytype()
+ elif alt3 == 4:
+ # XKBGrammar.g:80:4: line_key ';'
+ self.following.append(self.FOLLOW_line_key_in_mapMaterial261)
+ line_key15 = self.line_key()
self.following.pop()
- stream_line_keytype.add(line_keytype9.tree)
-
-
- elif alt4 == 4:
- # XKBGrammar.g:106:4: line_key
- self.following.append(self.FOLLOW_line_key_in_sectionmaterial507)
- line_key10 = self.line_key()
- self.following.pop()
+ self.adaptor.addChild(root_0, line_key15.tree)
+ char_literal16 = self.input.LT(1)
+ self.match(self.input, 33, self.FOLLOW_33_in_mapMaterial263)
- stream_line_key.add(line_key10.tree)
else:
- if cnt4 >= 1:
- break #loop4
+ if cnt3 >= 1:
+ break #loop3
- eee = EarlyExitException(4, self.input)
+ eee = EarlyExitException(3, self.input)
raise eee
- cnt4 += 1
-
-
- RCURLY11 = self.input.LT(1)
- self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_sectionmaterial514)
-
- stream_RCURLY.add(RCURLY11)
- SEMICOLON12 = self.input.LT(1)
- self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_sectionmaterial516)
-
- stream_SEMICOLON.add(SEMICOLON12)
- # AST Rewrite
- # elements:
- # token labels:
- # rule labels: retval
- # token list labels:
- # rule list labels:
+ cnt3 += 1
- retval.tree = root_0
- if retval is not None:
- stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", retval.tree)
- else:
- stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", None)
+ char_literal17 = self.input.LT(1)
+ self.match(self.input, 34, self.FOLLOW_34_in_mapMaterial270)
- root_0 = self.adaptor.nil()
- # 108:2: -> ^( SECTION )
- # XKBGrammar.g:108:5: ^( SECTION )
- root_1 = self.adaptor.nil()
- root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(SECTION, "SECTION"), root_1)
+ char_literal17_tree = self.adaptor.createWithPayload(char_literal17)
+ self.adaptor.addChild(root_0, char_literal17_tree)
- self.adaptor.addChild(root_0, root_1)
+ char_literal18 = self.input.LT(1)
+ self.match(self.input, 33, self.FOLLOW_33_in_mapMaterial272)
+ char_literal18_tree = self.adaptor.createWithPayload(char_literal18)
+ self.adaptor.addChild(root_0, char_literal18_tree)
@@ -621,7 +521,7 @@ class XKBGrammarParser(Parser):
return retval
- # $ANTLR end sectionmaterial
+ # $ANTLR end mapMaterial
class line_include_return(object):
def __init__(self):
@@ -632,7 +532,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_include
- # XKBGrammar.g:111:1: line_include : TOKEN_INCLUDE include= quotedstring -> ^( TOKEN_INCLUDE $include) ;
+ # XKBGrammar.g:84:1: line_include : 'include' '\"' NAME '\"' -> ^( TOKEN_INCLUDE NAME ) ;
def line_include(self, ):
retval = self.line_include_return()
@@ -640,33 +540,43 @@ class XKBGrammarParser(Parser):
root_0 = None
- TOKEN_INCLUDE13 = None
- include = None
+ string_literal19 = None
+ char_literal20 = None
+ NAME21 = None
+ char_literal22 = None
+ string_literal19_tree = None
+ char_literal20_tree = None
+ NAME21_tree = None
+ char_literal22_tree = None
+ stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
+ stream_31 = RewriteRuleTokenStream(self.adaptor, "token 31")
+ stream_35 = RewriteRuleTokenStream(self.adaptor, "token 35")
- TOKEN_INCLUDE13_tree = None
- stream_TOKEN_INCLUDE = RewriteRuleTokenStream(self.adaptor, "token TOKEN_INCLUDE")
- stream_quotedstring = RewriteRuleSubtreeStream(self.adaptor, "rule quotedstring")
try:
try:
- # XKBGrammar.g:112:2: ( TOKEN_INCLUDE include= quotedstring -> ^( TOKEN_INCLUDE $include) )
- # XKBGrammar.g:112:4: TOKEN_INCLUDE include= quotedstring
- TOKEN_INCLUDE13 = self.input.LT(1)
- self.match(self.input, TOKEN_INCLUDE, self.FOLLOW_TOKEN_INCLUDE_in_line_include534)
-
- stream_TOKEN_INCLUDE.add(TOKEN_INCLUDE13)
- self.following.append(self.FOLLOW_quotedstring_in_line_include538)
- include = self.quotedstring()
- self.following.pop()
+ # XKBGrammar.g:85:2: ( 'include' '\"' NAME '\"' -> ^( TOKEN_INCLUDE NAME ) )
+ # XKBGrammar.g:85:4: 'include' '\"' NAME '\"'
+ string_literal19 = self.input.LT(1)
+ self.match(self.input, 35, self.FOLLOW_35_in_line_include283)
+
+ stream_35.add(string_literal19)
+ char_literal20 = self.input.LT(1)
+ self.match(self.input, 31, self.FOLLOW_31_in_line_include285)
- stream_quotedstring.add(include.tree)
- #action start
- print '\tinclude %(inc)s' % { "inc": self.input.toString(include.start,include.stop) }
- #action end
+ stream_31.add(char_literal20)
+ NAME21 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_include287)
+
+ stream_NAME.add(NAME21)
+ char_literal22 = self.input.LT(1)
+ self.match(self.input, 31, self.FOLLOW_31_in_line_include289)
+
+ stream_31.add(char_literal22)
# AST Rewrite
- # elements: TOKEN_INCLUDE, include
+ # elements: NAME
# token labels:
- # rule labels: retval, include
+ # rule labels: retval
# token list labels:
# rule list labels:
@@ -678,19 +588,13 @@ class XKBGrammarParser(Parser):
stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", None)
- if include is not None:
- stream_include = RewriteRuleSubtreeStream(self.adaptor, "token include", include.tree)
- else:
- stream_include = RewriteRuleSubtreeStream(self.adaptor, "token include", None)
-
-
root_0 = self.adaptor.nil()
- # 114:2: -> ^( TOKEN_INCLUDE $include)
- # XKBGrammar.g:114:5: ^( TOKEN_INCLUDE $include)
+ # 86:2: -> ^( TOKEN_INCLUDE NAME )
+ # XKBGrammar.g:86:5: ^( TOKEN_INCLUDE NAME )
root_1 = self.adaptor.nil()
- root_1 = self.adaptor.becomeRoot(stream_TOKEN_INCLUDE.next(), root_1)
+ root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_INCLUDE, "TOKEN_INCLUDE"), root_1)
- self.adaptor.addChild(root_1, stream_include.next())
+ self.adaptor.addChild(root_1, stream_NAME.next())
self.adaptor.addChild(root_0, root_1)
@@ -725,7 +629,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_name
- # XKBGrammar.g:117:1: line_name : TOKEN_NAME LBRACKET name= NAME RBRACKET EQUAL nameval= quotedstring SEMICOLON -> ^( TOKEN_NAME $name $nameval) ;
+ # XKBGrammar.g:89:1: line_name : 'name' '[' n1= NAME ']' '=' '\"' n2= NAME '\"' -> ^( TOKEN_NAME $n1 $n2) ;
def line_name(self, ):
retval = self.line_name_return()
@@ -733,79 +637,76 @@ class XKBGrammarParser(Parser):
root_0 = None
- name = None
- TOKEN_NAME14 = None
- LBRACKET15 = None
- RBRACKET16 = None
- EQUAL17 = None
- SEMICOLON18 = None
- nameval = None
-
-
- name_tree = None
- TOKEN_NAME14_tree = None
- LBRACKET15_tree = None
- RBRACKET16_tree = None
- EQUAL17_tree = None
- SEMICOLON18_tree = None
- stream_LBRACKET = RewriteRuleTokenStream(self.adaptor, "token LBRACKET")
- stream_TOKEN_NAME = RewriteRuleTokenStream(self.adaptor, "token TOKEN_NAME")
+ n1 = None
+ n2 = None
+ string_literal23 = None
+ char_literal24 = None
+ char_literal25 = None
+ char_literal26 = None
+ char_literal27 = None
+ char_literal28 = None
+
+ n1_tree = None
+ n2_tree = None
+ string_literal23_tree = None
+ char_literal24_tree = None
+ char_literal25_tree = None
+ char_literal26_tree = None
+ char_literal27_tree = None
+ char_literal28_tree = None
stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
- stream_SEMICOLON = RewriteRuleTokenStream(self.adaptor, "token SEMICOLON")
- stream_RBRACKET = RewriteRuleTokenStream(self.adaptor, "token RBRACKET")
- stream_EQUAL = RewriteRuleTokenStream(self.adaptor, "token EQUAL")
- stream_quotedstring = RewriteRuleSubtreeStream(self.adaptor, "rule quotedstring")
+ stream_31 = RewriteRuleTokenStream(self.adaptor, "token 31")
+ stream_36 = RewriteRuleTokenStream(self.adaptor, "token 36")
+ stream_39 = RewriteRuleTokenStream(self.adaptor, "token 39")
+ stream_37 = RewriteRuleTokenStream(self.adaptor, "token 37")
+ stream_38 = RewriteRuleTokenStream(self.adaptor, "token 38")
+
try:
try:
- # XKBGrammar.g:118:2: ( TOKEN_NAME LBRACKET name= NAME RBRACKET EQUAL nameval= quotedstring SEMICOLON -> ^( TOKEN_NAME $name $nameval) )
- # XKBGrammar.g:118:4: TOKEN_NAME LBRACKET name= NAME RBRACKET EQUAL nameval= quotedstring SEMICOLON
- TOKEN_NAME14 = self.input.LT(1)
- self.match(self.input, TOKEN_NAME, self.FOLLOW_TOKEN_NAME_in_line_name563)
-
- stream_TOKEN_NAME.add(TOKEN_NAME14)
- LBRACKET15 = self.input.LT(1)
- self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_line_name565)
-
- stream_LBRACKET.add(LBRACKET15)
- name = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_line_name569)
-
- stream_NAME.add(name)
- RBRACKET16 = self.input.LT(1)
- self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_line_name571)
-
- stream_RBRACKET.add(RBRACKET16)
- EQUAL17 = self.input.LT(1)
- self.match(self.input, EQUAL, self.FOLLOW_EQUAL_in_line_name573)
-
- stream_EQUAL.add(EQUAL17)
- self.following.append(self.FOLLOW_quotedstring_in_line_name577)
- nameval = self.quotedstring()
- self.following.pop()
+ # XKBGrammar.g:90:2: ( 'name' '[' n1= NAME ']' '=' '\"' n2= NAME '\"' -> ^( TOKEN_NAME $n1 $n2) )
+ # XKBGrammar.g:90:4: 'name' '[' n1= NAME ']' '=' '\"' n2= NAME '\"'
+ string_literal23 = self.input.LT(1)
+ self.match(self.input, 36, self.FOLLOW_36_in_line_name309)
+
+ stream_36.add(string_literal23)
+ char_literal24 = self.input.LT(1)
+ self.match(self.input, 37, self.FOLLOW_37_in_line_name311)
+
+ stream_37.add(char_literal24)
+ n1 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_name315)
+
+ stream_NAME.add(n1)
+ char_literal25 = self.input.LT(1)
+ self.match(self.input, 38, self.FOLLOW_38_in_line_name317)
+
+ stream_38.add(char_literal25)
+ char_literal26 = self.input.LT(1)
+ self.match(self.input, 39, self.FOLLOW_39_in_line_name319)
- stream_quotedstring.add(nameval.tree)
- SEMICOLON18 = self.input.LT(1)
- self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_line_name579)
+ stream_39.add(char_literal26)
+ char_literal27 = self.input.LT(1)
+ self.match(self.input, 31, self.FOLLOW_31_in_line_name321)
- stream_SEMICOLON.add(SEMICOLON18)
- #action start
- print '\tname[%(name)s] = %(nameval)s;' % { "name": name.text, "nameval": self.input.toString(nameval.start,nameval.stop) }
- #action end
+ stream_31.add(char_literal27)
+ n2 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_name325)
+
+ stream_NAME.add(n2)
+ char_literal28 = self.input.LT(1)
+ self.match(self.input, 31, self.FOLLOW_31_in_line_name327)
+
+ stream_31.add(char_literal28)
# AST Rewrite
- # elements: nameval, name, TOKEN_NAME
- # token labels: name
- # rule labels: nameval, retval
+ # elements: n2, n1
+ # token labels: n1, n2
+ # rule labels: retval
# token list labels:
# rule list labels:
retval.tree = root_0
- stream_name = RewriteRuleTokenStream(self.adaptor, "token name", name)
-
- if nameval is not None:
- stream_nameval = RewriteRuleSubtreeStream(self.adaptor, "token nameval", nameval.tree)
- else:
- stream_nameval = RewriteRuleSubtreeStream(self.adaptor, "token nameval", None)
-
+ stream_n1 = RewriteRuleTokenStream(self.adaptor, "token n1", n1)
+ stream_n2 = RewriteRuleTokenStream(self.adaptor, "token n2", n2)
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", retval.tree)
@@ -814,13 +715,13 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 120:2: -> ^( TOKEN_NAME $name $nameval)
- # XKBGrammar.g:120:5: ^( TOKEN_NAME $name $nameval)
+ # 91:2: -> ^( TOKEN_NAME $n1 $n2)
+ # XKBGrammar.g:91:5: ^( TOKEN_NAME $n1 $n2)
root_1 = self.adaptor.nil()
- root_1 = self.adaptor.becomeRoot(stream_TOKEN_NAME.next(), root_1)
+ root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_NAME, "TOKEN_NAME"), root_1)
- self.adaptor.addChild(root_1, stream_name.next())
- self.adaptor.addChild(root_1, stream_nameval.next())
+ self.adaptor.addChild(root_1, stream_n1.next())
+ self.adaptor.addChild(root_1, stream_n2.next())
self.adaptor.addChild(root_0, root_1)
@@ -855,7 +756,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_keytype
- # XKBGrammar.g:123:1: line_keytype : TOKEN_KEY_TYPE LBRACKET keytype= NAME RBRACKET EQUAL DQUOTE keytypevalue= NAME DQUOTE SEMICOLON -> ^( TOKEN_KEY_TYPE $keytype $keytypevalue) ;
+ # XKBGrammar.g:94:1: line_keytype : 'key.type' '[' n1= NAME ']' '=' '\"' n2= NAME '\"' -> ^( TOKEN_KEY_TYPE $n1 $n2) ;
def line_keytype(self, ):
retval = self.line_keytype_return()
@@ -863,86 +764,76 @@ class XKBGrammarParser(Parser):
root_0 = None
- keytype = None
- keytypevalue = None
- TOKEN_KEY_TYPE19 = None
- LBRACKET20 = None
- RBRACKET21 = None
- EQUAL22 = None
- DQUOTE23 = None
- DQUOTE24 = None
- SEMICOLON25 = None
-
- keytype_tree = None
- keytypevalue_tree = None
- TOKEN_KEY_TYPE19_tree = None
- LBRACKET20_tree = None
- RBRACKET21_tree = None
- EQUAL22_tree = None
- DQUOTE23_tree = None
- DQUOTE24_tree = None
- SEMICOLON25_tree = None
- stream_LBRACKET = RewriteRuleTokenStream(self.adaptor, "token LBRACKET")
- stream_DQUOTE = RewriteRuleTokenStream(self.adaptor, "token DQUOTE")
+ n1 = None
+ n2 = None
+ string_literal29 = None
+ char_literal30 = None
+ char_literal31 = None
+ char_literal32 = None
+ char_literal33 = None
+ char_literal34 = None
+
+ n1_tree = None
+ n2_tree = None
+ string_literal29_tree = None
+ char_literal30_tree = None
+ char_literal31_tree = None
+ char_literal32_tree = None
+ char_literal33_tree = None
+ char_literal34_tree = None
stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
- stream_SEMICOLON = RewriteRuleTokenStream(self.adaptor, "token SEMICOLON")
- stream_RBRACKET = RewriteRuleTokenStream(self.adaptor, "token RBRACKET")
- stream_EQUAL = RewriteRuleTokenStream(self.adaptor, "token EQUAL")
- stream_TOKEN_KEY_TYPE = RewriteRuleTokenStream(self.adaptor, "token TOKEN_KEY_TYPE")
+ stream_40 = RewriteRuleTokenStream(self.adaptor, "token 40")
+ stream_31 = RewriteRuleTokenStream(self.adaptor, "token 31")
+ stream_39 = RewriteRuleTokenStream(self.adaptor, "token 39")
+ stream_37 = RewriteRuleTokenStream(self.adaptor, "token 37")
+ stream_38 = RewriteRuleTokenStream(self.adaptor, "token 38")
try:
try:
- # XKBGrammar.g:124:2: ( TOKEN_KEY_TYPE LBRACKET keytype= NAME RBRACKET EQUAL DQUOTE keytypevalue= NAME DQUOTE SEMICOLON -> ^( TOKEN_KEY_TYPE $keytype $keytypevalue) )
- # XKBGrammar.g:124:4: TOKEN_KEY_TYPE LBRACKET keytype= NAME RBRACKET EQUAL DQUOTE keytypevalue= NAME DQUOTE SEMICOLON
- TOKEN_KEY_TYPE19 = self.input.LT(1)
- self.match(self.input, TOKEN_KEY_TYPE, self.FOLLOW_TOKEN_KEY_TYPE_in_line_keytype606)
-
- stream_TOKEN_KEY_TYPE.add(TOKEN_KEY_TYPE19)
- LBRACKET20 = self.input.LT(1)
- self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_line_keytype608)
-
- stream_LBRACKET.add(LBRACKET20)
- keytype = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype612)
-
- stream_NAME.add(keytype)
- RBRACKET21 = self.input.LT(1)
- self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_line_keytype614)
-
- stream_RBRACKET.add(RBRACKET21)
- EQUAL22 = self.input.LT(1)
- self.match(self.input, EQUAL, self.FOLLOW_EQUAL_in_line_keytype616)
-
- stream_EQUAL.add(EQUAL22)
- DQUOTE23 = self.input.LT(1)
- self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_line_keytype618)
-
- stream_DQUOTE.add(DQUOTE23)
- keytypevalue = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype622)
-
- stream_NAME.add(keytypevalue)
- DQUOTE24 = self.input.LT(1)
- self.match(self.input, DQUOTE, self.FOLLOW_DQUOTE_in_line_keytype624)
-
- stream_DQUOTE.add(DQUOTE24)
- SEMICOLON25 = self.input.LT(1)
- self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_line_keytype626)
-
- stream_SEMICOLON.add(SEMICOLON25)
- #action start
- print '\tkey.type[%(kt)s] = \"%(ktv)s\";' % { "kt": keytype.text, "ktv": keytypevalue.text }
- #action end
+ # XKBGrammar.g:95:2: ( 'key.type' '[' n1= NAME ']' '=' '\"' n2= NAME '\"' -> ^( TOKEN_KEY_TYPE $n1 $n2) )
+ # XKBGrammar.g:95:4: 'key.type' '[' n1= NAME ']' '=' '\"' n2= NAME '\"'
+ string_literal29 = self.input.LT(1)
+ self.match(self.input, 40, self.FOLLOW_40_in_line_keytype351)
+
+ stream_40.add(string_literal29)
+ char_literal30 = self.input.LT(1)
+ self.match(self.input, 37, self.FOLLOW_37_in_line_keytype353)
+
+ stream_37.add(char_literal30)
+ n1 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype357)
+
+ stream_NAME.add(n1)
+ char_literal31 = self.input.LT(1)
+ self.match(self.input, 38, self.FOLLOW_38_in_line_keytype359)
+
+ stream_38.add(char_literal31)
+ char_literal32 = self.input.LT(1)
+ self.match(self.input, 39, self.FOLLOW_39_in_line_keytype361)
+
+ stream_39.add(char_literal32)
+ char_literal33 = self.input.LT(1)
+ self.match(self.input, 31, self.FOLLOW_31_in_line_keytype363)
+
+ stream_31.add(char_literal33)
+ n2 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype367)
+
+ stream_NAME.add(n2)
+ char_literal34 = self.input.LT(1)
+ self.match(self.input, 31, self.FOLLOW_31_in_line_keytype369)
+
+ stream_31.add(char_literal34)
# AST Rewrite
- # elements: keytypevalue, keytype, TOKEN_KEY_TYPE
- # token labels: keytype, keytypevalue
+ # elements: n2, n1
+ # token labels: n1, n2
# rule labels: retval
# token list labels:
# rule list labels:
retval.tree = root_0
- stream_keytype = RewriteRuleTokenStream(self.adaptor, "token keytype", keytype)
- stream_keytypevalue = RewriteRuleTokenStream(self.adaptor, "token keytypevalue", keytypevalue)
+ stream_n1 = RewriteRuleTokenStream(self.adaptor, "token n1", n1)
+ stream_n2 = RewriteRuleTokenStream(self.adaptor, "token n2", n2)
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", retval.tree)
@@ -951,13 +842,13 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 126:2: -> ^( TOKEN_KEY_TYPE $keytype $keytypevalue)
- # XKBGrammar.g:126:5: ^( TOKEN_KEY_TYPE $keytype $keytypevalue)
+ # 96:2: -> ^( TOKEN_KEY_TYPE $n1 $n2)
+ # XKBGrammar.g:96:5: ^( TOKEN_KEY_TYPE $n1 $n2)
root_1 = self.adaptor.nil()
- root_1 = self.adaptor.becomeRoot(stream_TOKEN_KEY_TYPE.next(), root_1)
+ root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_KEY_TYPE, "TOKEN_KEY_TYPE"), root_1)
- self.adaptor.addChild(root_1, stream_keytype.next())
- self.adaptor.addChild(root_1, stream_keytypevalue.next())
+ self.adaptor.addChild(root_1, stream_n1.next())
+ self.adaptor.addChild(root_1, stream_n2.next())
self.adaptor.addChild(root_0, root_1)
@@ -992,7 +883,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_key
- # XKBGrammar.g:129:1: line_key : TOKEN_KEY keycode keysyms SEMICOLON -> ^( TOKEN_KEY keycode keysyms ) ;
+ # XKBGrammar.g:99:1: line_key : 'key' keycode keysyms -> ^( TOKEN_KEY keycode keysyms ) ;
def line_key(self, ):
retval = self.line_key_return()
@@ -1000,46 +891,36 @@ class XKBGrammarParser(Parser):
root_0 = None
- TOKEN_KEY26 = None
- SEMICOLON29 = None
- keycode27 = None
+ string_literal35 = None
+ keycode36 = None
- keysyms28 = None
+ keysyms37 = None
- TOKEN_KEY26_tree = None
- SEMICOLON29_tree = None
- stream_SEMICOLON = RewriteRuleTokenStream(self.adaptor, "token SEMICOLON")
- stream_TOKEN_KEY = RewriteRuleTokenStream(self.adaptor, "token TOKEN_KEY")
+ string_literal35_tree = None
+ stream_41 = RewriteRuleTokenStream(self.adaptor, "token 41")
stream_keysyms = RewriteRuleSubtreeStream(self.adaptor, "rule keysyms")
stream_keycode = RewriteRuleSubtreeStream(self.adaptor, "rule keycode")
try:
try:
- # XKBGrammar.g:130:2: ( TOKEN_KEY keycode keysyms SEMICOLON -> ^( TOKEN_KEY keycode keysyms ) )
- # XKBGrammar.g:130:4: TOKEN_KEY keycode keysyms SEMICOLON
- TOKEN_KEY26 = self.input.LT(1)
- self.match(self.input, TOKEN_KEY, self.FOLLOW_TOKEN_KEY_in_line_key654)
-
- stream_TOKEN_KEY.add(TOKEN_KEY26)
- self.following.append(self.FOLLOW_keycode_in_line_key656)
- keycode27 = self.keycode()
+ # XKBGrammar.g:100:2: ( 'key' keycode keysyms -> ^( TOKEN_KEY keycode keysyms ) )
+ # XKBGrammar.g:100:4: 'key' keycode keysyms
+ string_literal35 = self.input.LT(1)
+ self.match(self.input, 41, self.FOLLOW_41_in_line_key394)
+
+ stream_41.add(string_literal35)
+ self.following.append(self.FOLLOW_keycode_in_line_key396)
+ keycode36 = self.keycode()
self.following.pop()
- stream_keycode.add(keycode27.tree)
- self.following.append(self.FOLLOW_keysyms_in_line_key658)
- keysyms28 = self.keysyms()
+ stream_keycode.add(keycode36.tree)
+ self.following.append(self.FOLLOW_keysyms_in_line_key398)
+ keysyms37 = self.keysyms()
self.following.pop()
- stream_keysyms.add(keysyms28.tree)
- SEMICOLON29 = self.input.LT(1)
- self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_line_key660)
-
- stream_SEMICOLON.add(SEMICOLON29)
- #action start
- print '\tkey %(keycode)s %(keysyms)s;' % { "keycode": self.input.toString(keycode27.start,keycode27.stop), "keysyms": keysyms28.value }
- #action end
+ stream_keysyms.add(keysyms37.tree)
# AST Rewrite
- # elements: keysyms, TOKEN_KEY, keycode
+ # elements: keycode, keysyms
# token labels:
# rule labels: retval
# token list labels:
@@ -1054,10 +935,10 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 132:2: -> ^( TOKEN_KEY keycode keysyms )
- # XKBGrammar.g:132:5: ^( TOKEN_KEY keycode keysyms )
+ # 101:2: -> ^( TOKEN_KEY keycode keysyms )
+ # XKBGrammar.g:101:5: ^( TOKEN_KEY keycode keysyms )
root_1 = self.adaptor.nil()
- root_1 = self.adaptor.becomeRoot(stream_TOKEN_KEY.next(), root_1)
+ root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_KEY, "TOKEN_KEY"), root_1)
self.adaptor.addChild(root_1, stream_keycode.next())
self.adaptor.addChild(root_1, stream_keysyms.next())
@@ -1095,7 +976,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start keycode
- # XKBGrammar.g:135:1: keycode : LOWERTHAN NAME GREATERTHAN -> ^( KEYCODE NAME ) ;
+ # XKBGrammar.g:104:1: keycode : '<' NAME '>' -> ^( KEYCODE NAME ) ;
def keycode(self, ):
retval = self.keycode_return()
@@ -1103,33 +984,33 @@ class XKBGrammarParser(Parser):
root_0 = None
- LOWERTHAN30 = None
- NAME31 = None
- GREATERTHAN32 = None
+ char_literal38 = None
+ NAME39 = None
+ char_literal40 = None
- LOWERTHAN30_tree = None
- NAME31_tree = None
- GREATERTHAN32_tree = None
+ char_literal38_tree = None
+ NAME39_tree = None
+ char_literal40_tree = None
stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
- stream_LOWERTHAN = RewriteRuleTokenStream(self.adaptor, "token LOWERTHAN")
- stream_GREATERTHAN = RewriteRuleTokenStream(self.adaptor, "token GREATERTHAN")
+ stream_43 = RewriteRuleTokenStream(self.adaptor, "token 43")
+ stream_42 = RewriteRuleTokenStream(self.adaptor, "token 42")
try:
try:
- # XKBGrammar.g:136:2: ( LOWERTHAN NAME GREATERTHAN -> ^( KEYCODE NAME ) )
- # XKBGrammar.g:136:4: LOWERTHAN NAME GREATERTHAN
- LOWERTHAN30 = self.input.LT(1)
- self.match(self.input, LOWERTHAN, self.FOLLOW_LOWERTHAN_in_keycode687)
+ # XKBGrammar.g:105:2: ( '<' NAME '>' -> ^( KEYCODE NAME ) )
+ # XKBGrammar.g:105:4: '<' NAME '>'
+ char_literal38 = self.input.LT(1)
+ self.match(self.input, 42, self.FOLLOW_42_in_keycode422)
- stream_LOWERTHAN.add(LOWERTHAN30)
- NAME31 = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_keycode689)
+ stream_42.add(char_literal38)
+ NAME39 = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keycode424)
- stream_NAME.add(NAME31)
- GREATERTHAN32 = self.input.LT(1)
- self.match(self.input, GREATERTHAN, self.FOLLOW_GREATERTHAN_in_keycode691)
+ stream_NAME.add(NAME39)
+ char_literal40 = self.input.LT(1)
+ self.match(self.input, 43, self.FOLLOW_43_in_keycode426)
- stream_GREATERTHAN.add(GREATERTHAN32)
+ stream_43.add(char_literal40)
# AST Rewrite
# elements: NAME
# token labels:
@@ -1146,8 +1027,8 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 137:2: -> ^( KEYCODE NAME )
- # XKBGrammar.g:137:5: ^( KEYCODE NAME )
+ # 106:2: -> ^( KEYCODE NAME )
+ # XKBGrammar.g:106:5: ^( KEYCODE NAME )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(KEYCODE, "KEYCODE"), root_1)
@@ -1182,12 +1063,11 @@ class XKBGrammarParser(Parser):
self.start = None
self.stop = None
- self.value = None
self.tree = None
# $ANTLR start keysyms
- # XKBGrammar.g:140:1: keysyms returns [value] : LCURLY LBRACKET keysym+= NAME ( COMMA keysym+= NAME )* RBRACKET RCURLY ;
+ # XKBGrammar.g:109:1: keysyms : '{' '[' keysym+= NAME ( ',' keysym+= NAME )* ']' '}' -> ^( KEYSYMS ( $keysym)+ ) ;
def keysyms(self, ):
retval = self.keysyms_return()
@@ -1195,77 +1075,66 @@ class XKBGrammarParser(Parser):
root_0 = None
- LCURLY33 = None
- LBRACKET34 = None
- COMMA35 = None
- RBRACKET36 = None
- RCURLY37 = None
+ char_literal41 = None
+ char_literal42 = None
+ char_literal43 = None
+ char_literal44 = None
+ char_literal45 = None
keysym = None
list_keysym = None
- LCURLY33_tree = None
- LBRACKET34_tree = None
- COMMA35_tree = None
- RBRACKET36_tree = None
- RCURLY37_tree = None
+ char_literal41_tree = None
+ char_literal42_tree = None
+ char_literal43_tree = None
+ char_literal44_tree = None
+ char_literal45_tree = None
keysym_tree = None
+ stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
+ stream_44 = RewriteRuleTokenStream(self.adaptor, "token 44")
+ stream_32 = RewriteRuleTokenStream(self.adaptor, "token 32")
+ stream_34 = RewriteRuleTokenStream(self.adaptor, "token 34")
+ stream_37 = RewriteRuleTokenStream(self.adaptor, "token 37")
+ stream_38 = RewriteRuleTokenStream(self.adaptor, "token 38")
try:
try:
- # XKBGrammar.g:141:2: ( LCURLY LBRACKET keysym+= NAME ( COMMA keysym+= NAME )* RBRACKET RCURLY )
- # XKBGrammar.g:141:4: LCURLY LBRACKET keysym+= NAME ( COMMA keysym+= NAME )* RBRACKET RCURLY
- root_0 = self.adaptor.nil()
-
- LCURLY33 = self.input.LT(1)
- self.match(self.input, LCURLY, self.FOLLOW_LCURLY_in_keysyms716)
-
-
- LCURLY33_tree = self.adaptor.createWithPayload(LCURLY33)
- self.adaptor.addChild(root_0, LCURLY33_tree)
-
- LBRACKET34 = self.input.LT(1)
- self.match(self.input, LBRACKET, self.FOLLOW_LBRACKET_in_keysyms718)
-
+ # XKBGrammar.g:110:2: ( '{' '[' keysym+= NAME ( ',' keysym+= NAME )* ']' '}' -> ^( KEYSYMS ( $keysym)+ ) )
+ # XKBGrammar.g:110:4: '{' '[' keysym+= NAME ( ',' keysym+= NAME )* ']' '}'
+ char_literal41 = self.input.LT(1)
+ self.match(self.input, 32, self.FOLLOW_32_in_keysyms446)
- LBRACKET34_tree = self.adaptor.createWithPayload(LBRACKET34)
- self.adaptor.addChild(root_0, LBRACKET34_tree)
+ stream_32.add(char_literal41)
+ char_literal42 = self.input.LT(1)
+ self.match(self.input, 37, self.FOLLOW_37_in_keysyms448)
+ stream_37.add(char_literal42)
keysym = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms722)
-
-
- keysym_tree = self.adaptor.createWithPayload(keysym)
- self.adaptor.addChild(root_0, keysym_tree)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms452)
+ stream_NAME.add(keysym)
if list_keysym is None:
list_keysym = []
list_keysym.append(keysym)
- # XKBGrammar.g:141:33: ( COMMA keysym+= NAME )*
- while True: #loop5
- alt5 = 2
- LA5_0 = self.input.LA(1)
-
- if (LA5_0 == COMMA) :
- alt5 = 1
-
+ # XKBGrammar.g:110:25: ( ',' keysym+= NAME )*
+ while True: #loop4
+ alt4 = 2
+ LA4_0 = self.input.LA(1)
- if alt5 == 1:
- # XKBGrammar.g:141:34: COMMA keysym+= NAME
- COMMA35 = self.input.LT(1)
- self.match(self.input, COMMA, self.FOLLOW_COMMA_in_keysyms725)
+ if (LA4_0 == 44) :
+ alt4 = 1
- COMMA35_tree = self.adaptor.createWithPayload(COMMA35)
- self.adaptor.addChild(root_0, COMMA35_tree)
+ if alt4 == 1:
+ # XKBGrammar.g:110:26: ',' keysym+= NAME
+ char_literal43 = self.input.LT(1)
+ self.match(self.input, 44, self.FOLLOW_44_in_keysyms455)
+ stream_44.add(char_literal43)
keysym = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms729)
-
-
- keysym_tree = self.adaptor.createWithPayload(keysym)
- self.adaptor.addChild(root_0, keysym_tree)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms459)
+ stream_NAME.add(keysym)
if list_keysym is None:
list_keysym = []
list_keysym.append(keysym)
@@ -1273,38 +1142,53 @@ class XKBGrammarParser(Parser):
else:
- break #loop5
+ break #loop4
- RBRACKET36 = self.input.LT(1)
- self.match(self.input, RBRACKET, self.FOLLOW_RBRACKET_in_keysyms733)
+ char_literal44 = self.input.LT(1)
+ self.match(self.input, 38, self.FOLLOW_38_in_keysyms463)
+ stream_38.add(char_literal44)
+ char_literal45 = self.input.LT(1)
+ self.match(self.input, 34, self.FOLLOW_34_in_keysyms465)
- RBRACKET36_tree = self.adaptor.createWithPayload(RBRACKET36)
- self.adaptor.addChild(root_0, RBRACKET36_tree)
+ stream_34.add(char_literal45)
+ # AST Rewrite
+ # elements: keysym
+ # token labels:
+ # rule labels: retval
+ # token list labels: keysym
+ # rule list labels:
- RCURLY37 = self.input.LT(1)
- self.match(self.input, RCURLY, self.FOLLOW_RCURLY_in_keysyms735)
+ retval.tree = root_0
+ stream_keysym = RewriteRuleTokenStream(self.adaptor, "token keysym", list_keysym)
+
+ if retval is not None:
+ stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", retval.tree)
+ else:
+ stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", None)
+
+
+ root_0 = self.adaptor.nil()
+ # 111:2: -> ^( KEYSYMS ( $keysym)+ )
+ # XKBGrammar.g:111:5: ^( KEYSYMS ( $keysym)+ )
+ root_1 = self.adaptor.nil()
+ root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(KEYSYMS, "KEYSYMS"), root_1)
+
+ # XKBGrammar.g:111:15: ( $keysym)+
+ if not (stream_keysym.hasNext()):
+ raise RewriteEarlyExitException()
+
+ while stream_keysym.hasNext():
+ self.adaptor.addChild(root_1, stream_keysym.next())
+
+
+ stream_keysym.reset()
+
+ self.adaptor.addChild(root_0, root_1)
- RCURLY37_tree = self.adaptor.createWithPayload(RCURLY37)
- self.adaptor.addChild(root_0, RCURLY37_tree)
- #action start
-
- qstring = ["{ [ "]
- first_elem = list_keysym[0].getText()
- qstring.append(first_elem)
- for elem in list_keysym:
- if first_elem != "":
- first_elem = ""
- continue
- qstring.append(", ")
- qstring.append(elem.getText())
- qstring.append(" ] }")
- retval.value = "".join(qstring)
-
- #action end
@@ -1334,7 +1218,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start mapOptions
- # XKBGrammar.g:161:1: mapOptions : ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS );
+ # XKBGrammar.g:114:1: mapOptions : ( 'default' | 'hidden' | 'partial' | 'alphanumeric_keys' | 'alternate_group' | 'xkb_symbols' );
def mapOptions(self, ):
retval = self.mapOptions_return()
@@ -1342,136 +1226,31 @@ class XKBGrammarParser(Parser):
root_0 = None
- TOKEN_DEFAULT38 = None
- TOKEN_HIDDEN39 = None
- TOKEN_PARTIAL40 = None
- TOKEN_ALPHANUMERIC_KEYS41 = None
- TOKEN_ALTERNATE_GROUP42 = None
- TOKEN_XKB_SYMBOLS43 = None
+ set46 = None
- TOKEN_DEFAULT38_tree = None
- TOKEN_HIDDEN39_tree = None
- TOKEN_PARTIAL40_tree = None
- TOKEN_ALPHANUMERIC_KEYS41_tree = None
- TOKEN_ALTERNATE_GROUP42_tree = None
- TOKEN_XKB_SYMBOLS43_tree = None
+ set46_tree = None
try:
try:
- # XKBGrammar.g:162:2: ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS )
- alt6 = 6
- LA6 = self.input.LA(1)
- if LA6 == TOKEN_DEFAULT:
- alt6 = 1
- elif LA6 == TOKEN_HIDDEN:
- alt6 = 2
- elif LA6 == TOKEN_PARTIAL:
- alt6 = 3
- elif LA6 == TOKEN_ALPHANUMERIC_KEYS:
- alt6 = 4
- elif LA6 == TOKEN_ALTERNATE_GROUP:
- alt6 = 5
- elif LA6 == TOKEN_XKB_SYMBOLS:
- alt6 = 6
- else:
- nvae = NoViableAltException("161:1: mapOptions : ( TOKEN_DEFAULT | TOKEN_HIDDEN | TOKEN_PARTIAL | TOKEN_ALPHANUMERIC_KEYS | TOKEN_ALTERNATE_GROUP | TOKEN_XKB_SYMBOLS );", 6, 0, self.input)
-
- raise nvae
-
- if alt6 == 1:
- # XKBGrammar.g:162:4: TOKEN_DEFAULT
- root_0 = self.adaptor.nil()
-
- TOKEN_DEFAULT38 = self.input.LT(1)
- self.match(self.input, TOKEN_DEFAULT, self.FOLLOW_TOKEN_DEFAULT_in_mapOptions760)
-
-
- TOKEN_DEFAULT38_tree = self.adaptor.createWithPayload(TOKEN_DEFAULT38)
- self.adaptor.addChild(root_0, TOKEN_DEFAULT38_tree)
-
- #action start
- print "default",
- #action end
-
-
- elif alt6 == 2:
- # XKBGrammar.g:163:4: TOKEN_HIDDEN
- root_0 = self.adaptor.nil()
-
- TOKEN_HIDDEN39 = self.input.LT(1)
- self.match(self.input, TOKEN_HIDDEN, self.FOLLOW_TOKEN_HIDDEN_in_mapOptions768)
-
-
- TOKEN_HIDDEN39_tree = self.adaptor.createWithPayload(TOKEN_HIDDEN39)
- self.adaptor.addChild(root_0, TOKEN_HIDDEN39_tree)
-
- #action start
- print "hidden",
- #action end
-
-
- elif alt6 == 3:
- # XKBGrammar.g:164:4: TOKEN_PARTIAL
- root_0 = self.adaptor.nil()
-
- TOKEN_PARTIAL40 = self.input.LT(1)
- self.match(self.input, TOKEN_PARTIAL, self.FOLLOW_TOKEN_PARTIAL_in_mapOptions777)
-
-
- TOKEN_PARTIAL40_tree = self.adaptor.createWithPayload(TOKEN_PARTIAL40)
- self.adaptor.addChild(root_0, TOKEN_PARTIAL40_tree)
-
- #action start
- print "partial",
- #action end
-
-
- elif alt6 == 4:
- # XKBGrammar.g:165:4: TOKEN_ALPHANUMERIC_KEYS
- root_0 = self.adaptor.nil()
-
- TOKEN_ALPHANUMERIC_KEYS41 = self.input.LT(1)
- self.match(self.input, TOKEN_ALPHANUMERIC_KEYS, self.FOLLOW_TOKEN_ALPHANUMERIC_KEYS_in_mapOptions786)
-
-
- TOKEN_ALPHANUMERIC_KEYS41_tree = self.adaptor.createWithPayload(TOKEN_ALPHANUMERIC_KEYS41)
- self.adaptor.addChild(root_0, TOKEN_ALPHANUMERIC_KEYS41_tree)
-
- #action start
- print "alphanumeric_keys",
- #action end
-
-
- elif alt6 == 5:
- # XKBGrammar.g:166:4: TOKEN_ALTERNATE_GROUP
- root_0 = self.adaptor.nil()
-
- TOKEN_ALTERNATE_GROUP42 = self.input.LT(1)
- self.match(self.input, TOKEN_ALTERNATE_GROUP, self.FOLLOW_TOKEN_ALTERNATE_GROUP_in_mapOptions795)
-
-
- TOKEN_ALTERNATE_GROUP42_tree = self.adaptor.createWithPayload(TOKEN_ALTERNATE_GROUP42)
- self.adaptor.addChild(root_0, TOKEN_ALTERNATE_GROUP42_tree)
-
- #action start
- print "alternate_group",
- #action end
-
+ # XKBGrammar.g:115:2: ( 'default' | 'hidden' | 'partial' | 'alphanumeric_keys' | 'alternate_group' | 'xkb_symbols' )
+ # XKBGrammar.g:
+ root_0 = self.adaptor.nil()
- elif alt6 == 6:
- # XKBGrammar.g:167:4: TOKEN_XKB_SYMBOLS
- root_0 = self.adaptor.nil()
+ set46 = self.input.LT(1)
+ if (45 <= self.input.LA(1) <= 50):
+ self.input.consume();
+ self.adaptor.addChild(root_0, self.adaptor.createWithPayload(set46))
+ self.errorRecovery = False
- TOKEN_XKB_SYMBOLS43 = self.input.LT(1)
- self.match(self.input, TOKEN_XKB_SYMBOLS, self.FOLLOW_TOKEN_XKB_SYMBOLS_in_mapOptions802)
+ else:
+ mse = MismatchedSetException(None, self.input)
+ self.recoverFromMismatchedSet(
+ self.input, mse, self.FOLLOW_set_in_mapOptions0
+ )
+ raise mse
- TOKEN_XKB_SYMBOLS43_tree = self.adaptor.createWithPayload(TOKEN_XKB_SYMBOLS43)
- self.adaptor.addChild(root_0, TOKEN_XKB_SYMBOLS43_tree)
- #action start
- print "xkb_symbols",
- #action end
retval.stop = self.input.LT(-1)
@@ -1494,57 +1273,56 @@ class XKBGrammarParser(Parser):
- FOLLOW_section_in_layout369 = frozenset([1, 4, 5, 6, 7, 9, 10])
- FOLLOW_mapType_in_section383 = frozenset([17])
- FOLLOW_sectionmaterial_in_section385 = frozenset([1])
- FOLLOW_mapOptions_in_mapType401 = frozenset([4, 5, 6, 7, 9, 10, 20])
- FOLLOW_quotedstring_in_mapType406 = frozenset([1])
- FOLLOW_DQUOTE_in_quotedstring444 = frozenset([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41])
- FOLLOW_set_in_quotedstring448 = frozenset([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41])
- FOLLOW_DQUOTE_in_quotedstring454 = frozenset([1])
- FOLLOW_LCURLY_in_sectionmaterial486 = frozenset([11, 12, 13, 14])
- FOLLOW_line_include_in_sectionmaterial489 = frozenset([11, 12, 13, 14, 18])
- FOLLOW_line_name_in_sectionmaterial495 = frozenset([11, 12, 13, 14, 18])
- FOLLOW_line_keytype_in_sectionmaterial501 = frozenset([11, 12, 13, 14, 18])
- FOLLOW_line_key_in_sectionmaterial507 = frozenset([11, 12, 13, 14, 18])
- FOLLOW_RCURLY_in_sectionmaterial514 = frozenset([23])
- FOLLOW_SEMICOLON_in_sectionmaterial516 = frozenset([1])
- FOLLOW_TOKEN_INCLUDE_in_line_include534 = frozenset([20])
- FOLLOW_quotedstring_in_line_include538 = frozenset([1])
- FOLLOW_TOKEN_NAME_in_line_name563 = frozenset([15])
- FOLLOW_LBRACKET_in_line_name565 = frozenset([37])
- FOLLOW_NAME_in_line_name569 = frozenset([16])
- FOLLOW_RBRACKET_in_line_name571 = frozenset([24])
- FOLLOW_EQUAL_in_line_name573 = frozenset([20])
- FOLLOW_quotedstring_in_line_name577 = frozenset([23])
- FOLLOW_SEMICOLON_in_line_name579 = frozenset([1])
- FOLLOW_TOKEN_KEY_TYPE_in_line_keytype606 = frozenset([15])
- FOLLOW_LBRACKET_in_line_keytype608 = frozenset([37])
- FOLLOW_NAME_in_line_keytype612 = frozenset([16])
- FOLLOW_RBRACKET_in_line_keytype614 = frozenset([24])
- FOLLOW_EQUAL_in_line_keytype616 = frozenset([20])
- FOLLOW_DQUOTE_in_line_keytype618 = frozenset([37])
- FOLLOW_NAME_in_line_keytype622 = frozenset([20])
- FOLLOW_DQUOTE_in_line_keytype624 = frozenset([23])
- FOLLOW_SEMICOLON_in_line_keytype626 = frozenset([1])
- FOLLOW_TOKEN_KEY_in_line_key654 = frozenset([25])
- FOLLOW_keycode_in_line_key656 = frozenset([17])
- FOLLOW_keysyms_in_line_key658 = frozenset([23])
- FOLLOW_SEMICOLON_in_line_key660 = frozenset([1])
- FOLLOW_LOWERTHAN_in_keycode687 = frozenset([37])
- FOLLOW_NAME_in_keycode689 = frozenset([26])
- FOLLOW_GREATERTHAN_in_keycode691 = frozenset([1])
- FOLLOW_LCURLY_in_keysyms716 = frozenset([15])
- FOLLOW_LBRACKET_in_keysyms718 = frozenset([37])
- FOLLOW_NAME_in_keysyms722 = frozenset([16, 19])
- FOLLOW_COMMA_in_keysyms725 = frozenset([37])
- FOLLOW_NAME_in_keysyms729 = frozenset([16, 19])
- FOLLOW_RBRACKET_in_keysyms733 = frozenset([18])
- FOLLOW_RCURLY_in_keysyms735 = frozenset([1])
- FOLLOW_TOKEN_DEFAULT_in_mapOptions760 = frozenset([1])
- FOLLOW_TOKEN_HIDDEN_in_mapOptions768 = frozenset([1])
- FOLLOW_TOKEN_PARTIAL_in_mapOptions777 = frozenset([1])
- FOLLOW_TOKEN_ALPHANUMERIC_KEYS_in_mapOptions786 = frozenset([1])
- FOLLOW_TOKEN_ALTERNATE_GROUP_in_mapOptions795 = frozenset([1])
- FOLLOW_TOKEN_XKB_SYMBOLS_in_mapOptions802 = frozenset([1])
+ FOLLOW_section_in_layout160 = frozenset([45, 46, 47, 48, 49, 50])
+ FOLLOW_EOF_in_layout163 = frozenset([1])
+ FOLLOW_mapType_in_section177 = frozenset([32])
+ FOLLOW_mapMaterial_in_section179 = frozenset([1])
+ FOLLOW_mapOptions_in_mapType202 = frozenset([31, 45, 46, 47, 48, 49, 50])
+ FOLLOW_31_in_mapType205 = frozenset([27])
+ FOLLOW_NAME_in_mapType207 = frozenset([31])
+ FOLLOW_31_in_mapType209 = frozenset([1])
+ FOLLOW_32_in_mapMaterial233 = frozenset([35, 36, 40, 41])
+ FOLLOW_line_include_in_mapMaterial239 = frozenset([34, 35, 36, 40, 41])
+ FOLLOW_line_name_in_mapMaterial245 = frozenset([33])
+ FOLLOW_33_in_mapMaterial247 = frozenset([34, 35, 36, 40, 41])
+ FOLLOW_line_keytype_in_mapMaterial253 = frozenset([33])
+ FOLLOW_33_in_mapMaterial255 = frozenset([34, 35, 36, 40, 41])
+ FOLLOW_line_key_in_mapMaterial261 = frozenset([33])
+ FOLLOW_33_in_mapMaterial263 = frozenset([34, 35, 36, 40, 41])
+ FOLLOW_34_in_mapMaterial270 = frozenset([33])
+ FOLLOW_33_in_mapMaterial272 = frozenset([1])
+ FOLLOW_35_in_line_include283 = frozenset([31])
+ FOLLOW_31_in_line_include285 = frozenset([27])
+ FOLLOW_NAME_in_line_include287 = frozenset([31])
+ FOLLOW_31_in_line_include289 = frozenset([1])
+ FOLLOW_36_in_line_name309 = frozenset([37])
+ FOLLOW_37_in_line_name311 = frozenset([27])
+ FOLLOW_NAME_in_line_name315 = frozenset([38])
+ FOLLOW_38_in_line_name317 = frozenset([39])
+ FOLLOW_39_in_line_name319 = frozenset([31])
+ FOLLOW_31_in_line_name321 = frozenset([27])
+ FOLLOW_NAME_in_line_name325 = frozenset([31])
+ FOLLOW_31_in_line_name327 = frozenset([1])
+ FOLLOW_40_in_line_keytype351 = frozenset([37])
+ FOLLOW_37_in_line_keytype353 = frozenset([27])
+ FOLLOW_NAME_in_line_keytype357 = frozenset([38])
+ FOLLOW_38_in_line_keytype359 = frozenset([39])
+ FOLLOW_39_in_line_keytype361 = frozenset([31])
+ FOLLOW_31_in_line_keytype363 = frozenset([27])
+ FOLLOW_NAME_in_line_keytype367 = frozenset([31])
+ FOLLOW_31_in_line_keytype369 = frozenset([1])
+ FOLLOW_41_in_line_key394 = frozenset([42])
+ FOLLOW_keycode_in_line_key396 = frozenset([32])
+ FOLLOW_keysyms_in_line_key398 = frozenset([1])
+ FOLLOW_42_in_keycode422 = frozenset([27])
+ FOLLOW_NAME_in_keycode424 = frozenset([43])
+ FOLLOW_43_in_keycode426 = frozenset([1])
+ FOLLOW_32_in_keysyms446 = frozenset([37])
+ FOLLOW_37_in_keysyms448 = frozenset([27])
+ FOLLOW_NAME_in_keysyms452 = frozenset([38, 44])
+ FOLLOW_44_in_keysyms455 = frozenset([27])
+ FOLLOW_NAME_in_keysyms459 = frozenset([38, 44])
+ FOLLOW_38_in_keysyms463 = frozenset([34])
+ FOLLOW_34_in_keysyms465 = frozenset([1])
+ FOLLOW_set_in_mapOptions0 = frozenset([1])
diff --git a/XKBGrammar/XKBGrammarParser.pyc b/XKBGrammar/XKBGrammarParser.pyc
Binary files differ.
diff --git a/XKBGrammar/XKBGrammarWalker.g b/XKBGrammar/XKBGrammarWalker.g
@@ -0,0 +1,79 @@
+// XKB Grammar (X.org)
+// Written by Simos Xenitellis <simos.lists@googlemail.com>, 2008.
+// Version 0.3
+
+tree grammar XKBGrammarWalker;
+
+options
+{
+ tokenVocab=XKBGrammar;
+ ASTLabelType=CommonTree;
+ language=Python;
+}
+
+// We cover XKB symbol files that look like
+//
+// // comments can appear here.
+// one or more modifiers "mysectionname"
+// {
+// // comments can appear here.
+// include "somename" // comments can also appear here.
+// name[somestring] = "sometext";
+// key.type[someotherstring] = "someothertext";
+// key <someotherstring> { [ somesymbol, someothersymbol, ... uptoEightSymbols ] };
+// modifier_map someothertext { somesymbol, someothersymbol, ... uptoEightSymbols };
+// // can also have multiples of the above.
+// };
+//
+// // can have several sections as above.
+
+layout
+ : section+
+ ;
+
+section
+ : ^(SECTION mapType mapMaterial)
+ ;
+
+mapType
+ : ^(MAPTYPE mapOptions+ NAME)
+ ;
+
+mapMaterial
+ : ^(MAPMATERIAL line_include line_name line_keytype line_key)
+ ;
+// : ^(MAPMATERIAL TOKEN_INCLUDE TOKEN_NAME TOKEN_KEY_TYPE TOKEN_KEY)
+// ;
+
+line_include
+ : ^(TOKEN_INCLUDE NAME)
+ ;
+
+line_name
+ : ^(TOKEN_NAME NAME+)
+ ;
+
+line_keytype
+ : ^(TOKEN_KEY_TYPE NAME+)
+ ;
+
+line_key
+ : ^(TOKEN_KEY keycode keysyms)
+ ;
+
+keycode
+ : ^(KEYCODE NAME)
+ ;
+
+keysyms
+ : ^(KEYSYMS NAME+)
+ ;
+
+mapOptions
+ : 'default'
+ | 'hidden'
+ | 'partial'
+ | 'alphanumeric_keys'
+ | 'alternate_group'
+ | 'xkb_symbols'
+ ;
diff --git a/XKBGrammar/XKBGrammar__.g b/XKBGrammar/XKBGrammar__.g
@@ -4,59 +4,50 @@ options {
}
-TOKEN_DEFAULT : 'default' ;
-TOKEN_HIDDEN : 'hidden' ;
-TOKEN_PARTIAL : 'partial' ;
-TOKEN_ALPHANUMERIC_KEYS : 'alphanumeric_keys' ;
-TOKEN_MODIFIER_KEYS : 'modifier_keys' ;
-TOKEN_ALTERNATE_GROUP : 'alternate_group' ;
-TOKEN_XKB_SYMBOLS : 'xkb_symbols' ;
-TOKEN_INCLUDE : 'include' ;
-TOKEN_KEY_TYPE : 'key.type' ;
-TOKEN_NAME : 'name' ;
-TOKEN_KEY : 'key' ;
-LBRACKET : '[' ;
-RBRACKET : ']' ;
-LCURLY : '{' ;
-RCURLY : '}' ;
-COMMA : ',' ;
-DQUOTE : '"' ;
-MINUS : '-' ;
-PLUS : '+' ;
-SEMICOLON : ';' ;
-EQUAL : '=' ;
-LOWERTHAN : '<' ;
-GREATERTHAN : '>' ;
-DOT : '.' ;
-
-// $ANTLR src "XKBGrammar.g" 170
-fragment GENERIC_NAME
- : ('a'..'z'|'A'..'Z'|'_')('a'..'z'|'A'..'Z'|'_'|'0'..'9')
- ;
-
-// $ANTLR src "XKBGrammar.g" 174
+T31 : '"' ;
+T32 : '{' ;
+T33 : ';' ;
+T34 : '}' ;
+T35 : 'include' ;
+T36 : 'name' ;
+T37 : '[' ;
+T38 : ']' ;
+T39 : '=' ;
+T40 : 'key.type' ;
+T41 : 'key' ;
+T42 : '<' ;
+T43 : '>' ;
+T44 : ',' ;
+T45 : 'default' ;
+T46 : 'hidden' ;
+T47 : 'partial' ;
+T48 : 'alphanumeric_keys' ;
+T49 : 'alternate_group' ;
+T50 : 'xkb_symbols' ;
+
+// $ANTLR src "XKBGrammar.g" 123
NAME
- : ('a'..'z'|'A'..'Z'|'_'|'('|')'|'0'..'9')*
+ : ('a'..'z' | 'A'..'Z' | '_' | '-' | '(' | ')' | '0'..'9')*
;
// Comments are currently ignored.
-// $ANTLR src "XKBGrammar.g" 179
+// $ANTLR src "XKBGrammar.g" 128
WS
:
- (' '|'\r'|'\t'|'\u000C'|'\n')
- {$channel=HIDDEN;}
+ ( ' ' | '\r' | '\t' | '\u000C' | '\n')
+ { $channel=HIDDEN; }
;
-// $ANTLR src "XKBGrammar.g" 185
+// $ANTLR src "XKBGrammar.g" 134
COMMENT
:
'/*' .* '*/' {$channel=HIDDEN;}
;
-// $ANTLR src "XKBGrammar.g" 190
+// $ANTLR src "XKBGrammar.g" 139
LINE_COMMENT
:
- '//' ~('\n'|'\r')* '\r'? '\n'
- {$channel=HIDDEN;}
+ '//' ~('\n' | '\r')* '\r'? '\n'
+ { $channel=HIDDEN; }
;
diff --git a/XKBGrammar/check_xkb.py b/XKBGrammar/check_xkb.py
@@ -2,16 +2,11 @@
# -*- coding: utf-8 -*-
import sys
+import pdb
import antlr3
from XKBGrammarLexer import XKBGrammarLexer
-from XKBGrammarParser import XKBGrammarParser, ATTRIBUTES, ATTRIBUTE, INCLUDE, NAME
-
-# Helper function to iterate through all children of a given type
-def getChildrenByType(tree, type_value):
- for i in range(tree.getChildCount()):
- child = tree.getChild(i)
- if child.getType() == type_value:
- yield child
+from XKBGrammarParser import XKBGrammarParser
+from XKBGrammarWalker import XKBGrammarWalker
xkbfilename = "gr"
if len(sys.argv) > 1:
@@ -30,10 +25,13 @@ lexer = XKBGrammarLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = XKBGrammarParser(tokens)
+# pdb.set_trace()
result = parser.layout()
-# Get all of the SPECIES children
-for attribute in getChildrenByType(result.tree, INCLUDE):
- print "attribute: ", attribute,
- value = attribute.getFirstChildWithType(NAME).getText()
- print "value: ", value
+print "tree =", result.tree.toStringTree()
+
+nodes = antlr3.tree.CommonTreeNodeStream(result.tree)
+nodes.setTokenStream(tokens)
+walker = XKBGrammarWalker(nodes)
+# walker.layout()
+
diff --git a/XKBGrammar/gr b/XKBGrammar/gr
@@ -1,10 +1,7 @@
-// my comment
partial alphanumeric_keys alternate_group
xkb_symbols "extended" {
- // my comment
- // more of these comments
include "gr(basic)"
- name[Group1] = "Greece - Extended";
+ name[Group1] = "Greece-Extended";
key.type[Group1] = "THREE_LEVEL"; // yeah, comment
key <AE03> { [ NoSymbol, NoSymbol, sterling ] }; // my comment
key <AE10> { [ NoSymbol, NoSymbol, degree ] }; // more comment
@@ -14,12 +11,12 @@ xkb_symbols "extended" {
include "eurosign(e)"
};
-partial alphanumeric_keys alternate_group
+partial alphanumeric_keys
xkb_symbols "nodeadkeys" {
include "gr(basic)"
- name[Group1] = "Greece - Eliminate dead keys";
+ name[Group1] = "Greece-Eliminatedeadkeys";
key <AC10> { [ semicolon, colon ] }; // ; :
};