commit 9699784811645e771bd1d2dc898bda96d06efa79
parent 911e7ef563787fbbad5c7738f769c8dd0f41df5e
Author: simos.lists <simos.lists@70737e48-4f4a-0410-8df8-290828ad50c4>
Date: Tue, 20 May 2008 23:13:03 +0000
Now we work with multiple groups within a key
git-svn-id: http://keyboardlayouteditor.googlecode.com/svn/trunk@29 70737e48-4f4a-0410-8df8-290828ad50c4
Diffstat:
9 files changed, 825 insertions(+), 625 deletions(-)
diff --git a/XKBGrammar/XKBGrammar.g b/XKBGrammar/XKBGrammar.g
@@ -32,6 +32,7 @@ tokens
KEYSYMS;
VALUE;
STATE;
+ KEYSYMGROUP;
}
// We cover XKB symbol files that look like
@@ -104,8 +105,13 @@ keycode
;
keysyms
- : '{' ('type' '[' tn1=NAME ']' '=' tn2=DQSTRING ',')? '[' keysym+=NAME (',' keysym+=NAME)* ']' '}'
- -> ^(KEYSYMS ^(TOKEN_TYPE $tn1 $tn2)? $keysym+)
+ : '{' ('type' '[' tn1=NAME ']' '=' tn2=DQSTRING ',')? keysymgroup (',' keysymgroup)* '}'
+ -> ^(KEYSYMS ^(TOKEN_TYPE $tn1 $tn2)? keysymgroup+)
+ ;
+
+keysymgroup
+ : '[' keysym+=NAME (',' keysym+=NAME)* ']'
+ -> ^(KEYSYMGROUP $keysym+)
;
mapOptions
@@ -146,7 +152,7 @@ COMMENT
LINE_COMMENT
:
- '//' ~('\n' | '\r')* '\r'? '\n'
+ ('//' | '#') ~('\n' | '\r')* '\r'? '\n'
{ $channel=HIDDEN; }
;
diff --git a/XKBGrammar/XKBGrammar.tokens b/XKBGrammar/XKBGrammar.tokens
@@ -1,22 +1,22 @@
T__29=29
T__28=28
T__27=27
-T__26=26
MAPOPTIONS=13
TOKEN_INCLUDE=4
TOKEN_MODIFIER_MAP=9
TOKEN_TYPE=8
MAPTYPE=11
T__55=55
-NAME=22
+T__56=56
+NAME=23
T__51=51
T__52=52
MAPMATERIAL=14
T__53=53
T__54=54
KEYSYMS=18
-COMMENT=24
-DQSTRING=21
+COMMENT=25
+DQSTRING=22
T__50=50
T__42=42
T__43=43
@@ -28,7 +28,7 @@ T__47=47
T__44=44
SECTION=15
T__45=45
-LINE_COMMENT=25
+LINE_COMMENT=26
KEYCODE=16
T__48=48
T__49=49
@@ -38,7 +38,7 @@ LAYOUT=10
T__30=30
T__31=31
T__32=32
-WS=23
+WS=24
T__33=33
T__34=34
T__35=35
@@ -46,37 +46,38 @@ T__36=36
T__37=37
T__38=38
T__39=39
+KEYSYMGROUP=21
TOKEN_KEY=7
MAPNAME=12
TOKEN_KEY_TYPE=5
KEYCODEX=17
-'alphanumeric_keys'=44
-'Shift'=48
-'alternate_group'=46
-'Mod3'=53
-'type'=40
-'>'=39
-'include'=29
-'hidden'=42
-';'=28
-'Mod1'=51
-'='=33
-'Mod5'=55
-'xkb_symbols'=47
-'}'=27
-'Control'=49
-'key'=35
-'partial'=43
-'{'=26
-'modifier_keys'=45
-'Mod4'=54
-'Mod2'=52
-'<'=38
-'key.type'=34
-'['=31
-'name'=30
-','=37
-'modifier_map'=36
-'default'=41
-']'=32
-'Lock'=50
+'alphanumeric_keys'=45
+'Shift'=49
+'alternate_group'=47
+'Mod3'=54
+'type'=41
+'>'=40
+'include'=30
+'hidden'=43
+';'=29
+'Mod1'=52
+'='=34
+'Mod5'=56
+'xkb_symbols'=48
+'}'=28
+'Control'=50
+'key'=36
+'partial'=44
+'{'=27
+'modifier_keys'=46
+'Mod4'=55
+'Mod2'=53
+'<'=39
+'key.type'=35
+'['=32
+'name'=31
+','=38
+'modifier_map'=37
+'default'=42
+']'=33
+'Lock'=51
diff --git a/XKBGrammar/XKBGrammarLexer.py b/XKBGrammar/XKBGrammarLexer.py
@@ -1,4 +1,4 @@
-# $ANTLR 3.1b1 XKBGrammar.g 2008-05-20 21:54:25
+# $ANTLR 3.1b1 XKBGrammar.g 2008-05-20 22:51:09
import sys
from antlr3 import *
@@ -12,7 +12,6 @@ HIDDEN = BaseRecognizer.HIDDEN
T__29=29
T__28=28
T__27=27
-T__26=26
MAPOPTIONS=13
TOKEN_INCLUDE=4
TOKEN_MODIFIER_MAP=9
@@ -20,15 +19,16 @@ EOF=-1
TOKEN_TYPE=8
MAPTYPE=11
T__55=55
-NAME=22
+T__56=56
+NAME=23
T__51=51
T__52=52
T__53=53
MAPMATERIAL=14
T__54=54
KEYSYMS=18
-COMMENT=24
-DQSTRING=21
+COMMENT=25
+DQSTRING=22
T__50=50
T__42=42
T__43=43
@@ -40,7 +40,7 @@ T__47=47
T__44=44
SECTION=15
T__45=45
-LINE_COMMENT=25
+LINE_COMMENT=26
KEYCODE=16
T__48=48
T__49=49
@@ -51,13 +51,14 @@ T__30=30
T__31=31
T__32=32
T__33=33
-WS=23
+WS=24
T__34=34
T__35=35
T__36=36
T__37=37
T__38=38
T__39=39
+KEYSYMGROUP=21
MAPNAME=12
TOKEN_KEY=7
TOKEN_KEY_TYPE=5
@@ -73,15 +74,15 @@ class XKBGrammarLexer(Lexer):
state = RecognizerSharedState()
Lexer.__init__(self, input, state)
- self.dfa6 = self.DFA6(
- self, 6,
- eot = self.DFA6_eot,
- eof = self.DFA6_eof,
- min = self.DFA6_min,
- max = self.DFA6_max,
- accept = self.DFA6_accept,
- special = self.DFA6_special,
- transition = self.DFA6_transition
+ self.dfa7 = self.DFA7(
+ self, 7,
+ eot = self.DFA7_eot,
+ eof = self.DFA7_eof,
+ min = self.DFA7_min,
+ max = self.DFA7_max,
+ accept = self.DFA7_accept,
+ special = self.DFA7_special,
+ transition = self.DFA7_transition
)
@@ -89,31 +90,6 @@ class XKBGrammarLexer(Lexer):
- # $ANTLR start T__26
- def mT__26(self, ):
-
- try:
- _type = T__26
- _channel = DEFAULT_CHANNEL
-
- # XKBGrammar.g:7:7: ( '{' )
- # XKBGrammar.g:7:9: '{'
- self.match(123)
-
-
-
-
- self._state.type = _type
- self._state.channel = _channel
-
- finally:
-
- pass
-
- # $ANTLR end T__26
-
-
-
# $ANTLR start T__27
def mT__27(self, ):
@@ -121,9 +97,9 @@ class XKBGrammarLexer(Lexer):
_type = T__27
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:8:7: ( '}' )
- # XKBGrammar.g:8:9: '}'
- self.match(125)
+ # XKBGrammar.g:7:7: ( '{' )
+ # XKBGrammar.g:7:9: '{'
+ self.match(123)
@@ -146,9 +122,9 @@ class XKBGrammarLexer(Lexer):
_type = T__28
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:9:7: ( ';' )
- # XKBGrammar.g:9:9: ';'
- self.match(59)
+ # XKBGrammar.g:8:7: ( '}' )
+ # XKBGrammar.g:8:9: '}'
+ self.match(125)
@@ -171,9 +147,9 @@ class XKBGrammarLexer(Lexer):
_type = T__29
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:10:7: ( 'include' )
- # XKBGrammar.g:10:9: 'include'
- self.match("include")
+ # XKBGrammar.g:9:7: ( ';' )
+ # XKBGrammar.g:9:9: ';'
+ self.match(59)
@@ -196,9 +172,9 @@ class XKBGrammarLexer(Lexer):
_type = T__30
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:11:7: ( 'name' )
- # XKBGrammar.g:11:9: 'name'
- self.match("name")
+ # XKBGrammar.g:10:7: ( 'include' )
+ # XKBGrammar.g:10:9: 'include'
+ self.match("include")
@@ -221,9 +197,9 @@ class XKBGrammarLexer(Lexer):
_type = T__31
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:12:7: ( '[' )
- # XKBGrammar.g:12:9: '['
- self.match(91)
+ # XKBGrammar.g:11:7: ( 'name' )
+ # XKBGrammar.g:11:9: 'name'
+ self.match("name")
@@ -246,9 +222,9 @@ class XKBGrammarLexer(Lexer):
_type = T__32
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:13:7: ( ']' )
- # XKBGrammar.g:13:9: ']'
- self.match(93)
+ # XKBGrammar.g:12:7: ( '[' )
+ # XKBGrammar.g:12:9: '['
+ self.match(91)
@@ -271,9 +247,9 @@ class XKBGrammarLexer(Lexer):
_type = T__33
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:14:7: ( '=' )
- # XKBGrammar.g:14:9: '='
- self.match(61)
+ # XKBGrammar.g:13:7: ( ']' )
+ # XKBGrammar.g:13:9: ']'
+ self.match(93)
@@ -296,9 +272,9 @@ class XKBGrammarLexer(Lexer):
_type = T__34
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:15:7: ( 'key.type' )
- # XKBGrammar.g:15:9: 'key.type'
- self.match("key.type")
+ # XKBGrammar.g:14:7: ( '=' )
+ # XKBGrammar.g:14:9: '='
+ self.match(61)
@@ -321,9 +297,9 @@ class XKBGrammarLexer(Lexer):
_type = T__35
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:16:7: ( 'key' )
- # XKBGrammar.g:16:9: 'key'
- self.match("key")
+ # XKBGrammar.g:15:7: ( 'key.type' )
+ # XKBGrammar.g:15:9: 'key.type'
+ self.match("key.type")
@@ -346,9 +322,9 @@ class XKBGrammarLexer(Lexer):
_type = T__36
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:17:7: ( 'modifier_map' )
- # XKBGrammar.g:17:9: 'modifier_map'
- self.match("modifier_map")
+ # XKBGrammar.g:16:7: ( 'key' )
+ # XKBGrammar.g:16:9: 'key'
+ self.match("key")
@@ -371,9 +347,9 @@ class XKBGrammarLexer(Lexer):
_type = T__37
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:18:7: ( ',' )
- # XKBGrammar.g:18:9: ','
- self.match(44)
+ # XKBGrammar.g:17:7: ( 'modifier_map' )
+ # XKBGrammar.g:17:9: 'modifier_map'
+ self.match("modifier_map")
@@ -396,9 +372,9 @@ class XKBGrammarLexer(Lexer):
_type = T__38
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:19:7: ( '<' )
- # XKBGrammar.g:19:9: '<'
- self.match(60)
+ # XKBGrammar.g:18:7: ( ',' )
+ # XKBGrammar.g:18:9: ','
+ self.match(44)
@@ -421,9 +397,9 @@ class XKBGrammarLexer(Lexer):
_type = T__39
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:20:7: ( '>' )
- # XKBGrammar.g:20:9: '>'
- self.match(62)
+ # XKBGrammar.g:19:7: ( '<' )
+ # XKBGrammar.g:19:9: '<'
+ self.match(60)
@@ -446,9 +422,9 @@ class XKBGrammarLexer(Lexer):
_type = T__40
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:21:7: ( 'type' )
- # XKBGrammar.g:21:9: 'type'
- self.match("type")
+ # XKBGrammar.g:20:7: ( '>' )
+ # XKBGrammar.g:20:9: '>'
+ self.match(62)
@@ -471,9 +447,9 @@ class XKBGrammarLexer(Lexer):
_type = T__41
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:22:7: ( 'default' )
- # XKBGrammar.g:22:9: 'default'
- self.match("default")
+ # XKBGrammar.g:21:7: ( 'type' )
+ # XKBGrammar.g:21:9: 'type'
+ self.match("type")
@@ -496,9 +472,9 @@ class XKBGrammarLexer(Lexer):
_type = T__42
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:23:7: ( 'hidden' )
- # XKBGrammar.g:23:9: 'hidden'
- self.match("hidden")
+ # XKBGrammar.g:22:7: ( 'default' )
+ # XKBGrammar.g:22:9: 'default'
+ self.match("default")
@@ -521,9 +497,9 @@ class XKBGrammarLexer(Lexer):
_type = T__43
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:24:7: ( 'partial' )
- # XKBGrammar.g:24:9: 'partial'
- self.match("partial")
+ # XKBGrammar.g:23:7: ( 'hidden' )
+ # XKBGrammar.g:23:9: 'hidden'
+ self.match("hidden")
@@ -546,9 +522,9 @@ class XKBGrammarLexer(Lexer):
_type = T__44
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:25:7: ( 'alphanumeric_keys' )
- # XKBGrammar.g:25:9: 'alphanumeric_keys'
- self.match("alphanumeric_keys")
+ # XKBGrammar.g:24:7: ( 'partial' )
+ # XKBGrammar.g:24:9: 'partial'
+ self.match("partial")
@@ -571,9 +547,9 @@ class XKBGrammarLexer(Lexer):
_type = T__45
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:26:7: ( 'modifier_keys' )
- # XKBGrammar.g:26:9: 'modifier_keys'
- self.match("modifier_keys")
+ # XKBGrammar.g:25:7: ( 'alphanumeric_keys' )
+ # XKBGrammar.g:25:9: 'alphanumeric_keys'
+ self.match("alphanumeric_keys")
@@ -596,9 +572,9 @@ class XKBGrammarLexer(Lexer):
_type = T__46
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:27:7: ( 'alternate_group' )
- # XKBGrammar.g:27:9: 'alternate_group'
- self.match("alternate_group")
+ # XKBGrammar.g:26:7: ( 'modifier_keys' )
+ # XKBGrammar.g:26:9: 'modifier_keys'
+ self.match("modifier_keys")
@@ -621,9 +597,9 @@ class XKBGrammarLexer(Lexer):
_type = T__47
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:28:7: ( 'xkb_symbols' )
- # XKBGrammar.g:28:9: 'xkb_symbols'
- self.match("xkb_symbols")
+ # XKBGrammar.g:27:7: ( 'alternate_group' )
+ # XKBGrammar.g:27:9: 'alternate_group'
+ self.match("alternate_group")
@@ -646,9 +622,9 @@ class XKBGrammarLexer(Lexer):
_type = T__48
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:29:7: ( 'Shift' )
- # XKBGrammar.g:29:9: 'Shift'
- self.match("Shift")
+ # XKBGrammar.g:28:7: ( 'xkb_symbols' )
+ # XKBGrammar.g:28:9: 'xkb_symbols'
+ self.match("xkb_symbols")
@@ -671,9 +647,9 @@ class XKBGrammarLexer(Lexer):
_type = T__49
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:30:7: ( 'Control' )
- # XKBGrammar.g:30:9: 'Control'
- self.match("Control")
+ # XKBGrammar.g:29:7: ( 'Shift' )
+ # XKBGrammar.g:29:9: 'Shift'
+ self.match("Shift")
@@ -696,9 +672,9 @@ class XKBGrammarLexer(Lexer):
_type = T__50
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:31:7: ( 'Lock' )
- # XKBGrammar.g:31:9: 'Lock'
- self.match("Lock")
+ # XKBGrammar.g:30:7: ( 'Control' )
+ # XKBGrammar.g:30:9: 'Control'
+ self.match("Control")
@@ -721,9 +697,9 @@ class XKBGrammarLexer(Lexer):
_type = T__51
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:32:7: ( 'Mod1' )
- # XKBGrammar.g:32:9: 'Mod1'
- self.match("Mod1")
+ # XKBGrammar.g:31:7: ( 'Lock' )
+ # XKBGrammar.g:31:9: 'Lock'
+ self.match("Lock")
@@ -746,9 +722,9 @@ class XKBGrammarLexer(Lexer):
_type = T__52
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:33:7: ( 'Mod2' )
- # XKBGrammar.g:33:9: 'Mod2'
- self.match("Mod2")
+ # XKBGrammar.g:32:7: ( 'Mod1' )
+ # XKBGrammar.g:32:9: 'Mod1'
+ self.match("Mod1")
@@ -771,9 +747,9 @@ class XKBGrammarLexer(Lexer):
_type = T__53
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:34:7: ( 'Mod3' )
- # XKBGrammar.g:34:9: 'Mod3'
- self.match("Mod3")
+ # XKBGrammar.g:33:7: ( 'Mod2' )
+ # XKBGrammar.g:33:9: 'Mod2'
+ self.match("Mod2")
@@ -796,9 +772,9 @@ class XKBGrammarLexer(Lexer):
_type = T__54
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:35:7: ( 'Mod4' )
- # XKBGrammar.g:35:9: 'Mod4'
- self.match("Mod4")
+ # XKBGrammar.g:34:7: ( 'Mod3' )
+ # XKBGrammar.g:34:9: 'Mod3'
+ self.match("Mod3")
@@ -821,6 +797,31 @@ class XKBGrammarLexer(Lexer):
_type = T__55
_channel = DEFAULT_CHANNEL
+ # XKBGrammar.g:35:7: ( 'Mod4' )
+ # XKBGrammar.g:35:9: 'Mod4'
+ self.match("Mod4")
+
+
+
+
+ self._state.type = _type
+ self._state.channel = _channel
+
+ finally:
+
+ pass
+
+ # $ANTLR end T__55
+
+
+
+ # $ANTLR start T__56
+ def mT__56(self, ):
+
+ try:
+ _type = T__56
+ _channel = DEFAULT_CHANNEL
+
# XKBGrammar.g:36:7: ( 'Mod5' )
# XKBGrammar.g:36:9: 'Mod5'
self.match("Mod5")
@@ -835,7 +836,7 @@ class XKBGrammarLexer(Lexer):
pass
- # $ANTLR end T__55
+ # $ANTLR end T__56
@@ -846,9 +847,9 @@ class XKBGrammarLexer(Lexer):
_type = NAME
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:133:2: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' | '+' | '-' )* )
- # XKBGrammar.g:133:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' | '+' | '-' )*
- # XKBGrammar.g:133:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' | '+' | '-' )*
+ # XKBGrammar.g:139:2: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' | '+' | '-' )* )
+ # XKBGrammar.g:139:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' | '+' | '-' )*
+ # XKBGrammar.g:139:4: ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '(' | ')' | '0' .. '9' | '+' | '-' )*
while True: #loop1
alt1 = 2
LA1_0 = self.input.LA(1)
@@ -894,8 +895,8 @@ class XKBGrammarLexer(Lexer):
_type = WS
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:137:2: ( ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' ) )
- # XKBGrammar.g:138:2: ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' )
+ # XKBGrammar.g:143:2: ( ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' ) )
+ # XKBGrammar.g:144:2: ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' )
if (9 <= self.input.LA(1) <= 10) or (12 <= self.input.LA(1) <= 13) or self.input.LA(1) == 32:
self.input.consume();
else:
@@ -928,10 +929,10 @@ class XKBGrammarLexer(Lexer):
_type = COMMENT
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:143:6: ( '/*' ( . )* '*/' )
- # XKBGrammar.g:144:2: '/*' ( . )* '*/'
+ # XKBGrammar.g:149:6: ( '/*' ( . )* '*/' )
+ # XKBGrammar.g:150:2: '/*' ( . )* '*/'
self.match("/*")
- # XKBGrammar.g:144:7: ( . )*
+ # XKBGrammar.g:150:7: ( . )*
while True: #loop2
alt2 = 2
LA2_0 = self.input.LA(1)
@@ -950,7 +951,7 @@ class XKBGrammarLexer(Lexer):
if alt2 == 1:
- # XKBGrammar.g:144:7: .
+ # XKBGrammar.g:150:7: .
self.matchAny()
@@ -985,20 +986,45 @@ class XKBGrammarLexer(Lexer):
_type = LINE_COMMENT
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:148:6: ( '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' )
- # XKBGrammar.g:149:2: '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n'
- self.match("//")
- # XKBGrammar.g:149:7: (~ ( '\\n' | '\\r' ) )*
- while True: #loop3
+ # XKBGrammar.g:154:6: ( ( '//' | '#' ) (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' )
+ # XKBGrammar.g:155:2: ( '//' | '#' ) (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n'
+ # XKBGrammar.g:155:2: ( '//' | '#' )
+ alt3 = 2
+ LA3_0 = self.input.LA(1)
+
+ if (LA3_0 == 47) :
+ alt3 = 1
+ elif (LA3_0 == 35) :
alt3 = 2
- LA3_0 = self.input.LA(1)
+ else:
+ nvae = NoViableAltException("", 3, 0, self.input)
+
+ raise nvae
+
+ if alt3 == 1:
+ # XKBGrammar.g:155:3: '//'
+ self.match("//")
+
+
+
+ elif alt3 == 2:
+ # XKBGrammar.g:155:10: '#'
+ self.match(35)
+
+
- if ((0 <= LA3_0 <= 9) or (11 <= LA3_0 <= 12) or (14 <= LA3_0 <= 65534)) :
- alt3 = 1
+ # XKBGrammar.g:155:16: (~ ( '\\n' | '\\r' ) )*
+ while True: #loop4
+ alt4 = 2
+ LA4_0 = self.input.LA(1)
- if alt3 == 1:
- # XKBGrammar.g:149:7: ~ ( '\\n' | '\\r' )
+ if ((0 <= LA4_0 <= 9) or (11 <= LA4_0 <= 12) or (14 <= LA4_0 <= 65534)) :
+ alt4 = 1
+
+
+ if alt4 == 1:
+ # XKBGrammar.g:155:16: ~ ( '\\n' | '\\r' )
if (0 <= self.input.LA(1) <= 9) or (11 <= self.input.LA(1) <= 12) or (14 <= self.input.LA(1) <= 65534):
self.input.consume();
else:
@@ -1010,17 +1036,17 @@ class XKBGrammarLexer(Lexer):
else:
- break #loop3
+ break #loop4
- # XKBGrammar.g:149:23: ( '\\r' )?
- alt4 = 2
- LA4_0 = self.input.LA(1)
+ # XKBGrammar.g:155:32: ( '\\r' )?
+ alt5 = 2
+ LA5_0 = self.input.LA(1)
- if (LA4_0 == 13) :
- alt4 = 1
- if alt4 == 1:
- # XKBGrammar.g:149:23: '\\r'
+ if (LA5_0 == 13) :
+ alt5 = 1
+ if alt5 == 1:
+ # XKBGrammar.g:155:32: '\\r'
self.match(13)
@@ -1052,22 +1078,22 @@ class XKBGrammarLexer(Lexer):
_type = DQSTRING
_channel = DEFAULT_CHANNEL
- # XKBGrammar.g:157:6: ( '\"' ( options {greedy=false; } : ~ ( '\"' ) )* '\"' )
- # XKBGrammar.g:157:10: '\"' ( options {greedy=false; } : ~ ( '\"' ) )* '\"'
+ # XKBGrammar.g:163:6: ( '\"' ( options {greedy=false; } : ~ ( '\"' ) )* '\"' )
+ # XKBGrammar.g:163:10: '\"' ( options {greedy=false; } : ~ ( '\"' ) )* '\"'
self.match(34)
- # XKBGrammar.g:157:14: ( options {greedy=false; } : ~ ( '\"' ) )*
- while True: #loop5
- alt5 = 2
- LA5_0 = self.input.LA(1)
+ # XKBGrammar.g:163:14: ( options {greedy=false; } : ~ ( '\"' ) )*
+ while True: #loop6
+ alt6 = 2
+ LA6_0 = self.input.LA(1)
- if ((0 <= LA5_0 <= 33) or (35 <= LA5_0 <= 65534)) :
- alt5 = 1
- elif (LA5_0 == 34) :
- alt5 = 2
+ if ((0 <= LA6_0 <= 33) or (35 <= LA6_0 <= 65534)) :
+ alt6 = 1
+ elif (LA6_0 == 34) :
+ alt6 = 2
- if alt5 == 1:
- # XKBGrammar.g:157:39: ~ ( '\"' )
+ if alt6 == 1:
+ # XKBGrammar.g:163:39: ~ ( '\"' )
if (0 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 65534):
self.input.consume();
else:
@@ -1079,7 +1105,7 @@ class XKBGrammarLexer(Lexer):
else:
- break #loop5
+ break #loop6
self.match(34)
@@ -1099,214 +1125,214 @@ class XKBGrammarLexer(Lexer):
def mTokens(self):
- # XKBGrammar.g:1:8: ( T__26 | T__27 | T__28 | T__29 | T__30 | T__31 | T__32 | T__33 | T__34 | T__35 | T__36 | T__37 | T__38 | T__39 | T__40 | T__41 | T__42 | T__43 | T__44 | T__45 | T__46 | T__47 | T__48 | T__49 | T__50 | T__51 | T__52 | T__53 | T__54 | T__55 | NAME | WS | COMMENT | LINE_COMMENT | DQSTRING )
- alt6 = 35
- alt6 = self.dfa6.predict(self.input)
- if alt6 == 1:
- # XKBGrammar.g:1:10: T__26
- self.mT__26()
-
-
-
- elif alt6 == 2:
- # XKBGrammar.g:1:16: T__27
+ # XKBGrammar.g:1:8: ( T__27 | T__28 | T__29 | T__30 | T__31 | T__32 | T__33 | T__34 | T__35 | T__36 | T__37 | T__38 | T__39 | T__40 | T__41 | T__42 | T__43 | T__44 | T__45 | T__46 | T__47 | T__48 | T__49 | T__50 | T__51 | T__52 | T__53 | T__54 | T__55 | T__56 | NAME | WS | COMMENT | LINE_COMMENT | DQSTRING )
+ alt7 = 35
+ alt7 = self.dfa7.predict(self.input)
+ if alt7 == 1:
+ # XKBGrammar.g:1:10: T__27
self.mT__27()
- elif alt6 == 3:
- # XKBGrammar.g:1:22: T__28
+ elif alt7 == 2:
+ # XKBGrammar.g:1:16: T__28
self.mT__28()
- elif alt6 == 4:
- # XKBGrammar.g:1:28: T__29
+ elif alt7 == 3:
+ # XKBGrammar.g:1:22: T__29
self.mT__29()
- elif alt6 == 5:
- # XKBGrammar.g:1:34: T__30
+ elif alt7 == 4:
+ # XKBGrammar.g:1:28: T__30
self.mT__30()
- elif alt6 == 6:
- # XKBGrammar.g:1:40: T__31
+ elif alt7 == 5:
+ # XKBGrammar.g:1:34: T__31
self.mT__31()
- elif alt6 == 7:
- # XKBGrammar.g:1:46: T__32
+ elif alt7 == 6:
+ # XKBGrammar.g:1:40: T__32
self.mT__32()
- elif alt6 == 8:
- # XKBGrammar.g:1:52: T__33
+ elif alt7 == 7:
+ # XKBGrammar.g:1:46: T__33
self.mT__33()
- elif alt6 == 9:
- # XKBGrammar.g:1:58: T__34
+ elif alt7 == 8:
+ # XKBGrammar.g:1:52: T__34
self.mT__34()
- elif alt6 == 10:
- # XKBGrammar.g:1:64: T__35
+ elif alt7 == 9:
+ # XKBGrammar.g:1:58: T__35
self.mT__35()
- elif alt6 == 11:
- # XKBGrammar.g:1:70: T__36
+ elif alt7 == 10:
+ # XKBGrammar.g:1:64: T__36
self.mT__36()
- elif alt6 == 12:
- # XKBGrammar.g:1:76: T__37
+ elif alt7 == 11:
+ # XKBGrammar.g:1:70: T__37
self.mT__37()
- elif alt6 == 13:
- # XKBGrammar.g:1:82: T__38
+ elif alt7 == 12:
+ # XKBGrammar.g:1:76: T__38
self.mT__38()
- elif alt6 == 14:
- # XKBGrammar.g:1:88: T__39
+ elif alt7 == 13:
+ # XKBGrammar.g:1:82: T__39
self.mT__39()
- elif alt6 == 15:
- # XKBGrammar.g:1:94: T__40
+ elif alt7 == 14:
+ # XKBGrammar.g:1:88: T__40
self.mT__40()
- elif alt6 == 16:
- # XKBGrammar.g:1:100: T__41
+ elif alt7 == 15:
+ # XKBGrammar.g:1:94: T__41
self.mT__41()
- elif alt6 == 17:
- # XKBGrammar.g:1:106: T__42
+ elif alt7 == 16:
+ # XKBGrammar.g:1:100: T__42
self.mT__42()
- elif alt6 == 18:
- # XKBGrammar.g:1:112: T__43
+ elif alt7 == 17:
+ # XKBGrammar.g:1:106: T__43
self.mT__43()
- elif alt6 == 19:
- # XKBGrammar.g:1:118: T__44
+ elif alt7 == 18:
+ # XKBGrammar.g:1:112: T__44
self.mT__44()
- elif alt6 == 20:
- # XKBGrammar.g:1:124: T__45
+ elif alt7 == 19:
+ # XKBGrammar.g:1:118: T__45
self.mT__45()
- elif alt6 == 21:
- # XKBGrammar.g:1:130: T__46
+ elif alt7 == 20:
+ # XKBGrammar.g:1:124: T__46
self.mT__46()
- elif alt6 == 22:
- # XKBGrammar.g:1:136: T__47
+ elif alt7 == 21:
+ # XKBGrammar.g:1:130: T__47
self.mT__47()
- elif alt6 == 23:
- # XKBGrammar.g:1:142: T__48
+ elif alt7 == 22:
+ # XKBGrammar.g:1:136: T__48
self.mT__48()
- elif alt6 == 24:
- # XKBGrammar.g:1:148: T__49
+ elif alt7 == 23:
+ # XKBGrammar.g:1:142: T__49
self.mT__49()
- elif alt6 == 25:
- # XKBGrammar.g:1:154: T__50
+ elif alt7 == 24:
+ # XKBGrammar.g:1:148: T__50
self.mT__50()
- elif alt6 == 26:
- # XKBGrammar.g:1:160: T__51
+ elif alt7 == 25:
+ # XKBGrammar.g:1:154: T__51
self.mT__51()
- elif alt6 == 27:
- # XKBGrammar.g:1:166: T__52
+ elif alt7 == 26:
+ # XKBGrammar.g:1:160: T__52
self.mT__52()
- elif alt6 == 28:
- # XKBGrammar.g:1:172: T__53
+ elif alt7 == 27:
+ # XKBGrammar.g:1:166: T__53
self.mT__53()
- elif alt6 == 29:
- # XKBGrammar.g:1:178: T__54
+ elif alt7 == 28:
+ # XKBGrammar.g:1:172: T__54
self.mT__54()
- elif alt6 == 30:
- # XKBGrammar.g:1:184: T__55
+ elif alt7 == 29:
+ # XKBGrammar.g:1:178: T__55
self.mT__55()
- elif alt6 == 31:
+ elif alt7 == 30:
+ # XKBGrammar.g:1:184: T__56
+ self.mT__56()
+
+
+
+ elif alt7 == 31:
# XKBGrammar.g:1:190: NAME
self.mNAME()
- elif alt6 == 32:
+ elif alt7 == 32:
# XKBGrammar.g:1:195: WS
self.mWS()
- elif alt6 == 33:
+ elif alt7 == 33:
# XKBGrammar.g:1:198: COMMENT
self.mCOMMENT()
- elif alt6 == 34:
+ elif alt7 == 34:
# XKBGrammar.g:1:206: LINE_COMMENT
self.mLINE_COMMENT()
- elif alt6 == 35:
+ elif alt7 == 35:
# XKBGrammar.g:1:219: DQSTRING
self.mDQSTRING()
@@ -1317,10 +1343,10 @@ class XKBGrammarLexer(Lexer):
- # lookup tables for DFA #6
+ # lookup tables for DFA #7
- DFA6_eot = DFA.unpack(
- u"\1\30\3\uffff\2\30\3\uffff\2\30\3\uffff\12\30\4\uffff\16\30\2\uffff"
+ DFA7_eot = DFA.unpack(
+ u"\1\30\3\uffff\2\30\3\uffff\2\30\3\uffff\12\30\5\uffff\16\30\1\uffff"
u"\2\30\1\76\15\30\1\120\2\uffff\1\30\1\122\10\30\1\133\1\134\1\135"
u"\1\136\1\137\1\140\1\30\1\uffff\1\30\1\uffff\6\30\1\151\1\30\6"
u"\uffff\3\30\1\156\4\30\1\uffff\1\30\1\164\1\30\1\166\1\uffff\1"
@@ -1329,15 +1355,15 @@ class XKBGrammarLexer(Lexer):
u"\uffff\1\u009b\1\uffff"
)
- DFA6_eof = DFA.unpack(
+ DFA7_eof = DFA.unpack(
u"\u009c\uffff"
)
- DFA6_min = DFA.unpack(
+ DFA7_min = DFA.unpack(
u"\1\11\3\uffff\1\156\1\141\3\uffff\1\145\1\157\3\uffff\1\171\1\145"
- u"\1\151\1\141\1\154\1\153\1\150\3\157\2\uffff\1\52\1\uffff\1\143"
+ u"\1\151\1\141\1\154\1\153\1\150\3\157\2\uffff\1\52\2\uffff\1\143"
u"\1\155\1\171\1\144\1\160\1\146\1\144\1\162\1\160\1\142\1\151\1"
- u"\156\1\143\1\144\2\uffff\1\154\1\145\1\50\1\151\1\145\1\141\1\144"
+ u"\156\1\143\1\144\1\uffff\1\154\1\145\1\50\1\151\1\145\1\141\1\144"
u"\1\164\1\150\1\145\1\137\1\146\1\164\1\153\1\61\1\165\1\50\2\uffff"
u"\1\146\1\50\1\165\1\145\1\151\1\141\1\162\1\163\1\164\1\162\6\50"
u"\1\144\1\uffff\1\151\1\uffff\1\154\1\156\1\141\2\156\1\171\1\50"
@@ -1349,11 +1375,11 @@ class XKBGrammarLexer(Lexer):
u"\1\163\1\uffff\1\50\1\uffff"
)
- DFA6_max = DFA.unpack(
+ DFA7_max = DFA.unpack(
u"\1\175\3\uffff\1\156\1\141\3\uffff\1\145\1\157\3\uffff\1\171\1"
- u"\145\1\151\1\141\1\154\1\153\1\150\3\157\2\uffff\1\57\1\uffff\1"
+ u"\145\1\151\1\141\1\154\1\153\1\150\3\157\2\uffff\1\57\2\uffff\1"
u"\143\1\155\1\171\1\144\1\160\1\146\1\144\1\162\1\164\1\142\1\151"
- u"\1\156\1\143\1\144\2\uffff\1\154\1\145\1\172\1\151\1\145\1\141"
+ u"\1\156\1\143\1\144\1\uffff\1\154\1\145\1\172\1\151\1\145\1\141"
u"\1\144\1\164\1\150\1\145\1\137\1\146\1\164\1\153\1\65\1\165\1\172"
u"\2\uffff\1\146\1\172\1\165\1\145\1\151\1\141\1\162\1\163\1\164"
u"\1\162\6\172\1\144\1\uffff\1\151\1\uffff\1\154\1\156\1\141\2\156"
@@ -1365,40 +1391,40 @@ class XKBGrammarLexer(Lexer):
u"\1\145\1\160\1\171\1\172\1\163\1\uffff\1\172\1\uffff"
)
- DFA6_accept = DFA.unpack(
+ DFA7_accept = DFA.unpack(
u"\1\uffff\1\1\1\2\1\3\2\uffff\1\6\1\7\1\10\2\uffff\1\14\1\15\1\16"
- u"\12\uffff\1\37\1\40\1\uffff\1\43\16\uffff\1\41\1\42\21\uffff\1"
+ u"\12\uffff\1\37\1\40\1\uffff\1\42\1\43\16\uffff\1\41\21\uffff\1"
u"\11\1\12\21\uffff\1\5\1\uffff\1\17\10\uffff\1\31\1\32\1\33\1\34"
u"\1\35\1\36\10\uffff\1\27\4\uffff\1\21\5\uffff\1\4\1\uffff\1\20"
u"\1\22\3\uffff\1\30\22\uffff\1\26\1\13\3\uffff\1\24\5\uffff\1\25"
u"\1\uffff\1\23"
)
- DFA6_special = DFA.unpack(
+ DFA7_special = DFA.unpack(
u"\u009c\uffff"
)
- DFA6_transition = [
- DFA.unpack(u"\2\31\1\uffff\2\31\22\uffff\1\31\1\uffff\1\33\11\uffff"
- u"\1\13\2\uffff\1\32\13\uffff\1\3\1\14\1\10\1\15\4\uffff\1\25\10"
- u"\uffff\1\26\1\27\5\uffff\1\24\7\uffff\1\6\1\uffff\1\7\3\uffff\1"
- u"\22\2\uffff\1\17\3\uffff\1\20\1\4\1\uffff\1\11\1\uffff\1\12\1\5"
- u"\1\uffff\1\21\3\uffff\1\16\3\uffff\1\23\2\uffff\1\1\1\uffff\1\2"),
+ DFA7_transition = [
+ DFA.unpack(u"\2\31\1\uffff\2\31\22\uffff\1\31\1\uffff\1\34\1\33\10"
+ u"\uffff\1\13\2\uffff\1\32\13\uffff\1\3\1\14\1\10\1\15\4\uffff\1"
+ u"\25\10\uffff\1\26\1\27\5\uffff\1\24\7\uffff\1\6\1\uffff\1\7\3\uffff"
+ u"\1\22\2\uffff\1\17\3\uffff\1\20\1\4\1\uffff\1\11\1\uffff\1\12\1"
+ u"\5\1\uffff\1\21\3\uffff\1\16\3\uffff\1\23\2\uffff\1\1\1\uffff\1"
+ u"\2"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
- DFA.unpack(u"\1\34"),
DFA.unpack(u"\1\35"),
+ DFA.unpack(u"\1\36"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
- DFA.unpack(u"\1\36"),
DFA.unpack(u"\1\37"),
+ DFA.unpack(u"\1\40"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
- DFA.unpack(u"\1\40"),
DFA.unpack(u"\1\41"),
DFA.unpack(u"\1\42"),
DFA.unpack(u"\1\43"),
@@ -1408,9 +1434,11 @@ class XKBGrammarLexer(Lexer):
DFA.unpack(u"\1\47"),
DFA.unpack(u"\1\50"),
DFA.unpack(u"\1\51"),
+ DFA.unpack(u"\1\52"),
+ DFA.unpack(u""),
DFA.unpack(u""),
+ DFA.unpack(u"\1\53\4\uffff\1\33"),
DFA.unpack(u""),
- DFA.unpack(u"\1\52\4\uffff\1\53"),
DFA.unpack(u""),
DFA.unpack(u"\1\54"),
DFA.unpack(u"\1\55"),
@@ -1427,7 +1455,6 @@ class XKBGrammarLexer(Lexer):
DFA.unpack(u"\1\71"),
DFA.unpack(u"\1\72"),
DFA.unpack(u""),
- DFA.unpack(u""),
DFA.unpack(u"\1\73"),
DFA.unpack(u"\1\74"),
DFA.unpack(u"\2\30\1\uffff\1\30\1\uffff\1\30\1\75\1\uffff\12\30\7"
@@ -1562,9 +1589,9 @@ class XKBGrammarLexer(Lexer):
DFA.unpack(u"")
]
- # class definition for DFA #6
+ # class definition for DFA #7
- DFA6 = DFA
+ DFA7 = DFA
diff --git a/XKBGrammar/XKBGrammarLexer.pyc b/XKBGrammar/XKBGrammarLexer.pyc
Binary files differ.
diff --git a/XKBGrammar/XKBGrammarParser.py b/XKBGrammar/XKBGrammarParser.py
@@ -1,4 +1,4 @@
-# $ANTLR 3.1b1 XKBGrammar.g 2008-05-20 21:54:24
+# $ANTLR 3.1b1 XKBGrammar.g 2008-05-20 22:51:08
import sys
from antlr3 import *
@@ -15,7 +15,6 @@ HIDDEN = BaseRecognizer.HIDDEN
T__29=29
T__28=28
T__27=27
-T__26=26
MAPOPTIONS=13
TOKEN_INCLUDE=4
TOKEN_MODIFIER_MAP=9
@@ -23,15 +22,16 @@ EOF=-1
TOKEN_TYPE=8
MAPTYPE=11
T__55=55
-NAME=22
+T__56=56
+NAME=23
T__51=51
T__52=52
MAPMATERIAL=14
T__53=53
T__54=54
KEYSYMS=18
-COMMENT=24
-DQSTRING=21
+COMMENT=25
+DQSTRING=22
T__50=50
T__42=42
T__43=43
@@ -43,7 +43,7 @@ T__47=47
T__44=44
SECTION=15
T__45=45
-LINE_COMMENT=25
+LINE_COMMENT=26
KEYCODE=16
T__48=48
T__49=49
@@ -53,7 +53,7 @@ LAYOUT=10
T__30=30
T__31=31
T__32=32
-WS=23
+WS=24
T__33=33
T__34=34
T__35=35
@@ -61,6 +61,7 @@ T__36=36
T__37=37
T__38=38
T__39=39
+KEYSYMGROUP=21
TOKEN_KEY=7
MAPNAME=12
TOKEN_KEY_TYPE=5
@@ -72,8 +73,8 @@ tokenNames = [
"TOKEN_INCLUDE", "TOKEN_KEY_TYPE", "TOKEN_NAME", "TOKEN_KEY", "TOKEN_TYPE",
"TOKEN_MODIFIER_MAP", "LAYOUT", "MAPTYPE", "MAPNAME", "MAPOPTIONS",
"MAPMATERIAL", "SECTION", "KEYCODE", "KEYCODEX", "KEYSYMS", "VALUE",
- "STATE", "DQSTRING", "NAME", "WS", "COMMENT", "LINE_COMMENT", "'{'",
- "'}'", "';'", "'include'", "'name'", "'['", "']'", "'='", "'key.type'",
+ "STATE", "KEYSYMGROUP", "DQSTRING", "NAME", "WS", "COMMENT", "LINE_COMMENT",
+ "'{'", "'}'", "';'", "'include'", "'name'", "'['", "']'", "'='", "'key.type'",
"'key'", "'modifier_map'", "','", "'<'", "'>'", "'type'", "'default'",
"'hidden'", "'partial'", "'alphanumeric_keys'", "'modifier_keys'", "'alternate_group'",
"'xkb_symbols'", "'Shift'", "'Control'", "'Lock'", "'Mod1'", "'Mod2'",
@@ -117,7 +118,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start layout
- # XKBGrammar.g:53:1: layout : ( section )+ EOF -> ^( LAYOUT ( section )+ ) ;
+ # XKBGrammar.g:54:1: layout : ( section )+ EOF -> ^( LAYOUT ( section )+ ) ;
def layout(self, ):
retval = self.layout_return()
@@ -134,21 +135,21 @@ class XKBGrammarParser(Parser):
stream_section = RewriteRuleSubtreeStream(self.adaptor, "rule section")
try:
try:
- # XKBGrammar.g:54:2: ( ( section )+ EOF -> ^( LAYOUT ( section )+ ) )
- # XKBGrammar.g:54:4: ( section )+ EOF
- # XKBGrammar.g:54:4: ( section )+
+ # XKBGrammar.g:55:2: ( ( section )+ EOF -> ^( LAYOUT ( section )+ ) )
+ # XKBGrammar.g:55:4: ( section )+ EOF
+ # XKBGrammar.g:55:4: ( section )+
cnt1 = 0
while True: #loop1
alt1 = 2
LA1_0 = self.input.LA(1)
- if ((41 <= LA1_0 <= 47)) :
+ if ((42 <= LA1_0 <= 48)) :
alt1 = 1
if alt1 == 1:
- # XKBGrammar.g:54:4: section
- self._state.following.append(self.FOLLOW_section_in_layout133)
+ # XKBGrammar.g:55:4: section
+ self._state.following.append(self.FOLLOW_section_in_layout137)
section1 = self.section()
self._state.following.pop()
@@ -167,7 +168,7 @@ class XKBGrammarParser(Parser):
EOF2 = self.input.LT(1)
- self.match(self.input, EOF, self.FOLLOW_EOF_in_layout136)
+ self.match(self.input, EOF, self.FOLLOW_EOF_in_layout140)
stream_EOF.add(EOF2)
# AST Rewrite
# elements: section
@@ -185,12 +186,12 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 55:2: -> ^( LAYOUT ( section )+ )
- # XKBGrammar.g:55:5: ^( LAYOUT ( section )+ )
+ # 56:2: -> ^( LAYOUT ( section )+ )
+ # XKBGrammar.g:56:5: ^( LAYOUT ( section )+ )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(LAYOUT, "LAYOUT"), root_1)
- # XKBGrammar.g:55:14: ( section )+
+ # XKBGrammar.g:56:14: ( section )+
if not (stream_section.hasNext()):
raise RewriteEarlyExitException()
@@ -239,7 +240,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start section
- # XKBGrammar.g:58:1: section : mapType '{' ( mapMaterial )+ '}' ';' -> ^( SECTION mapType ^( MAPMATERIAL ( mapMaterial )+ ) ) ;
+ # XKBGrammar.g:59:1: section : mapType '{' ( mapMaterial )+ '}' ';' -> ^( SECTION mapType ^( MAPMATERIAL ( mapMaterial )+ ) ) ;
def section(self, ):
retval = self.section_return()
@@ -258,36 +259,36 @@ class XKBGrammarParser(Parser):
char_literal4_tree = None
char_literal6_tree = None
char_literal7_tree = None
- stream_26 = RewriteRuleTokenStream(self.adaptor, "token 26")
stream_27 = RewriteRuleTokenStream(self.adaptor, "token 27")
stream_28 = RewriteRuleTokenStream(self.adaptor, "token 28")
+ stream_29 = RewriteRuleTokenStream(self.adaptor, "token 29")
stream_mapMaterial = RewriteRuleSubtreeStream(self.adaptor, "rule mapMaterial")
stream_mapType = RewriteRuleSubtreeStream(self.adaptor, "rule mapType")
try:
try:
- # XKBGrammar.g:59:2: ( mapType '{' ( mapMaterial )+ '}' ';' -> ^( SECTION mapType ^( MAPMATERIAL ( mapMaterial )+ ) ) )
- # XKBGrammar.g:59:4: mapType '{' ( mapMaterial )+ '}' ';'
- self._state.following.append(self.FOLLOW_mapType_in_section159)
+ # XKBGrammar.g:60:2: ( mapType '{' ( mapMaterial )+ '}' ';' -> ^( SECTION mapType ^( MAPMATERIAL ( mapMaterial )+ ) ) )
+ # XKBGrammar.g:60:4: mapType '{' ( mapMaterial )+ '}' ';'
+ self._state.following.append(self.FOLLOW_mapType_in_section163)
mapType3 = self.mapType()
self._state.following.pop()
stream_mapType.add(mapType3.tree)
char_literal4 = self.input.LT(1)
- self.match(self.input, 26, self.FOLLOW_26_in_section161)
- stream_26.add(char_literal4)
- # XKBGrammar.g:59:16: ( mapMaterial )+
+ self.match(self.input, 27, self.FOLLOW_27_in_section165)
+ stream_27.add(char_literal4)
+ # XKBGrammar.g:60:16: ( mapMaterial )+
cnt2 = 0
while True: #loop2
alt2 = 2
LA2_0 = self.input.LA(1)
- if ((29 <= LA2_0 <= 30) or (34 <= LA2_0 <= 36)) :
+ if ((30 <= LA2_0 <= 31) or (35 <= LA2_0 <= 37)) :
alt2 = 1
if alt2 == 1:
- # XKBGrammar.g:59:16: mapMaterial
- self._state.following.append(self.FOLLOW_mapMaterial_in_section163)
+ # XKBGrammar.g:60:16: mapMaterial
+ self._state.following.append(self.FOLLOW_mapMaterial_in_section167)
mapMaterial5 = self.mapMaterial()
self._state.following.pop()
@@ -306,11 +307,11 @@ class XKBGrammarParser(Parser):
char_literal6 = self.input.LT(1)
- self.match(self.input, 27, self.FOLLOW_27_in_section166)
- stream_27.add(char_literal6)
+ self.match(self.input, 28, self.FOLLOW_28_in_section170)
+ stream_28.add(char_literal6)
char_literal7 = self.input.LT(1)
- self.match(self.input, 28, self.FOLLOW_28_in_section168)
- stream_28.add(char_literal7)
+ self.match(self.input, 29, self.FOLLOW_29_in_section172)
+ stream_29.add(char_literal7)
# AST Rewrite
# elements: mapMaterial, mapType
# token labels:
@@ -327,17 +328,17 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 60:2: -> ^( SECTION mapType ^( MAPMATERIAL ( mapMaterial )+ ) )
- # XKBGrammar.g:60:5: ^( SECTION mapType ^( MAPMATERIAL ( mapMaterial )+ ) )
+ # 61:2: -> ^( SECTION mapType ^( MAPMATERIAL ( mapMaterial )+ ) )
+ # XKBGrammar.g:61:5: ^( SECTION mapType ^( MAPMATERIAL ( mapMaterial )+ ) )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(SECTION, "SECTION"), root_1)
self.adaptor.addChild(root_1, stream_mapType.nextTree())
- # XKBGrammar.g:60:23: ^( MAPMATERIAL ( mapMaterial )+ )
+ # XKBGrammar.g:61:23: ^( MAPMATERIAL ( mapMaterial )+ )
root_2 = self.adaptor.nil()
root_2 = self.adaptor.becomeRoot(self.adaptor.createFromType(MAPMATERIAL, "MAPMATERIAL"), root_2)
- # XKBGrammar.g:60:37: ( mapMaterial )+
+ # XKBGrammar.g:61:37: ( mapMaterial )+
if not (stream_mapMaterial.hasNext()):
raise RewriteEarlyExitException()
@@ -388,7 +389,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start mapType
- # XKBGrammar.g:63:1: mapType : ( mapOptions )+ DQSTRING -> ^( MAPTYPE ^( MAPOPTIONS ( mapOptions )+ ) ^( MAPNAME DQSTRING ) ) ;
+ # XKBGrammar.g:64:1: mapType : ( mapOptions )+ DQSTRING -> ^( MAPTYPE ^( MAPOPTIONS ( mapOptions )+ ) ^( MAPNAME DQSTRING ) ) ;
def mapType(self, ):
retval = self.mapType_return()
@@ -405,21 +406,21 @@ class XKBGrammarParser(Parser):
stream_mapOptions = RewriteRuleSubtreeStream(self.adaptor, "rule mapOptions")
try:
try:
- # XKBGrammar.g:64:2: ( ( mapOptions )+ DQSTRING -> ^( MAPTYPE ^( MAPOPTIONS ( mapOptions )+ ) ^( MAPNAME DQSTRING ) ) )
- # XKBGrammar.g:64:4: ( mapOptions )+ DQSTRING
- # XKBGrammar.g:64:4: ( mapOptions )+
+ # XKBGrammar.g:65:2: ( ( mapOptions )+ DQSTRING -> ^( MAPTYPE ^( MAPOPTIONS ( mapOptions )+ ) ^( MAPNAME DQSTRING ) ) )
+ # XKBGrammar.g:65:4: ( mapOptions )+ DQSTRING
+ # XKBGrammar.g:65:4: ( mapOptions )+
cnt3 = 0
while True: #loop3
alt3 = 2
LA3_0 = self.input.LA(1)
- if ((41 <= LA3_0 <= 47)) :
+ if ((42 <= LA3_0 <= 48)) :
alt3 = 1
if alt3 == 1:
- # XKBGrammar.g:64:4: mapOptions
- self._state.following.append(self.FOLLOW_mapOptions_in_mapType196)
+ # XKBGrammar.g:65:4: mapOptions
+ self._state.following.append(self.FOLLOW_mapOptions_in_mapType200)
mapOptions8 = self.mapOptions()
self._state.following.pop()
@@ -438,7 +439,7 @@ class XKBGrammarParser(Parser):
DQSTRING9 = self.input.LT(1)
- self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_mapType199)
+ self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_mapType203)
stream_DQSTRING.add(DQSTRING9)
# AST Rewrite
# elements: DQSTRING, mapOptions
@@ -456,16 +457,16 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 65:2: -> ^( MAPTYPE ^( MAPOPTIONS ( mapOptions )+ ) ^( MAPNAME DQSTRING ) )
- # XKBGrammar.g:65:5: ^( MAPTYPE ^( MAPOPTIONS ( mapOptions )+ ) ^( MAPNAME DQSTRING ) )
+ # 66:2: -> ^( MAPTYPE ^( MAPOPTIONS ( mapOptions )+ ) ^( MAPNAME DQSTRING ) )
+ # XKBGrammar.g:66:5: ^( MAPTYPE ^( MAPOPTIONS ( mapOptions )+ ) ^( MAPNAME DQSTRING ) )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(MAPTYPE, "MAPTYPE"), root_1)
- # XKBGrammar.g:65:15: ^( MAPOPTIONS ( mapOptions )+ )
+ # XKBGrammar.g:66:15: ^( MAPOPTIONS ( mapOptions )+ )
root_2 = self.adaptor.nil()
root_2 = self.adaptor.becomeRoot(self.adaptor.createFromType(MAPOPTIONS, "MAPOPTIONS"), root_2)
- # XKBGrammar.g:65:28: ( mapOptions )+
+ # XKBGrammar.g:66:28: ( mapOptions )+
if not (stream_mapOptions.hasNext()):
raise RewriteEarlyExitException()
@@ -476,7 +477,7 @@ class XKBGrammarParser(Parser):
stream_mapOptions.reset()
self.adaptor.addChild(root_1, root_2)
- # XKBGrammar.g:65:41: ^( MAPNAME DQSTRING )
+ # XKBGrammar.g:66:41: ^( MAPNAME DQSTRING )
root_2 = self.adaptor.nil()
root_2 = self.adaptor.becomeRoot(self.adaptor.createFromType(MAPNAME, "MAPNAME"), root_2)
@@ -523,7 +524,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start mapMaterial
- # XKBGrammar.g:68:1: mapMaterial : ( line_include | line_name ';' | line_keytype ';' | line_key ';' | line_modifier_map ';' );
+ # XKBGrammar.g:69:1: mapMaterial : ( line_include | line_name ';' | line_keytype ';' | line_key ';' | line_modifier_map ';' );
def mapMaterial(self, ):
retval = self.mapMaterial_return()
@@ -553,18 +554,18 @@ class XKBGrammarParser(Parser):
try:
try:
- # XKBGrammar.g:69:2: ( line_include | line_name ';' | line_keytype ';' | line_key ';' | line_modifier_map ';' )
+ # XKBGrammar.g:70:2: ( line_include | line_name ';' | line_keytype ';' | line_key ';' | line_modifier_map ';' )
alt4 = 5
LA4 = self.input.LA(1)
- if LA4 == 29:
+ if LA4 == 30:
alt4 = 1
- elif LA4 == 30:
+ elif LA4 == 31:
alt4 = 2
- elif LA4 == 34:
- alt4 = 3
elif LA4 == 35:
- alt4 = 4
+ alt4 = 3
elif LA4 == 36:
+ alt4 = 4
+ elif LA4 == 37:
alt4 = 5
else:
nvae = NoViableAltException("", 4, 0, self.input)
@@ -572,10 +573,10 @@ class XKBGrammarParser(Parser):
raise nvae
if alt4 == 1:
- # XKBGrammar.g:69:4: line_include
+ # XKBGrammar.g:70:4: line_include
root_0 = self.adaptor.nil()
- self._state.following.append(self.FOLLOW_line_include_in_mapMaterial231)
+ self._state.following.append(self.FOLLOW_line_include_in_mapMaterial235)
line_include10 = self.line_include()
self._state.following.pop()
@@ -584,58 +585,58 @@ class XKBGrammarParser(Parser):
elif alt4 == 2:
- # XKBGrammar.g:70:4: line_name ';'
+ # XKBGrammar.g:71:4: line_name ';'
root_0 = self.adaptor.nil()
- self._state.following.append(self.FOLLOW_line_name_in_mapMaterial237)
+ self._state.following.append(self.FOLLOW_line_name_in_mapMaterial241)
line_name11 = self.line_name()
self._state.following.pop()
self.adaptor.addChild(root_0, line_name11.tree)
char_literal12 = self.input.LT(1)
- self.match(self.input, 28, self.FOLLOW_28_in_mapMaterial239)
+ self.match(self.input, 29, self.FOLLOW_29_in_mapMaterial243)
elif alt4 == 3:
- # XKBGrammar.g:71:4: line_keytype ';'
+ # XKBGrammar.g:72:4: line_keytype ';'
root_0 = self.adaptor.nil()
- self._state.following.append(self.FOLLOW_line_keytype_in_mapMaterial245)
+ self._state.following.append(self.FOLLOW_line_keytype_in_mapMaterial249)
line_keytype13 = self.line_keytype()
self._state.following.pop()
self.adaptor.addChild(root_0, line_keytype13.tree)
char_literal14 = self.input.LT(1)
- self.match(self.input, 28, self.FOLLOW_28_in_mapMaterial247)
+ self.match(self.input, 29, self.FOLLOW_29_in_mapMaterial251)
elif alt4 == 4:
- # XKBGrammar.g:72:4: line_key ';'
+ # XKBGrammar.g:73:4: line_key ';'
root_0 = self.adaptor.nil()
- self._state.following.append(self.FOLLOW_line_key_in_mapMaterial253)
+ self._state.following.append(self.FOLLOW_line_key_in_mapMaterial257)
line_key15 = self.line_key()
self._state.following.pop()
self.adaptor.addChild(root_0, line_key15.tree)
char_literal16 = self.input.LT(1)
- self.match(self.input, 28, self.FOLLOW_28_in_mapMaterial255)
+ self.match(self.input, 29, self.FOLLOW_29_in_mapMaterial259)
elif alt4 == 5:
- # XKBGrammar.g:73:4: line_modifier_map ';'
+ # XKBGrammar.g:74:4: line_modifier_map ';'
root_0 = self.adaptor.nil()
- self._state.following.append(self.FOLLOW_line_modifier_map_in_mapMaterial261)
+ self._state.following.append(self.FOLLOW_line_modifier_map_in_mapMaterial265)
line_modifier_map17 = self.line_modifier_map()
self._state.following.pop()
self.adaptor.addChild(root_0, line_modifier_map17.tree)
char_literal18 = self.input.LT(1)
- self.match(self.input, 28, self.FOLLOW_28_in_mapMaterial263)
+ self.match(self.input, 29, self.FOLLOW_29_in_mapMaterial267)
@@ -669,7 +670,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_include
- # XKBGrammar.g:76:1: line_include : 'include' DQSTRING -> ^( TOKEN_INCLUDE DQSTRING ) ;
+ # XKBGrammar.g:77:1: line_include : 'include' DQSTRING -> ^( TOKEN_INCLUDE DQSTRING ) ;
def line_include(self, ):
retval = self.line_include_return()
@@ -682,18 +683,18 @@ class XKBGrammarParser(Parser):
string_literal19_tree = None
DQSTRING20_tree = None
+ stream_30 = RewriteRuleTokenStream(self.adaptor, "token 30")
stream_DQSTRING = RewriteRuleTokenStream(self.adaptor, "token DQSTRING")
- stream_29 = RewriteRuleTokenStream(self.adaptor, "token 29")
try:
try:
- # XKBGrammar.g:77:2: ( 'include' DQSTRING -> ^( TOKEN_INCLUDE DQSTRING ) )
- # XKBGrammar.g:77:4: 'include' DQSTRING
+ # XKBGrammar.g:78:2: ( 'include' DQSTRING -> ^( TOKEN_INCLUDE DQSTRING ) )
+ # XKBGrammar.g:78:4: 'include' DQSTRING
string_literal19 = self.input.LT(1)
- self.match(self.input, 29, self.FOLLOW_29_in_line_include275)
- stream_29.add(string_literal19)
+ self.match(self.input, 30, self.FOLLOW_30_in_line_include279)
+ stream_30.add(string_literal19)
DQSTRING20 = self.input.LT(1)
- self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_line_include277)
+ self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_line_include281)
stream_DQSTRING.add(DQSTRING20)
# AST Rewrite
# elements: DQSTRING
@@ -711,8 +712,8 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 78:2: -> ^( TOKEN_INCLUDE DQSTRING )
- # XKBGrammar.g:78:5: ^( TOKEN_INCLUDE DQSTRING )
+ # 79:2: -> ^( TOKEN_INCLUDE DQSTRING )
+ # XKBGrammar.g:79:5: ^( TOKEN_INCLUDE DQSTRING )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_INCLUDE, "TOKEN_INCLUDE"), root_1)
@@ -757,7 +758,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_name
- # XKBGrammar.g:81:1: line_name : 'name' '[' n1= NAME ']' '=' n2= DQSTRING -> ^( TOKEN_NAME $n1 ^( VALUE $n2) ) ;
+ # XKBGrammar.g:82:1: line_name : 'name' '[' n1= NAME ']' '=' n2= DQSTRING -> ^( TOKEN_NAME $n1 ^( VALUE $n2) ) ;
def line_name(self, ):
retval = self.line_name_return()
@@ -779,33 +780,33 @@ class XKBGrammarParser(Parser):
char_literal23_tree = None
char_literal24_tree = None
stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
- stream_30 = RewriteRuleTokenStream(self.adaptor, "token 30")
stream_32 = RewriteRuleTokenStream(self.adaptor, "token 32")
stream_31 = RewriteRuleTokenStream(self.adaptor, "token 31")
stream_DQSTRING = RewriteRuleTokenStream(self.adaptor, "token DQSTRING")
stream_33 = RewriteRuleTokenStream(self.adaptor, "token 33")
+ stream_34 = RewriteRuleTokenStream(self.adaptor, "token 34")
try:
try:
- # XKBGrammar.g:82:2: ( 'name' '[' n1= NAME ']' '=' n2= DQSTRING -> ^( TOKEN_NAME $n1 ^( VALUE $n2) ) )
- # XKBGrammar.g:82:4: 'name' '[' n1= NAME ']' '=' n2= DQSTRING
+ # XKBGrammar.g:83:2: ( 'name' '[' n1= NAME ']' '=' n2= DQSTRING -> ^( TOKEN_NAME $n1 ^( VALUE $n2) ) )
+ # XKBGrammar.g:83:4: 'name' '[' n1= NAME ']' '=' n2= DQSTRING
string_literal21 = self.input.LT(1)
- self.match(self.input, 30, self.FOLLOW_30_in_line_name297)
- stream_30.add(string_literal21)
+ self.match(self.input, 31, self.FOLLOW_31_in_line_name301)
+ stream_31.add(string_literal21)
char_literal22 = self.input.LT(1)
- self.match(self.input, 31, self.FOLLOW_31_in_line_name299)
- stream_31.add(char_literal22)
+ self.match(self.input, 32, self.FOLLOW_32_in_line_name303)
+ stream_32.add(char_literal22)
n1 = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_line_name303)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_name307)
stream_NAME.add(n1)
char_literal23 = self.input.LT(1)
- self.match(self.input, 32, self.FOLLOW_32_in_line_name305)
- stream_32.add(char_literal23)
+ self.match(self.input, 33, self.FOLLOW_33_in_line_name309)
+ stream_33.add(char_literal23)
char_literal24 = self.input.LT(1)
- self.match(self.input, 33, self.FOLLOW_33_in_line_name307)
- stream_33.add(char_literal24)
+ self.match(self.input, 34, self.FOLLOW_34_in_line_name311)
+ stream_34.add(char_literal24)
n2 = self.input.LT(1)
- self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_line_name311)
+ self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_line_name315)
stream_DQSTRING.add(n2)
# AST Rewrite
# elements: n2, n1
@@ -825,13 +826,13 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 83:2: -> ^( TOKEN_NAME $n1 ^( VALUE $n2) )
- # XKBGrammar.g:83:5: ^( TOKEN_NAME $n1 ^( VALUE $n2) )
+ # 84:2: -> ^( TOKEN_NAME $n1 ^( VALUE $n2) )
+ # XKBGrammar.g:84:5: ^( TOKEN_NAME $n1 ^( VALUE $n2) )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_NAME, "TOKEN_NAME"), root_1)
self.adaptor.addChild(root_1, stream_n1.nextNode())
- # XKBGrammar.g:83:22: ^( VALUE $n2)
+ # XKBGrammar.g:84:22: ^( VALUE $n2)
root_2 = self.adaptor.nil()
root_2 = self.adaptor.becomeRoot(self.adaptor.createFromType(VALUE, "VALUE"), root_2)
@@ -878,7 +879,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_keytype
- # XKBGrammar.g:86:1: line_keytype : 'key.type' '[' n1= NAME ']' '=' n2= DQSTRING -> ^( TOKEN_KEY_TYPE $n1 ^( VALUE $n2) ) ;
+ # XKBGrammar.g:87:1: line_keytype : 'key.type' '[' n1= NAME ']' '=' n2= DQSTRING -> ^( TOKEN_KEY_TYPE $n1 ^( VALUE $n2) ) ;
def line_keytype(self, ):
retval = self.line_keytype_return()
@@ -901,32 +902,32 @@ class XKBGrammarParser(Parser):
char_literal28_tree = None
stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
stream_32 = RewriteRuleTokenStream(self.adaptor, "token 32")
- stream_31 = RewriteRuleTokenStream(self.adaptor, "token 31")
+ stream_35 = RewriteRuleTokenStream(self.adaptor, "token 35")
stream_DQSTRING = RewriteRuleTokenStream(self.adaptor, "token DQSTRING")
stream_33 = RewriteRuleTokenStream(self.adaptor, "token 33")
stream_34 = RewriteRuleTokenStream(self.adaptor, "token 34")
try:
try:
- # XKBGrammar.g:87:2: ( 'key.type' '[' n1= NAME ']' '=' n2= DQSTRING -> ^( TOKEN_KEY_TYPE $n1 ^( VALUE $n2) ) )
- # XKBGrammar.g:87:4: 'key.type' '[' n1= NAME ']' '=' n2= DQSTRING
+ # XKBGrammar.g:88:2: ( 'key.type' '[' n1= NAME ']' '=' n2= DQSTRING -> ^( TOKEN_KEY_TYPE $n1 ^( VALUE $n2) ) )
+ # XKBGrammar.g:88:4: 'key.type' '[' n1= NAME ']' '=' n2= DQSTRING
string_literal25 = self.input.LT(1)
- self.match(self.input, 34, self.FOLLOW_34_in_line_keytype339)
- stream_34.add(string_literal25)
+ self.match(self.input, 35, self.FOLLOW_35_in_line_keytype343)
+ stream_35.add(string_literal25)
char_literal26 = self.input.LT(1)
- self.match(self.input, 31, self.FOLLOW_31_in_line_keytype341)
- stream_31.add(char_literal26)
+ self.match(self.input, 32, self.FOLLOW_32_in_line_keytype345)
+ stream_32.add(char_literal26)
n1 = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype345)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_line_keytype349)
stream_NAME.add(n1)
char_literal27 = self.input.LT(1)
- self.match(self.input, 32, self.FOLLOW_32_in_line_keytype347)
- stream_32.add(char_literal27)
+ self.match(self.input, 33, self.FOLLOW_33_in_line_keytype351)
+ stream_33.add(char_literal27)
char_literal28 = self.input.LT(1)
- self.match(self.input, 33, self.FOLLOW_33_in_line_keytype349)
- stream_33.add(char_literal28)
+ self.match(self.input, 34, self.FOLLOW_34_in_line_keytype353)
+ stream_34.add(char_literal28)
n2 = self.input.LT(1)
- self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_line_keytype353)
+ self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_line_keytype357)
stream_DQSTRING.add(n2)
# AST Rewrite
# elements: n1, n2
@@ -946,13 +947,13 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 88:2: -> ^( TOKEN_KEY_TYPE $n1 ^( VALUE $n2) )
- # XKBGrammar.g:88:5: ^( TOKEN_KEY_TYPE $n1 ^( VALUE $n2) )
+ # 89:2: -> ^( TOKEN_KEY_TYPE $n1 ^( VALUE $n2) )
+ # XKBGrammar.g:89:5: ^( TOKEN_KEY_TYPE $n1 ^( VALUE $n2) )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_KEY_TYPE, "TOKEN_KEY_TYPE"), root_1)
self.adaptor.addChild(root_1, stream_n1.nextNode())
- # XKBGrammar.g:88:26: ^( VALUE $n2)
+ # XKBGrammar.g:89:26: ^( VALUE $n2)
root_2 = self.adaptor.nil()
root_2 = self.adaptor.becomeRoot(self.adaptor.createFromType(VALUE, "VALUE"), root_2)
@@ -999,7 +1000,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_key
- # XKBGrammar.g:91:1: line_key : 'key' keycode keysyms -> ^( TOKEN_KEY keycode keysyms ) ;
+ # XKBGrammar.g:92:1: line_key : 'key' keycode keysyms -> ^( TOKEN_KEY keycode keysyms ) ;
def line_key(self, ):
retval = self.line_key_return()
@@ -1014,22 +1015,22 @@ class XKBGrammarParser(Parser):
string_literal29_tree = None
- stream_35 = RewriteRuleTokenStream(self.adaptor, "token 35")
+ stream_36 = RewriteRuleTokenStream(self.adaptor, "token 36")
stream_keysyms = RewriteRuleSubtreeStream(self.adaptor, "rule keysyms")
stream_keycode = RewriteRuleSubtreeStream(self.adaptor, "rule keycode")
try:
try:
- # XKBGrammar.g:92:2: ( 'key' keycode keysyms -> ^( TOKEN_KEY keycode keysyms ) )
- # XKBGrammar.g:92:4: 'key' keycode keysyms
+ # XKBGrammar.g:93:2: ( 'key' keycode keysyms -> ^( TOKEN_KEY keycode keysyms ) )
+ # XKBGrammar.g:93:4: 'key' keycode keysyms
string_literal29 = self.input.LT(1)
- self.match(self.input, 35, self.FOLLOW_35_in_line_key381)
- stream_35.add(string_literal29)
- self._state.following.append(self.FOLLOW_keycode_in_line_key383)
+ self.match(self.input, 36, self.FOLLOW_36_in_line_key385)
+ stream_36.add(string_literal29)
+ self._state.following.append(self.FOLLOW_keycode_in_line_key387)
keycode30 = self.keycode()
self._state.following.pop()
stream_keycode.add(keycode30.tree)
- self._state.following.append(self.FOLLOW_keysyms_in_line_key385)
+ self._state.following.append(self.FOLLOW_keysyms_in_line_key389)
keysyms31 = self.keysyms()
self._state.following.pop()
@@ -1050,8 +1051,8 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 93:2: -> ^( TOKEN_KEY keycode keysyms )
- # XKBGrammar.g:93:5: ^( TOKEN_KEY keycode keysyms )
+ # 94:2: -> ^( TOKEN_KEY keycode keysyms )
+ # XKBGrammar.g:94:5: ^( TOKEN_KEY keycode keysyms )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_KEY, "TOKEN_KEY"), root_1)
@@ -1097,7 +1098,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start line_modifier_map
- # XKBGrammar.g:96:1: line_modifier_map : 'modifier_map' state '{' keycode ( ',' keycode )* '}' -> ^( TOKEN_MODIFIER_MAP state ( keycode )+ ) ;
+ # XKBGrammar.g:97:1: line_modifier_map : 'modifier_map' state '{' keycode ( ',' keycode )* '}' -> ^( TOKEN_MODIFIER_MAP state ( keycode )+ ) ;
def line_modifier_map(self, ):
retval = self.line_modifier_map_return()
@@ -1120,47 +1121,47 @@ class XKBGrammarParser(Parser):
char_literal34_tree = None
char_literal36_tree = None
char_literal38_tree = None
- stream_36 = RewriteRuleTokenStream(self.adaptor, "token 36")
- stream_26 = RewriteRuleTokenStream(self.adaptor, "token 26")
stream_27 = RewriteRuleTokenStream(self.adaptor, "token 27")
stream_37 = RewriteRuleTokenStream(self.adaptor, "token 37")
+ stream_28 = RewriteRuleTokenStream(self.adaptor, "token 28")
+ stream_38 = RewriteRuleTokenStream(self.adaptor, "token 38")
stream_state = RewriteRuleSubtreeStream(self.adaptor, "rule state")
stream_keycode = RewriteRuleSubtreeStream(self.adaptor, "rule keycode")
try:
try:
- # XKBGrammar.g:97:2: ( 'modifier_map' state '{' keycode ( ',' keycode )* '}' -> ^( TOKEN_MODIFIER_MAP state ( keycode )+ ) )
- # XKBGrammar.g:97:4: 'modifier_map' state '{' keycode ( ',' keycode )* '}'
+ # XKBGrammar.g:98:2: ( 'modifier_map' state '{' keycode ( ',' keycode )* '}' -> ^( TOKEN_MODIFIER_MAP state ( keycode )+ ) )
+ # XKBGrammar.g:98:4: 'modifier_map' state '{' keycode ( ',' keycode )* '}'
string_literal32 = self.input.LT(1)
- self.match(self.input, 36, self.FOLLOW_36_in_line_modifier_map407)
- stream_36.add(string_literal32)
- self._state.following.append(self.FOLLOW_state_in_line_modifier_map409)
+ self.match(self.input, 37, self.FOLLOW_37_in_line_modifier_map411)
+ stream_37.add(string_literal32)
+ self._state.following.append(self.FOLLOW_state_in_line_modifier_map413)
state33 = self.state()
self._state.following.pop()
stream_state.add(state33.tree)
char_literal34 = self.input.LT(1)
- self.match(self.input, 26, self.FOLLOW_26_in_line_modifier_map411)
- stream_26.add(char_literal34)
- self._state.following.append(self.FOLLOW_keycode_in_line_modifier_map413)
+ self.match(self.input, 27, self.FOLLOW_27_in_line_modifier_map415)
+ stream_27.add(char_literal34)
+ self._state.following.append(self.FOLLOW_keycode_in_line_modifier_map417)
keycode35 = self.keycode()
self._state.following.pop()
stream_keycode.add(keycode35.tree)
- # XKBGrammar.g:97:37: ( ',' keycode )*
+ # XKBGrammar.g:98:37: ( ',' keycode )*
while True: #loop5
alt5 = 2
LA5_0 = self.input.LA(1)
- if (LA5_0 == 37) :
+ if (LA5_0 == 38) :
alt5 = 1
if alt5 == 1:
- # XKBGrammar.g:97:38: ',' keycode
+ # XKBGrammar.g:98:38: ',' keycode
char_literal36 = self.input.LT(1)
- self.match(self.input, 37, self.FOLLOW_37_in_line_modifier_map416)
- stream_37.add(char_literal36)
- self._state.following.append(self.FOLLOW_keycode_in_line_modifier_map418)
+ self.match(self.input, 38, self.FOLLOW_38_in_line_modifier_map420)
+ stream_38.add(char_literal36)
+ self._state.following.append(self.FOLLOW_keycode_in_line_modifier_map422)
keycode37 = self.keycode()
self._state.following.pop()
@@ -1173,8 +1174,8 @@ class XKBGrammarParser(Parser):
char_literal38 = self.input.LT(1)
- self.match(self.input, 27, self.FOLLOW_27_in_line_modifier_map422)
- stream_27.add(char_literal38)
+ self.match(self.input, 28, self.FOLLOW_28_in_line_modifier_map426)
+ stream_28.add(char_literal38)
# AST Rewrite
# elements: state, keycode
# token labels:
@@ -1191,13 +1192,13 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 98:2: -> ^( TOKEN_MODIFIER_MAP state ( keycode )+ )
- # XKBGrammar.g:98:5: ^( TOKEN_MODIFIER_MAP state ( keycode )+ )
+ # 99:2: -> ^( TOKEN_MODIFIER_MAP state ( keycode )+ )
+ # XKBGrammar.g:99:5: ^( TOKEN_MODIFIER_MAP state ( keycode )+ )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_MODIFIER_MAP, "TOKEN_MODIFIER_MAP"), root_1)
self.adaptor.addChild(root_1, stream_state.nextTree())
- # XKBGrammar.g:98:32: ( keycode )+
+ # XKBGrammar.g:99:32: ( keycode )+
if not (stream_keycode.hasNext()):
raise RewriteEarlyExitException()
@@ -1246,7 +1247,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start keycode
- # XKBGrammar.g:101:1: keycode : ( NAME -> ^( KEYCODE NAME ) | '<' NAME '>' -> ^( KEYCODEX NAME ) );
+ # XKBGrammar.g:102:1: keycode : ( NAME -> ^( KEYCODE NAME ) | '<' NAME '>' -> ^( KEYCODEX NAME ) );
def keycode(self, ):
retval = self.keycode_return()
@@ -1264,18 +1265,18 @@ class XKBGrammarParser(Parser):
NAME41_tree = None
char_literal42_tree = None
stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
+ stream_40 = RewriteRuleTokenStream(self.adaptor, "token 40")
stream_39 = RewriteRuleTokenStream(self.adaptor, "token 39")
- stream_38 = RewriteRuleTokenStream(self.adaptor, "token 38")
try:
try:
- # XKBGrammar.g:102:2: ( NAME -> ^( KEYCODE NAME ) | '<' NAME '>' -> ^( KEYCODEX NAME ) )
+ # XKBGrammar.g:103:2: ( NAME -> ^( KEYCODE NAME ) | '<' NAME '>' -> ^( KEYCODEX NAME ) )
alt6 = 2
LA6_0 = self.input.LA(1)
if (LA6_0 == NAME) :
alt6 = 1
- elif (LA6_0 == 38) :
+ elif (LA6_0 == 39) :
alt6 = 2
else:
nvae = NoViableAltException("", 6, 0, self.input)
@@ -1283,9 +1284,9 @@ class XKBGrammarParser(Parser):
raise nvae
if alt6 == 1:
- # XKBGrammar.g:102:4: NAME
+ # XKBGrammar.g:103:4: NAME
NAME39 = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_keycode446)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keycode450)
stream_NAME.add(NAME39)
# AST Rewrite
# elements: NAME
@@ -1303,8 +1304,8 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 102:9: -> ^( KEYCODE NAME )
- # XKBGrammar.g:102:12: ^( KEYCODE NAME )
+ # 103:9: -> ^( KEYCODE NAME )
+ # XKBGrammar.g:103:12: ^( KEYCODE NAME )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(KEYCODE, "KEYCODE"), root_1)
@@ -1319,16 +1320,16 @@ class XKBGrammarParser(Parser):
elif alt6 == 2:
- # XKBGrammar.g:103:4: '<' NAME '>'
+ # XKBGrammar.g:104:4: '<' NAME '>'
char_literal40 = self.input.LT(1)
- self.match(self.input, 38, self.FOLLOW_38_in_keycode459)
- stream_38.add(char_literal40)
+ self.match(self.input, 39, self.FOLLOW_39_in_keycode463)
+ stream_39.add(char_literal40)
NAME41 = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_keycode461)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keycode465)
stream_NAME.add(NAME41)
char_literal42 = self.input.LT(1)
- self.match(self.input, 39, self.FOLLOW_39_in_keycode463)
- stream_39.add(char_literal42)
+ self.match(self.input, 40, self.FOLLOW_40_in_keycode467)
+ stream_40.add(char_literal42)
# AST Rewrite
# elements: NAME
# token labels:
@@ -1345,8 +1346,8 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 103:17: -> ^( KEYCODEX NAME )
- # XKBGrammar.g:103:20: ^( KEYCODEX NAME )
+ # 104:17: -> ^( KEYCODEX NAME )
+ # XKBGrammar.g:104:20: ^( KEYCODEX NAME )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(KEYCODEX, "KEYCODEX"), root_1)
@@ -1390,7 +1391,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start keysyms
- # XKBGrammar.g:106:1: keysyms : '{' ( 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ',' )? '[' keysym+= NAME ( ',' keysym+= NAME )* ']' '}' -> ^( KEYSYMS ( ^( TOKEN_TYPE $tn1 $tn2) )? ( $keysym)+ ) ;
+ # XKBGrammar.g:107:1: keysyms : '{' ( 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ',' )? keysymgroup ( ',' keysymgroup )* '}' -> ^( KEYSYMS ( ^( TOKEN_TYPE $tn1 $tn2) )? ( keysymgroup )+ ) ;
def keysyms(self, ):
retval = self.keysyms_return()
@@ -1406,12 +1407,12 @@ class XKBGrammarParser(Parser):
char_literal46 = None
char_literal47 = None
char_literal48 = None
- char_literal49 = None
char_literal50 = None
- char_literal51 = None
char_literal52 = None
- keysym = None
- list_keysym = None
+ keysymgroup49 = None
+
+ keysymgroup51 = None
+
tn1_tree = None
tn2_tree = None
@@ -1421,92 +1422,82 @@ class XKBGrammarParser(Parser):
char_literal46_tree = None
char_literal47_tree = None
char_literal48_tree = None
- char_literal49_tree = None
char_literal50_tree = None
- char_literal51_tree = None
char_literal52_tree = None
- keysym_tree = None
stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
+ stream_41 = RewriteRuleTokenStream(self.adaptor, "token 41")
stream_32 = RewriteRuleTokenStream(self.adaptor, "token 32")
- stream_40 = RewriteRuleTokenStream(self.adaptor, "token 40")
- stream_31 = RewriteRuleTokenStream(self.adaptor, "token 31")
stream_DQSTRING = RewriteRuleTokenStream(self.adaptor, "token DQSTRING")
stream_33 = RewriteRuleTokenStream(self.adaptor, "token 33")
- stream_26 = RewriteRuleTokenStream(self.adaptor, "token 26")
+ stream_34 = RewriteRuleTokenStream(self.adaptor, "token 34")
stream_27 = RewriteRuleTokenStream(self.adaptor, "token 27")
- stream_37 = RewriteRuleTokenStream(self.adaptor, "token 37")
-
+ stream_28 = RewriteRuleTokenStream(self.adaptor, "token 28")
+ stream_38 = RewriteRuleTokenStream(self.adaptor, "token 38")
+ stream_keysymgroup = RewriteRuleSubtreeStream(self.adaptor, "rule keysymgroup")
try:
try:
- # XKBGrammar.g:107:2: ( '{' ( 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ',' )? '[' keysym+= NAME ( ',' keysym+= NAME )* ']' '}' -> ^( KEYSYMS ( ^( TOKEN_TYPE $tn1 $tn2) )? ( $keysym)+ ) )
- # XKBGrammar.g:107:4: '{' ( 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ',' )? '[' keysym+= NAME ( ',' keysym+= NAME )* ']' '}'
+ # XKBGrammar.g:108:2: ( '{' ( 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ',' )? keysymgroup ( ',' keysymgroup )* '}' -> ^( KEYSYMS ( ^( TOKEN_TYPE $tn1 $tn2) )? ( keysymgroup )+ ) )
+ # XKBGrammar.g:108:4: '{' ( 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ',' )? keysymgroup ( ',' keysymgroup )* '}'
char_literal43 = self.input.LT(1)
- self.match(self.input, 26, self.FOLLOW_26_in_keysyms482)
- stream_26.add(char_literal43)
- # XKBGrammar.g:107:8: ( 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ',' )?
+ self.match(self.input, 27, self.FOLLOW_27_in_keysyms486)
+ stream_27.add(char_literal43)
+ # XKBGrammar.g:108:8: ( 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ',' )?
alt7 = 2
LA7_0 = self.input.LA(1)
- if (LA7_0 == 40) :
+ if (LA7_0 == 41) :
alt7 = 1
if alt7 == 1:
- # XKBGrammar.g:107:9: 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ','
+ # XKBGrammar.g:108:9: 'type' '[' tn1= NAME ']' '=' tn2= DQSTRING ','
string_literal44 = self.input.LT(1)
- self.match(self.input, 40, self.FOLLOW_40_in_keysyms485)
- stream_40.add(string_literal44)
+ self.match(self.input, 41, self.FOLLOW_41_in_keysyms489)
+ stream_41.add(string_literal44)
char_literal45 = self.input.LT(1)
- self.match(self.input, 31, self.FOLLOW_31_in_keysyms487)
- stream_31.add(char_literal45)
+ self.match(self.input, 32, self.FOLLOW_32_in_keysyms491)
+ stream_32.add(char_literal45)
tn1 = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms491)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms495)
stream_NAME.add(tn1)
char_literal46 = self.input.LT(1)
- self.match(self.input, 32, self.FOLLOW_32_in_keysyms493)
- stream_32.add(char_literal46)
+ self.match(self.input, 33, self.FOLLOW_33_in_keysyms497)
+ stream_33.add(char_literal46)
char_literal47 = self.input.LT(1)
- self.match(self.input, 33, self.FOLLOW_33_in_keysyms495)
- stream_33.add(char_literal47)
+ self.match(self.input, 34, self.FOLLOW_34_in_keysyms499)
+ stream_34.add(char_literal47)
tn2 = self.input.LT(1)
- self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_keysyms499)
+ self.match(self.input, DQSTRING, self.FOLLOW_DQSTRING_in_keysyms503)
stream_DQSTRING.add(tn2)
char_literal48 = self.input.LT(1)
- self.match(self.input, 37, self.FOLLOW_37_in_keysyms501)
- stream_37.add(char_literal48)
+ self.match(self.input, 38, self.FOLLOW_38_in_keysyms505)
+ stream_38.add(char_literal48)
- char_literal49 = self.input.LT(1)
- self.match(self.input, 31, self.FOLLOW_31_in_keysyms505)
- stream_31.add(char_literal49)
- keysym = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms509)
- stream_NAME.add(keysym)
- if list_keysym is None:
- list_keysym = []
- list_keysym.append(keysym)
+ self._state.following.append(self.FOLLOW_keysymgroup_in_keysyms509)
+ keysymgroup49 = self.keysymgroup()
- # XKBGrammar.g:107:73: ( ',' keysym+= NAME )*
+ self._state.following.pop()
+ stream_keysymgroup.add(keysymgroup49.tree)
+ # XKBGrammar.g:108:68: ( ',' keysymgroup )*
while True: #loop8
alt8 = 2
LA8_0 = self.input.LA(1)
- if (LA8_0 == 37) :
+ if (LA8_0 == 38) :
alt8 = 1
if alt8 == 1:
- # XKBGrammar.g:107:74: ',' keysym+= NAME
+ # XKBGrammar.g:108:69: ',' keysymgroup
char_literal50 = self.input.LT(1)
- self.match(self.input, 37, self.FOLLOW_37_in_keysyms512)
- stream_37.add(char_literal50)
- keysym = self.input.LT(1)
- self.match(self.input, NAME, self.FOLLOW_NAME_in_keysyms516)
- stream_NAME.add(keysym)
- if list_keysym is None:
- list_keysym = []
- list_keysym.append(keysym)
+ self.match(self.input, 38, self.FOLLOW_38_in_keysyms512)
+ stream_38.add(char_literal50)
+ self._state.following.append(self.FOLLOW_keysymgroup_in_keysyms514)
+ keysymgroup51 = self.keysymgroup()
+ self._state.following.pop()
+ stream_keysymgroup.add(keysymgroup51.tree)
@@ -1514,23 +1505,19 @@ class XKBGrammarParser(Parser):
break #loop8
- char_literal51 = self.input.LT(1)
- self.match(self.input, 32, self.FOLLOW_32_in_keysyms520)
- stream_32.add(char_literal51)
char_literal52 = self.input.LT(1)
- self.match(self.input, 27, self.FOLLOW_27_in_keysyms522)
- stream_27.add(char_literal52)
+ self.match(self.input, 28, self.FOLLOW_28_in_keysyms518)
+ stream_28.add(char_literal52)
# AST Rewrite
- # elements: tn2, keysym, tn1
+ # elements: tn2, keysymgroup, tn1
# token labels: tn1, tn2
# rule labels: retval
- # token list labels: keysym
+ # token list labels:
# rule list labels:
retval.tree = root_0
stream_tn1 = RewriteRuleTokenStream(self.adaptor, "token tn1", tn1)
stream_tn2 = RewriteRuleTokenStream(self.adaptor, "token tn2", tn2)
- stream_keysym = RewriteRuleTokenStream(self.adaptor, "token keysym", list_keysym)
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", retval.tree)
@@ -1539,14 +1526,14 @@ class XKBGrammarParser(Parser):
root_0 = self.adaptor.nil()
- # 108:2: -> ^( KEYSYMS ( ^( TOKEN_TYPE $tn1 $tn2) )? ( $keysym)+ )
- # XKBGrammar.g:108:5: ^( KEYSYMS ( ^( TOKEN_TYPE $tn1 $tn2) )? ( $keysym)+ )
+ # 109:2: -> ^( KEYSYMS ( ^( TOKEN_TYPE $tn1 $tn2) )? ( keysymgroup )+ )
+ # XKBGrammar.g:109:5: ^( KEYSYMS ( ^( TOKEN_TYPE $tn1 $tn2) )? ( keysymgroup )+ )
root_1 = self.adaptor.nil()
root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(KEYSYMS, "KEYSYMS"), root_1)
- # XKBGrammar.g:108:15: ( ^( TOKEN_TYPE $tn1 $tn2) )?
+ # XKBGrammar.g:109:15: ( ^( TOKEN_TYPE $tn1 $tn2) )?
if stream_tn2.hasNext() or stream_tn1.hasNext():
- # XKBGrammar.g:108:15: ^( TOKEN_TYPE $tn1 $tn2)
+ # XKBGrammar.g:109:15: ^( TOKEN_TYPE $tn1 $tn2)
root_2 = self.adaptor.nil()
root_2 = self.adaptor.becomeRoot(self.adaptor.createFromType(TOKEN_TYPE, "TOKEN_TYPE"), root_2)
@@ -1558,7 +1545,146 @@ class XKBGrammarParser(Parser):
stream_tn2.reset();
stream_tn1.reset();
- # XKBGrammar.g:108:40: ( $keysym)+
+ # XKBGrammar.g:109:40: ( keysymgroup )+
+ if not (stream_keysymgroup.hasNext()):
+ raise RewriteEarlyExitException()
+
+ while stream_keysymgroup.hasNext():
+ self.adaptor.addChild(root_1, stream_keysymgroup.nextTree())
+
+
+ stream_keysymgroup.reset()
+
+ self.adaptor.addChild(root_0, root_1)
+
+
+
+ retval.tree = root_0
+
+
+
+
+ retval.stop = self.input.LT(-1)
+
+
+ retval.tree = self.adaptor.rulePostProcessing(root_0)
+ self.adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+
+
+ except RecognitionException, re:
+ self.reportError(re)
+ self.recover(self.input, re)
+ retval.tree = self.adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+ finally:
+
+ pass
+
+ return retval
+
+ # $ANTLR end keysyms
+
+ class keysymgroup_return(object):
+ def __init__(self):
+ self.start = None
+ self.stop = None
+
+ self.tree = None
+
+
+
+
+ # $ANTLR start keysymgroup
+ # XKBGrammar.g:112:1: keysymgroup : '[' keysym+= NAME ( ',' keysym+= NAME )* ']' -> ^( KEYSYMGROUP ( $keysym)+ ) ;
+ def keysymgroup(self, ):
+
+ retval = self.keysymgroup_return()
+ retval.start = self.input.LT(1)
+
+ root_0 = None
+
+ char_literal53 = None
+ char_literal54 = None
+ char_literal55 = None
+ keysym = None
+ list_keysym = None
+
+ char_literal53_tree = None
+ char_literal54_tree = None
+ char_literal55_tree = None
+ keysym_tree = None
+ stream_NAME = RewriteRuleTokenStream(self.adaptor, "token NAME")
+ stream_32 = RewriteRuleTokenStream(self.adaptor, "token 32")
+ stream_33 = RewriteRuleTokenStream(self.adaptor, "token 33")
+ stream_38 = RewriteRuleTokenStream(self.adaptor, "token 38")
+
+ try:
+ try:
+ # XKBGrammar.g:113:2: ( '[' keysym+= NAME ( ',' keysym+= NAME )* ']' -> ^( KEYSYMGROUP ( $keysym)+ ) )
+ # XKBGrammar.g:113:4: '[' keysym+= NAME ( ',' keysym+= NAME )* ']'
+ char_literal53 = self.input.LT(1)
+ self.match(self.input, 32, self.FOLLOW_32_in_keysymgroup550)
+ stream_32.add(char_literal53)
+ keysym = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keysymgroup554)
+ stream_NAME.add(keysym)
+ if list_keysym is None:
+ list_keysym = []
+ list_keysym.append(keysym)
+
+ # XKBGrammar.g:113:21: ( ',' keysym+= NAME )*
+ while True: #loop9
+ alt9 = 2
+ LA9_0 = self.input.LA(1)
+
+ if (LA9_0 == 38) :
+ alt9 = 1
+
+
+ if alt9 == 1:
+ # XKBGrammar.g:113:22: ',' keysym+= NAME
+ char_literal54 = self.input.LT(1)
+ self.match(self.input, 38, self.FOLLOW_38_in_keysymgroup557)
+ stream_38.add(char_literal54)
+ keysym = self.input.LT(1)
+ self.match(self.input, NAME, self.FOLLOW_NAME_in_keysymgroup561)
+ stream_NAME.add(keysym)
+ if list_keysym is None:
+ list_keysym = []
+ list_keysym.append(keysym)
+
+
+
+
+ else:
+ break #loop9
+
+
+ char_literal55 = self.input.LT(1)
+ self.match(self.input, 33, self.FOLLOW_33_in_keysymgroup565)
+ stream_33.add(char_literal55)
+ # AST Rewrite
+ # elements: keysym
+ # token labels:
+ # rule labels: retval
+ # token list labels: keysym
+ # rule list labels:
+
+ retval.tree = root_0
+ stream_keysym = RewriteRuleTokenStream(self.adaptor, "token keysym", list_keysym)
+
+ if retval is not None:
+ stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", retval.tree)
+ else:
+ stream_retval = RewriteRuleSubtreeStream(self.adaptor, "token retval", None)
+
+
+ root_0 = self.adaptor.nil()
+ # 114:2: -> ^( KEYSYMGROUP ( $keysym)+ )
+ # XKBGrammar.g:114:5: ^( KEYSYMGROUP ( $keysym)+ )
+ root_1 = self.adaptor.nil()
+ root_1 = self.adaptor.becomeRoot(self.adaptor.createFromType(KEYSYMGROUP, "KEYSYMGROUP"), root_1)
+
+ # XKBGrammar.g:114:19: ( $keysym)+
if not (stream_keysym.hasNext()):
raise RewriteEarlyExitException()
@@ -1594,7 +1720,7 @@ class XKBGrammarParser(Parser):
return retval
- # $ANTLR end keysyms
+ # $ANTLR end keysymgroup
class mapOptions_return(object):
def __init__(self):
@@ -1607,7 +1733,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start mapOptions
- # XKBGrammar.g:111:1: mapOptions : ( 'default' | 'hidden' | 'partial' | 'alphanumeric_keys' | 'modifier_keys' | 'alternate_group' | 'xkb_symbols' );
+ # XKBGrammar.g:117:1: mapOptions : ( 'default' | 'hidden' | 'partial' | 'alphanumeric_keys' | 'modifier_keys' | 'alternate_group' | 'xkb_symbols' );
def mapOptions(self, ):
retval = self.mapOptions_return()
@@ -1615,20 +1741,20 @@ class XKBGrammarParser(Parser):
root_0 = None
- set53 = None
+ set56 = None
- set53_tree = None
+ set56_tree = None
try:
try:
- # XKBGrammar.g:112:2: ( 'default' | 'hidden' | 'partial' | 'alphanumeric_keys' | 'modifier_keys' | 'alternate_group' | 'xkb_symbols' )
+ # XKBGrammar.g:118:2: ( 'default' | 'hidden' | 'partial' | 'alphanumeric_keys' | 'modifier_keys' | 'alternate_group' | 'xkb_symbols' )
# XKBGrammar.g:
root_0 = self.adaptor.nil()
- set53 = self.input.LT(1)
- if (41 <= self.input.LA(1) <= 47):
+ set56 = self.input.LT(1)
+ if (42 <= self.input.LA(1) <= 48):
self.input.consume();
- self.adaptor.addChild(root_0, self.adaptor.createWithPayload(set53))
+ self.adaptor.addChild(root_0, self.adaptor.createWithPayload(set56))
self._state.errorRecovery = False
else:
@@ -1669,7 +1795,7 @@ class XKBGrammarParser(Parser):
# $ANTLR start state
- # XKBGrammar.g:121:1: state : ( 'Shift' | 'Control' | 'Lock' | 'Mod1' | 'Mod2' | 'Mod3' | 'Mod4' | 'Mod5' );
+ # XKBGrammar.g:127:1: state : ( 'Shift' | 'Control' | 'Lock' | 'Mod1' | 'Mod2' | 'Mod3' | 'Mod4' | 'Mod5' );
def state(self, ):
retval = self.state_return()
@@ -1677,20 +1803,20 @@ class XKBGrammarParser(Parser):
root_0 = None
- set54 = None
+ set57 = None
- set54_tree = None
+ set57_tree = None
try:
try:
- # XKBGrammar.g:122:2: ( 'Shift' | 'Control' | 'Lock' | 'Mod1' | 'Mod2' | 'Mod3' | 'Mod4' | 'Mod5' )
+ # XKBGrammar.g:128:2: ( 'Shift' | 'Control' | 'Lock' | 'Mod1' | 'Mod2' | 'Mod3' | 'Mod4' | 'Mod5' )
# XKBGrammar.g:
root_0 = self.adaptor.nil()
- set54 = self.input.LT(1)
- if (48 <= self.input.LA(1) <= 55):
+ set57 = self.input.LT(1)
+ if (49 <= self.input.LA(1) <= 56):
self.input.consume();
- self.adaptor.addChild(root_0, self.adaptor.createWithPayload(set54))
+ self.adaptor.addChild(root_0, self.adaptor.createWithPayload(set57))
self._state.errorRecovery = False
else:
@@ -1726,66 +1852,69 @@ class XKBGrammarParser(Parser):
- FOLLOW_section_in_layout133 = frozenset([41, 42, 43, 44, 45, 46, 47])
- FOLLOW_EOF_in_layout136 = frozenset([1])
- FOLLOW_mapType_in_section159 = frozenset([26])
- FOLLOW_26_in_section161 = frozenset([29, 30, 34, 35, 36])
- FOLLOW_mapMaterial_in_section163 = frozenset([27, 29, 30, 34, 35, 36])
- FOLLOW_27_in_section166 = frozenset([28])
- FOLLOW_28_in_section168 = frozenset([1])
- FOLLOW_mapOptions_in_mapType196 = frozenset([21, 41, 42, 43, 44, 45, 46, 47])
- FOLLOW_DQSTRING_in_mapType199 = frozenset([1])
- FOLLOW_line_include_in_mapMaterial231 = frozenset([1])
- FOLLOW_line_name_in_mapMaterial237 = frozenset([28])
- FOLLOW_28_in_mapMaterial239 = frozenset([1])
- FOLLOW_line_keytype_in_mapMaterial245 = frozenset([28])
- FOLLOW_28_in_mapMaterial247 = frozenset([1])
- FOLLOW_line_key_in_mapMaterial253 = frozenset([28])
- FOLLOW_28_in_mapMaterial255 = frozenset([1])
- FOLLOW_line_modifier_map_in_mapMaterial261 = frozenset([28])
- FOLLOW_28_in_mapMaterial263 = frozenset([1])
- FOLLOW_29_in_line_include275 = frozenset([21])
- FOLLOW_DQSTRING_in_line_include277 = frozenset([1])
- FOLLOW_30_in_line_name297 = frozenset([31])
- FOLLOW_31_in_line_name299 = frozenset([22])
- FOLLOW_NAME_in_line_name303 = frozenset([32])
- FOLLOW_32_in_line_name305 = frozenset([33])
- FOLLOW_33_in_line_name307 = frozenset([21])
- FOLLOW_DQSTRING_in_line_name311 = frozenset([1])
- FOLLOW_34_in_line_keytype339 = frozenset([31])
- FOLLOW_31_in_line_keytype341 = frozenset([22])
- FOLLOW_NAME_in_line_keytype345 = frozenset([32])
- FOLLOW_32_in_line_keytype347 = frozenset([33])
- FOLLOW_33_in_line_keytype349 = frozenset([21])
- FOLLOW_DQSTRING_in_line_keytype353 = frozenset([1])
- FOLLOW_35_in_line_key381 = frozenset([22, 38])
- FOLLOW_keycode_in_line_key383 = frozenset([26])
- FOLLOW_keysyms_in_line_key385 = frozenset([1])
- FOLLOW_36_in_line_modifier_map407 = frozenset([48, 49, 50, 51, 52, 53, 54, 55])
- FOLLOW_state_in_line_modifier_map409 = frozenset([26])
- FOLLOW_26_in_line_modifier_map411 = frozenset([22, 38])
- FOLLOW_keycode_in_line_modifier_map413 = frozenset([27, 37])
- FOLLOW_37_in_line_modifier_map416 = frozenset([22, 38])
- FOLLOW_keycode_in_line_modifier_map418 = frozenset([27, 37])
- FOLLOW_27_in_line_modifier_map422 = frozenset([1])
- FOLLOW_NAME_in_keycode446 = frozenset([1])
- FOLLOW_38_in_keycode459 = frozenset([22])
- FOLLOW_NAME_in_keycode461 = frozenset([39])
- FOLLOW_39_in_keycode463 = frozenset([1])
- FOLLOW_26_in_keysyms482 = frozenset([31, 40])
- FOLLOW_40_in_keysyms485 = frozenset([31])
- FOLLOW_31_in_keysyms487 = frozenset([22])
- FOLLOW_NAME_in_keysyms491 = frozenset([32])
- FOLLOW_32_in_keysyms493 = frozenset([33])
- FOLLOW_33_in_keysyms495 = frozenset([21])
- FOLLOW_DQSTRING_in_keysyms499 = frozenset([37])
- FOLLOW_37_in_keysyms501 = frozenset([31])
- FOLLOW_31_in_keysyms505 = frozenset([22])
- FOLLOW_NAME_in_keysyms509 = frozenset([32, 37])
- FOLLOW_37_in_keysyms512 = frozenset([22])
- FOLLOW_NAME_in_keysyms516 = frozenset([32, 37])
- FOLLOW_32_in_keysyms520 = frozenset([27])
- FOLLOW_27_in_keysyms522 = frozenset([1])
+ FOLLOW_section_in_layout137 = frozenset([42, 43, 44, 45, 46, 47, 48])
+ FOLLOW_EOF_in_layout140 = frozenset([1])
+ FOLLOW_mapType_in_section163 = frozenset([27])
+ FOLLOW_27_in_section165 = frozenset([30, 31, 35, 36, 37])
+ FOLLOW_mapMaterial_in_section167 = frozenset([28, 30, 31, 35, 36, 37])
+ FOLLOW_28_in_section170 = frozenset([29])
+ FOLLOW_29_in_section172 = frozenset([1])
+ FOLLOW_mapOptions_in_mapType200 = frozenset([22, 42, 43, 44, 45, 46, 47, 48])
+ FOLLOW_DQSTRING_in_mapType203 = frozenset([1])
+ FOLLOW_line_include_in_mapMaterial235 = frozenset([1])
+ FOLLOW_line_name_in_mapMaterial241 = frozenset([29])
+ FOLLOW_29_in_mapMaterial243 = frozenset([1])
+ FOLLOW_line_keytype_in_mapMaterial249 = frozenset([29])
+ FOLLOW_29_in_mapMaterial251 = frozenset([1])
+ FOLLOW_line_key_in_mapMaterial257 = frozenset([29])
+ FOLLOW_29_in_mapMaterial259 = frozenset([1])
+ FOLLOW_line_modifier_map_in_mapMaterial265 = frozenset([29])
+ FOLLOW_29_in_mapMaterial267 = frozenset([1])
+ FOLLOW_30_in_line_include279 = frozenset([22])
+ FOLLOW_DQSTRING_in_line_include281 = frozenset([1])
+ FOLLOW_31_in_line_name301 = frozenset([32])
+ FOLLOW_32_in_line_name303 = frozenset([23])
+ FOLLOW_NAME_in_line_name307 = frozenset([33])
+ FOLLOW_33_in_line_name309 = frozenset([34])
+ FOLLOW_34_in_line_name311 = frozenset([22])
+ FOLLOW_DQSTRING_in_line_name315 = frozenset([1])
+ FOLLOW_35_in_line_keytype343 = frozenset([32])
+ FOLLOW_32_in_line_keytype345 = frozenset([23])
+ FOLLOW_NAME_in_line_keytype349 = frozenset([33])
+ FOLLOW_33_in_line_keytype351 = frozenset([34])
+ FOLLOW_34_in_line_keytype353 = frozenset([22])
+ FOLLOW_DQSTRING_in_line_keytype357 = frozenset([1])
+ FOLLOW_36_in_line_key385 = frozenset([23, 39])
+ FOLLOW_keycode_in_line_key387 = frozenset([27])
+ FOLLOW_keysyms_in_line_key389 = frozenset([1])
+ FOLLOW_37_in_line_modifier_map411 = frozenset([49, 50, 51, 52, 53, 54, 55, 56])
+ FOLLOW_state_in_line_modifier_map413 = frozenset([27])
+ FOLLOW_27_in_line_modifier_map415 = frozenset([23, 39])
+ FOLLOW_keycode_in_line_modifier_map417 = frozenset([28, 38])
+ FOLLOW_38_in_line_modifier_map420 = frozenset([23, 39])
+ FOLLOW_keycode_in_line_modifier_map422 = frozenset([28, 38])
+ FOLLOW_28_in_line_modifier_map426 = frozenset([1])
+ FOLLOW_NAME_in_keycode450 = frozenset([1])
+ FOLLOW_39_in_keycode463 = frozenset([23])
+ FOLLOW_NAME_in_keycode465 = frozenset([40])
+ FOLLOW_40_in_keycode467 = frozenset([1])
+ FOLLOW_27_in_keysyms486 = frozenset([32, 41])
+ FOLLOW_41_in_keysyms489 = frozenset([32])
+ FOLLOW_32_in_keysyms491 = frozenset([23])
+ FOLLOW_NAME_in_keysyms495 = frozenset([33])
+ FOLLOW_33_in_keysyms497 = frozenset([34])
+ FOLLOW_34_in_keysyms499 = frozenset([22])
+ FOLLOW_DQSTRING_in_keysyms503 = frozenset([38])
+ FOLLOW_38_in_keysyms505 = frozenset([32, 41])
+ FOLLOW_keysymgroup_in_keysyms509 = frozenset([28, 38])
+ FOLLOW_38_in_keysyms512 = frozenset([32, 41])
+ FOLLOW_keysymgroup_in_keysyms514 = frozenset([28, 38])
+ FOLLOW_28_in_keysyms518 = frozenset([1])
+ FOLLOW_32_in_keysymgroup550 = frozenset([23])
+ FOLLOW_NAME_in_keysymgroup554 = frozenset([33, 38])
+ FOLLOW_38_in_keysymgroup557 = frozenset([23])
+ FOLLOW_NAME_in_keysymgroup561 = frozenset([33, 38])
+ FOLLOW_33_in_keysymgroup565 = frozenset([1])
FOLLOW_set_in_mapOptions0 = frozenset([1])
FOLLOW_set_in_state0 = frozenset([1])
diff --git a/XKBGrammar/XKBGrammarParser.pyc b/XKBGrammar/XKBGrammarParser.pyc
Binary files differ.
diff --git a/XKBGrammar/XKBGrammarWalker.g b/XKBGrammar/XKBGrammarWalker.g
@@ -79,6 +79,11 @@ line_type
;
keysyms
- : ^(KEYSYMS ^(TOKEN_TYPE NAME DQSTRING) NAME+)
+ : ^(KEYSYMS ^(TOKEN_TYPE NAME DQSTRING) keysymgroup+)
;
+keysymgroup
+	: ^(KEYSYMGROUP NAME+)
+ ;
+
+
diff --git a/XKBGrammar/check_xkb.py b/XKBGrammar/check_xkb.py
@@ -6,7 +6,7 @@
import sys
import pdb
import antlr3
-from XKBGrammarLexer import XKBGrammarLexer, SECTION, MAPTYPE, MAPNAME, MAPOPTIONS, MAPMATERIAL, TOKEN_INCLUDE, TOKEN_NAME, TOKEN_KEY_TYPE, TOKEN_KEY, VALUE, KEYCODE, KEYCODEX, KEYSYMS, TOKEN_TYPE
+from XKBGrammarLexer import XKBGrammarLexer, SECTION, MAPTYPE, MAPNAME, MAPOPTIONS, MAPMATERIAL, TOKEN_INCLUDE, TOKEN_NAME, TOKEN_KEY_TYPE, TOKEN_KEY, VALUE, KEYCODE, KEYCODEX, KEYSYMS, TOKEN_TYPE,KEYSYMGROUP
from XKBGrammarParser import XKBGrammarParser
from XKBGrammarWalker import XKBGrammarWalker
@@ -39,6 +39,8 @@ xkbfilename = "gr"
if len(sys.argv) > 1:
xkbfilename = sys.argv[1]
+print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", sys.argv[1]
+
try:
xkbfile = open(xkbfilename, 'r')
except OSError:
@@ -93,28 +95,58 @@ for section in result.tree.getChildren():
for keyset in getChildrenByType(mapobject, TOKEN_KEY):
keycode = getChildrenListByType(keyset, KEYCODE)
keycodex = getChildrenListByType(keyset, KEYCODEX)
- keysyms = getChildrenListByType(keyset, KEYSYMS)
+ keysyms = getChildrenByType(keyset, KEYSYMS)
if len(keycode) == 1:
- print '\tkey %(kc)s = { [' % { "kc": keycode[0].getChild(0).getText() },
+ print '\tkey %(kc)s = { ' % { "kc": keycode[0].getChild(0).getText() },
elif len(keycodex) == 1:
- print '\tkey <%(kc)s> = { [' % { "kc": keycodex[0].getChild(0).getText() },
+ print '\tkey <%(kc)s> = { ' % { "kc": keycodex[0].getChild(0).getText() },
else:
print "\tInternal error keycode/keycodex:", len(keycode), len(keycodex)
sys.exit(-1)
first_time = True
- if keysyms[0].getChildCount() == 0:
- print "Internal error"
- sys.exit(-1)
- for ks in keysyms[0].getChildren():
- if first_time:
- if ks.getType() == TOKEN_TYPE:
- print 'type[%(t)s] = %(n)s, ' % {"t": ks.getChild(0).getText(), "n": ks.getChild(1).getText()},
- continue
- first_time = False
+ for ks in keysyms:
+ tokentype = getChildrenListByType(ks, TOKEN_TYPE)
+ keysymgroup = getChildrenListByType(ks, KEYSYMGROUP)
+ if len(tokentype) == 1:
+ print 'type[%(t)s] = %(v)s,' % { "t": tokentype[0].getChild(0).getText(), "v": tokentype[0].getChild(1).getText() },
+ elif len(tokentype) == 0:
+ pass
else:
- sys.stdout.write(", ");
- sys.stdout.write(ks.getText())
- print "] };"
+ print "Internal error"
+ sys.exit(-1)
+ ftg = True
+ for ksg in keysymgroup:
+ if ftg:
+ sys.stdout.write(' [ ')
+ ft = True
+ for lala in ksg.getChildren():
+ if ft:
+ sys.stdout.write(lala.getText())
+ ft = False
+ continue
+ sys.stdout.write(', ')
+ sys.stdout.write(lala.getText())
+ sys.stdout.write(' ]')
+ ftg = False
+ continue
+ sys.stdout.write(', [')
+ ft = True
+ for lala in ksg.getChildren():
+ if ft:
+ sys.stdout.write(lala.getText())
+ ft = False
+ continue
+ sys.stdout.write(', ')
+ sys.stdout.write(lala.getText())
+ sys.stdout.write(' ]')
+ # print "tokentype:", len(tokentype), tokentype[0].getText(),
+ # print "keysymgroup:", len(keysymgroup), keysymgroup[0],
+ # if len(tokentype) != 0:
+ # pass
+ #if len(keysymgroup) != 0:
+ # for ksg in keysymgroup:
+ # print ksg.getText(),
+ print " };"
else:
print "\tInternal error at map level,", mapobject.getText()
# sys.exit(-2)
diff --git a/XKBGrammar/gr b/XKBGrammar/gr
@@ -4,7 +4,7 @@ xkb_symbols "extended" {
name[Group1] = "Greece-Extended";
key.type[Group1] = "THREE_LEVEL"; // yeah, comment
key <AD01> { type[Group1] = "SEPARATE_CAPS_AND_SHIFT_ALPHABETIC",
- [ U1C9, U1C8, any, U1C7 ] }; // q
+ [ U1C9, U1C8], [ any, U1C7 ] }; // q
key <AD02> { type[Group1] = "SEPARATE_CAPS_AND_SHIFT_ALPHABETIC",
[ U1CC, U1CB, any, U1CA ] }; // w
};