import ply.lex as lex
## @package Lexer
# The lexer used to tokenize custom backup conditions.
#
# This package uses PLY (Python Lex-Yacc) to break user-defined backup
# conditions into tokens, hopefully making the condition language easy to
# extend as desired. A parser built on these tokens takes a condition string
# and reduces it to a boolean (true or false), which lets the program decide
# whether a backup is necessary.
#
# @note Tokens have the following attributes: type, value, lexpos
# @author Barrett Hostetter-Lewis
# @date 7-29-2012
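#
# A condition string might look like this (an illustrative example, not taken
# from the source):
#   LastBU >= 2 days && Modified == True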
## @var tokens
# The tokens the lexer will use
tokens = ['AND', #&&
'GREATER', #>
'LESS', #<
'LESS_EQUAL', #<=
'GREAT_EQUAL', #>=
'EQUAL', #==
'PLUS', #+
'OR', #||
'MONTH', #mon, month, months
'DAY', #d, day, days
'HOUR', #h, hour, hours
'MINUTE', #min, minute, minutes
'TRUE', #True
'FALSE', #False
'LAST_BACKUP', #LastBU
'MODIFIED', #Modified
'LPAREN', #(
'RPAREN', #)
'INT'] #Any integer
## @var t_ignore
# Regex for all ignored characters
t_ignore = ' \t'
## And Token
# @pre none
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_AND(t):
r'&&'
t.value = "AND"
return t
## Less than or equal to token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
# @note PLY tries function rules in definition order, so the two-character
# operators <= and >= must be defined before < and > or they would never match
def t_LESS_EQUAL(t):
    r'<='
    t.value = "LESS_EQUAL"
    return t
## Greater than or equal to token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_GREAT_EQUAL(t):
    r'>='
    t.value = "GREAT_EQUAL"
    return t
## Greater than token
# @pre none
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_GREATER(t):
    r'>'
    t.value = "GREATER"
    return t
## Less than token
# @pre none
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_LESS(t):
    r'<'
    t.value = "LESS"
    return t
## Equal token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_EQUAL(t):
r'=='
t.value = "EQUAL"
return t
## Binary plus token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_PLUS(t):
r'\+'
t.value = "PLUS"
return t
## Logical or token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_OR(t):
r'\|\|'
t.value = "OR"
return t
## Month token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_MONTH(t):
r'mon(th)?s?'
t.value = "MONTH"
return t
## Day token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_DAY(t):
r'd((ay)s?)?'
t.value = "DAY"
return t
## Hour token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_HOUR(t):
r'h((our)s?)?'
t.value = "HOUR"
return t
## Minute token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_MINUTE(t):
r'min(ute)?s?'
t.value = "MINUTE"
return t
## @var t_TRUE
# regex for the True token
t_TRUE = r'True'
## @var t_FALSE
# regex for the False token
t_FALSE = r'False'
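# @note PLY tries simple string rules such as t_TRUE, t_FALSE, and t_MODIFIED
# only after all function rules, sorted longest regex first, so they cannot
# shadow the function rules defined above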
## LastBU token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_LAST_BACKUP(t):
r'LastBU'
t.value = "LAST_BACKUP"
return t
## Left paren token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_LPAREN(t):
    r'\('
    t.value = "LPAREN"
    return t
## Right paren token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
def t_RPAREN(t):
r'\)'
t.value = "RPAREN"
return t
## @var t_MODIFIED
# regex for the Modified token
t_MODIFIED = r'Modified'
## Int token
# @post A matching string will be tokenized
# @param t The current token
# @return The created token object
# @note This is the only token that has a custom-defined value (a positive number for now)
def t_INT(t):
r'\d+'
t.value = int(t.value)
return t
## Error handling rule
# @post The offending character is reported and the lexer skips past it
# @param t The current token
def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
## @var lexer
# The module level lexer instance
lexer = lex.lex()
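## Quick demo: a minimal sketch, assuming the module is run directly. The
# sample condition string is illustrative and not taken from the source.
if __name__ == "__main__":
    lexer.input("LastBU >= 2 days && Modified == True")
    for tok in lexer:
        print(tok.type, tok.value)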