# Pseudo Code — simple lexical tokenizer
# (NOTE: "Professional Documents / Culture Documents" header lines were
# site-chrome from the page this was scraped from, converted to comments
# here so the file is valid Python.)
import re
def ReadInputStream():
    """Prompt for one line of code on stdin, split it on whitespace,
    echo the lexeme list, and hand the lexemes to Tokenize().

    Side effects: opens "cc.txt" for writing and passes the handle to
    Tokenize(), which writes the classified tokens into it.
    """
    statement = input("Enter Code to tokenize: ")
    lexStat = statement.split()
    print(lexStat)
    lexeme = open("cc.txt", "w")
    # BUG FIX: original called Tokenize(lexStat, lex) — `lex` was never
    # defined here (the handle is named `lexeme`), causing a NameError.
    Tokenize(lexStat, lexeme)
def RemoveComments():
    """Print the contents of cc.txt with comments stripped.

    Rules (reconstructed from the original flattened code):
    - any line containing '#' is skipped entirely;
    - on the first line containing '/*', the following lines up to (but not
      including) the line containing '*/' are collected and printed as one
      chunk, then processing stops — the original `break` after printing the
      block comment region is preserved;
    - every other line is printed as-is (with its trailing newline, so
      output is double-spaced, as in the original).
    """
    print("Removing Comment :")
    # FIX: use a context manager — the original leaked the file handle.
    with open("cc.txt") as file:
        for line in file:
            if '#' in line:
                continue  # drop hash-comment lines
            if '/*' in line:
                deter = ""
                # Consume lines until the closing '*/' delimiter.
                # (Original also built an unused `breakLine` accumulator —
                # removed as dead code.)
                for secondLine in file:
                    if "*/" in secondLine:
                        break
                    deter += secondLine
                print(deter)
                break  # original stops after the first block comment
            else:
                print(line)
def Tokenize(lexStat, lex):
    """Classify each lexeme and write "<Category>: <word>" lines to `lex`.

    Categories, checked in the original priority order: logical operators,
    keywords, quoted string literals, identifiers (leading letter), integers
    (leading digit), punctuation, then arithmetic/relational operators.

    After classification the write handle is closed and cc.txt is re-read
    and echoed to stdout (preserving the original script flow).

    FIXES vs. original:
    - removed the stray double comma in the operator list (`'=',,` was a
      SyntaxError) and the duplicated '++'/'--' entries;
    - raw strings for the regex patterns;
    - close `lex` before reopening cc.txt — the original reopened the file
      while writes were still buffered, so the read could see nothing.
    """
    logical = {'AND', 'OR', 'NOT'}
    keywords = {'for', 'if', 'elif', 'else', 'while', 'def', 'range', 'in', 'print'}
    punct = {'(', ')', '{', '}', '[', ']'}
    operators = {'+', '-', '*', '/', '%', '++', '--', '=', ',', '.', ';', ':',
                 '<', '>', '<=', '>=', '<>'}
    for word in lexStat:
        if word in logical:
            lex.write("Logical: " + word + '\n')
        elif word in keywords:
            lex.write("Kw: " + word + '\n')
        elif re.findall(r'\"[a-zA-Z]+\"', word):
            lex.write("L: " + word + '\n')
        elif re.match(r"[a-zA-Z]", word):
            lex.write("Identifier: " + word + '\n')
        elif re.match(r"[0-9]", word):
            lex.write("Integers: " + word + '\n')
        elif word in punct:
            lex.write("Punct: " + word + '\n')
        elif word in operators:
            lex.write("M-Operators: " + word + '\n')
    lex.close()  # flush buffered tokens before re-reading the file
    with open("cc.txt", "r") as result:
        print("\n" + result.read())
# Script entry point: tokenize one line read from stdin, then echo the
# token file with comments removed. Guarded so importing this module for
# testing does not trigger the interactive prompt.
if __name__ == "__main__":
    ReadInputStream()
    RemoveComments()