Add some CPU operations

parser_c.py
@@ -4,10 +4,6 @@ import dataclasses
from lex_c import lex_token
from lex_c import lex
import lex_c
from node_declear import dist_node_type_struct
from node_declear import dist_node_type_union
from node_declear import dist_node_type_enum
from node_declear import dist_node_type_typedef

_NodeTypeTable=[
    "file","vdecl","fdef"
@@ -16,23 +12,19 @@ _NodeTypeTable=[

@dataclasses.dataclass
class node:
    name:str
    name:list[str]=dataclasses.field(default_factory=list)
    type:str="base"
    token_list:list[lex_token]=dataclasses.field(default_factory=list)

    child:list=dataclasses.field(default_factory=list)
# file node
@dataclasses.dataclass
class node_file(node):
    type:str="file"
    body:list=dataclasses.field(default_factory=list)

# variable definition node
@dataclasses.dataclass
class node_variable_def(node):
    type:str="variable_def"
    vvalue=None
    vtype:str="unknown"
    vattr:list[str]=dataclasses.field(default_factory=list)

# struct declaration node
@dataclasses.dataclass
@@ -43,7 +35,6 @@ class node_struct_decl(node):
@dataclasses.dataclass
class node_struct_def(node):
    type:str="struct_def"
    body:list[node_variable_def]=dataclasses.field(default_factory=list)

# union declaration node
@dataclasses.dataclass
@@ -54,7 +45,6 @@ class node_union_decl(node):
@dataclasses.dataclass
class node_union_def(node):
    type:str="union_def"
    body:list[node_variable_def]=dataclasses.field(default_factory=list)

# enum declaration node
@dataclasses.dataclass
@@ -65,31 +55,21 @@ class node_enum_decl(node):
@dataclasses.dataclass
class node_enum_def(node):
    type:str="enum_def"
    body:list[dict]=dataclasses.field(default_factory=list)

# function declaration node
@dataclasses.dataclass
class node_func_decl(node):
    type:str="func_decl"
    rettype:str="unknown"
    retattr:list[str]=dataclasses.field(default_factory=list)
    para:list[node_variable_def]=dataclasses.field(default_factory=list)

# typedef node
@dataclasses.dataclass
class node_typedef(node):
    type:str="typedef"
    attr:list[str]=dataclasses.field(default_factory=list)
    body:node=None

# function definition node
@dataclasses.dataclass
class node_func_def(node):
    type:str="func_def"
    rettype:str="unknown"
    retattr:list[str]=dataclasses.field(default_factory=list)
    para:list[node_variable_def]=dataclasses.field(default_factory=list)
    body:list[node]=dataclasses.field(default_factory=list)

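These dataclasses form the parser's AST: every node carries a name, a type tag, the tokens it was built from, and child nodes. A minimal sketch of composing them by hand, assuming the definitions above (the values are illustrative stand-ins, not lexer output, and this snippet is not part of the patch):

# Illustrative only, not part of this patch.
member = node_variable_def(name="x", vtype="int", vattr=["unsigned"])
s = node_struct_def(name="point", body=[member])
f = node_file(name="main.c", body=[s])
print(f.type, [n.name for n in f.body])   # file ['point']
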
# find the matching closing bracket
@@ -129,6 +109,138 @@ def find_sentence(token_list:list[lex_token]):

def dist_node_type_struct(token_list:list[lex_token]):
    if(token_list[0].token==lex_c.TOKEN_STRUCT):
        if(token_list[1].token==lex_c.TOKEN_SYMBOL):
            if(len(token_list)==2):
                return node_struct_decl(name=token_list[1].buff.decode("utf-8"),token_list=token_list)
            elif(token_list[2].token==lex_c.TOKEN("{")):
                if not token_list[-1].token==lex_c.TOKEN("}"):
                    raise Exception("expected symbol '}' not found")
                v_list:list[node_variable_def]=[]
                token_list_local=token_list[3:-1]
                while len(token_list_local)>0:
                    sentence=find_sentence(token_list_local)
                    v_list.append(dist_node_type(token_list=sentence))
                    token_list_local=token_list_local[len(sentence):]
                return node_struct_def(name=token_list[1].buff.decode("utf-8"),token_list=token_list,body=v_list)
    raise Exception(f"syntax error {token_list[0]}")

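dist_node_type_struct splits the body between '{' and '}' into member declarations by repeatedly calling find_sentence and slicing the consumed tokens off the front. The same consumption pattern in isolation, with ';'-terminated strings standing in for lex tokens (find_sentence itself is assumed from the surrounding module; this is a sketch, not part of the patch):

# Pattern sketch only: consume a token stream sentence by sentence.
def split_sentences(tokens:list[str])->list[list[str]]:
    out=[]
    rest=tokens
    while len(rest)>0:
        # stand-in for find_sentence(): take everything up to and including the next ';'
        end=rest.index(";")+1 if ";" in rest else len(rest)
        out.append(rest[:end])
        rest=rest[end:]
    return out

print(split_sentences(["int","x",";","char","y",";"]))
# [['int', 'x', ';'], ['char', 'y', ';']]
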
def dist_node_type_union(token_list:list[lex_token]):
    if(token_list[0].token==lex_c.TOKEN_UNION):
        if(token_list[1].token==lex_c.TOKEN_SYMBOL):
            if(len(token_list)==2):
                return node_union_decl(name=token_list[1].buff.decode("utf-8"),token_list=token_list)
            elif(token_list[2].token==lex_c.TOKEN("{")):
                if not token_list[-1].token==lex_c.TOKEN("}"):
                    raise Exception("expected symbol '}' not found")
                v_list:list[node_variable_def]=[]
                token_list_local=token_list[3:-1]
                while len(token_list_local)>0:
                    sentence=find_sentence(token_list_local)
                    v_list.append(dist_node_type(token_list=sentence))
                    token_list_local=token_list_local[len(sentence):]
                return node_union_def(name=token_list[1].buff.decode("utf-8"),token_list=token_list,body=v_list)
    raise Exception(f"syntax error {token_list[0]}")

def dist_node_type_enum(token_list:list[lex_token]):
    if(token_list[0].token==lex_c.TOKEN_ENUM):
        if(token_list[1].token==lex_c.TOKEN_SYMBOL):
            if(len(token_list)==2):
                return node_enum_decl(name=token_list[1].buff.decode("utf-8"),token_list=token_list)
            elif(token_list[2].token==lex_c.TOKEN("{")):
                if not token_list[-1].token==lex_c.TOKEN("}"):
                    raise Exception("expected symbol '}' not found")
                token_list_local=token_list[3:-1]
                index=0
                v_list:list[dict]=[]
                while len(token_list_local)>0:
                    if(token_list_local[0].token==lex_c.TOKEN_SYMBOL):
                        key=token_list_local[0].buff.decode("utf-8")
                        if(token_list_local[1].token==lex_c.TOKEN("=") and token_list_local[2].token==lex_c.TOKEN_NUM):
                            index=int(token_list_local[2].buff.decode("utf-8"))
                            token_list_local=token_list_local[3:]
                        else:
                            index+=1
                            token_list_local=token_list_local[1:]
                        v_list.append({key:index})
                        if(len(token_list_local)>0):
                            if(token_list_local[0].token!=lex_c.TOKEN(",")):
                                raise Exception("enum members should be separated by ','")
                            token_list_local=token_list_local[1:]
                return node_enum_def(name=token_list[1].buff.decode("utf-8"),token_list=token_list,body=v_list)
    raise Exception(f"syntax error {token_list[0]}")

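The enumerator loop keeps a running index: an explicit '= number' overwrites it, anything else increments it, and each member is stored as a single-entry dict. The same rule on plain (name, value-or-None) pairs, as a sketch that is not part of the patch; note that because index starts at 0 and is incremented before use, the first implicit member ends up with the value 1:

# Sketch only: (name, explicit_value_or_None) pairs -> {name: value} entries.
def assign_enum_values(members):
    out=[]
    index=0
    for key,value in members:
        if value is not None:
            index=value        # explicit '=' overwrites the running counter
        else:
            index+=1           # otherwise: previous value + 1
        out.append({key:index})
    return out

print(assign_enum_values([("A",None),("B",5),("C",None)]))
# [{'A': 1}, {'B': 5}, {'C': 6}]
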
def dist_node_type_typedef(token_list:list[lex_token]):
    if(token_list[0].token==lex_c.TOKEN_TYPEDEF):
        attr=[]
        token_list_local=token_list
        if(token_list[-1].token!=lex_c.TOKEN_SYMBOL):
            raise Exception(f"no new type name defined {token_list[-1]}")
        name=token_list[-1].buff.decode("utf-8")
        token_list=token_list[1:]
        while token_list[0].token in [lex_c.TOKEN_UNSIGNED,lex_c.TOKEN_CONST]:
            attr.append(token_list[0].name)
            token_list=token_list[1:]
        if(token_list[0].token==lex_c.TOKEN_STRUCT or token_list[0].token==lex_c.TOKEN_UNION):
            attr.append(token_list[0].name)
            if(token_list[1].token==lex_c.TOKEN_SYMBOL):
                node_r=None
                attr.append(token_list[1].buff.decode("utf-8"))
                if(token_list[2].token==lex_c.TOKEN("{")):
                    node_r=dist_node_type(token_list=token_list[1:-1])
                elif(token_list[2].token==lex_c.TOKEN("*")):
                    attr.append(token_list[2].name)
                return node_typedef(name=name,token_list=token_list_local,child=node_r)
        if(token_list[0].token==lex_c.TOKEN_SYMBOL):
            # a custom type previously defined via typedef
            attr.append(token_list[0].buff.decode("utf-8"))
            token_list=token_list[1:]
        else:
            # built-in C types
            while(token_list[0].token in
                  [lex_c.TOKEN_INT,lex_c.TOKEN_CHAR,lex_c.TOKEN_SHORT,lex_c.TOKEN_LONG,lex_c.TOKEN_FLOAT,
                   lex_c.TOKEN_DOUBLE,lex_c.TOKEN_VOID,lex_c.TOKEN("*")]):
                attr.append(token_list[0].name)
                token_list=token_list[1:]
        if(len(token_list)>1):
            raise Exception(f"unexpected token {token_list[0]}")
        return node_typedef(name=name,token_list=token_list_local,attr=attr,body=None)
    raise Exception(f"syntax error {token_list[0]}")

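In the built-in-type branch of dist_node_type_typedef, qualifiers and base-type keywords are accumulated into attr and the final symbol becomes the new type name. A standalone sketch of that walk on plain keyword strings, collapsing the two keyword loops into one for brevity (the real code compares lex_c token constants; not part of the patch):

# Sketch only: "typedef unsigned long long u64;" after lexing, minus the ';'.
tokens=["typedef","unsigned","long","long","u64"]
name=tokens[-1]                      # the new type name is the last symbol
attr=[]
rest=tokens[1:]                      # drop 'typedef'
while rest and rest[0] in {"unsigned","const","int","char","short","long","float","double","void","*"}:
    attr.append(rest[0])
    rest=rest[1:]
print(name, attr)                    # u64 ['unsigned', 'long', 'long']
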
# determine the node type of a statement
def dist_node_type(token_list:list[lex_token]):
    if(token_list[0].token==lex_c.TOKEN_EXTERN):
@@ -151,11 +263,11 @@ if __name__ == "__main__":
    file_name="main.c"
    with open(file_name,mode='rb') as f:
        token_list=lex(f.read())
    file=node_file(name=file_name,token_list=token_list,body=[])
    file=node_file(name=file_name,token_list=token_list,child=[])
    while len(token_list)>0:
        sentence=find_sentence(token_list)
        node_d=dist_node_type(sentence)
        file.body.append(node_d)
        file.child.append(node_d)
        print('found a statement:')
        for item in sentence:
            print(f"\t{item}")
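
After the driver loop, the file node holds one entry per top-level statement. A hedged sketch of inspecting the result, assuming the loop above has run and using the patched child field (not part of the patch):

# Illustrative only, not part of this patch.
for n in file.child:
    print(n.type, n.name)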