diff --git a/.gitignore b/.gitignore
index fe0c959..6b59328 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,8 @@ zdatafront*
*log
examples/mysql/db
+
+
# frontend
frontend/node_modules
frontend/.env.local
@@ -37,4 +39,21 @@ frontend/.mfsu
frontend/.swc
frontend/pnpm-lock.yaml
-*.jar
\ No newline at end of file
+*.jar
+
+
+
+
+.spyproject/
+model_config.py
+model_config_example.py
+config.py
+**/service_onlyant
+**/ekg_test
+**/generalization_reasoning
+ekg.yaml
+*.ipynb
+**/web_operation
+examples/mysql/db
+tests/service/test_*
+tests/service/replacements.py
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 27f4262..84cbafd 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -149,7 +149,7 @@ services:
mysql:
- image: mysql:8.0.23
+ image: mysql:8.4.3
container_name: mysql
environment:
MYSQL_ROOT_PASSWORD: 'root'
diff --git a/examples/ekg_examples/who_is_spy_game.py b/examples/ekg_examples/who_is_spy_game.py
index 970242d..309da56 100644
--- a/examples/ekg_examples/who_is_spy_game.py
+++ b/examples/ekg_examples/who_is_spy_game.py
@@ -129,36 +129,46 @@ def hash_id(nodeId, sessionId='', otherstr = None):
new_nodes_2 = \
[GNode(id='剧本杀/谁是卧底', type='opsgptkg_intent', attributes={'ID': -5201231166222141228, 'teamids': '', 'gdb_timestamp': '1725088421109', 'description': '谁是卧底', 'name': '谁是卧底', 'extra': ''}),
- GNode(id='剧本杀/狼人杀', type='opsgptkg_intent', attributes={'ID': 5476827419397129797, 'description': '狼人杀', 'name': '狼人杀', 'extra': '', 'teamids': '', 'gdb_timestamp': '1724815561170'}),
- GNode(id='剧本杀/谁是卧底/智能交互', type='opsgptkg_schedule', attributes={'ID': 603563742932974030, 'extra': '', 'teamids': '', 'gdb_timestamp': '1725088469126', 'description': '智能交互', 'name': '智能交互', 'enable': 'True'}),
- GNode(id='剧本杀/狼人杀/智能交互', type='opsgptkg_schedule', attributes={'ID': -5931163481230280444, 'extra': '', 'teamids': '', 'gdb_timestamp': '1724815624907', 'description': '智能交互', 'name': '智能交互', 'enable': 'False'}),
+ GNode(id='剧本杀/谁是卧底/智能交互', type='opsgptkg_schedule', attributes={'ID': 603563742932974030, 'extra': '', 'teamids': '', 'gdb_timestamp': '1725088469126', 'description': '智能交互', 'name': '智能交互', 'enable': 'True', 'envdescription': '{"存活的玩家": "张伟、王鹏、李静、人类玩家"}'}),
GNode(id='剧本杀/谁是卧底/智能交互/分配座位', type='opsgptkg_task', attributes={'ID': 2011080219630105469, 'extra': '{"dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728912109030', 'executetype': '', 'description': '分配座位', 'name': '分配座位', 'accesscriteria': ''}),
- GNode(id='剧本杀/狼人杀/智能交互/位置选择', type='opsgptkg_task', attributes={'ID': 2541178858602010284, 'description': '位置选择', 'name': '位置选择', 'accesscriteria': '', 'extra': '{"memory_tag": "all"}', 'teamids': '', 'gdb_timestamp': '1724849735167', 'executetype': ''}),
GNode(id='剧本杀/谁是卧底/智能交互/角色分配和单词分配', type='opsgptkg_task', attributes={'ID': -1817533533893637377, 'accesscriteria': '', 'extra': '{"memory_tag": "None","dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728912123682', 'executetype': '', 'description': '角色分配和单词分配', 'name': '角色分配和单词分配'}),
- GNode(id='剧本杀/狼人杀/智能交互/角色选择', type='opsgptkg_task', attributes={'ID': -8695604495489305484, 'description': '角色选择', 'name': '角色选择', 'accesscriteria': '', 'extra': '{"memory_tag": "None"}', 'teamids': '', 'gdb_timestamp': '1724849085296', 'executetype': ''}),
- GNode(id='剧本杀/谁是卧底/智能交互/通知身份', type='opsgptkg_task', attributes={'ID': 8901447933395410622, 'extra': '{"pattern": "react","dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728912141060', 'executetype': '', 'description': '##角色##\n你正在参与“谁是卧底”这个游戏,你的角色是[主持人]。你熟悉“谁是卧底”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。\n目前已经完成 1)位置分配; 2)角色分配和单词分配。\n##任务##\n向所有玩家通知信息他们的 座位信息和单词信息。\n发送格式是: 【身份通知】你是{player_name}, 你的位置是{位置号}号, 你分配的单词是{单词}\n##详细步骤##\nstep1.依次向所有玩家通知信息他们的 座位信息和单词信息。发送格式是: 你是{player_name}, 你的位置是{位置号}号, 你分配的单词是{单词}\nstpe2.所有玩家信息都发送后,结束\n\n##注意##\n1. 每条信息只能发送给对应的玩家,其他人无法看到。\n2. 不要告诉玩家的角色信息,即不要高斯他是平民还是卧底角色\n3. 在将每个人的信息通知到后,本阶段任务结束\n##输出##\n请以列表的形式,给出参与者的所有行动。每个行动表示为JSON,格式为\n[{"action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}]}, ...]\n\n关键词含义如下:\n_ player_name (str): 行动方的 player_name,若行动方为主持人,为空,否则为玩家的 player_name;\n_ agent_name (str): 行动方的 agent_name,若为主持人,则 agent_name 为 "主持人",否则为玩家的 agent_name。\n_ content (str): 行动方的具体行为,若为主持人,content 为告知信息;否则,content 为玩家的具体行动。\n_ memory_tag (List[str]): 无论行动方是主持人还是玩家,memory_tag 固定为**所有**信息可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]\n\n#example#\n如果是玩家发言,则用 {"action": {"agent_name": "agent_name_c", "player_name":"player_name_d"}, "observation": [{ "memory_tag":["agent_name_a","agent_name_b"],"content": "str"}]} 格式表示。content是玩家发出的信息;memory_tag是这条信息可见的对象,需要填写agent名。不要填写 agent_description\n\n如果agent_name是主持人,则无需输入player_name, 且observation变为 Dungeon_Master。即{"action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{ "memory_tag":["agent_name_a","agent_name_b"], "content": "str",}]}\n\n##注意事项##\n1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。\n2. "observation" or "Dungeon_Master"如何选择?若 agent_name 为"主持人",则为"Dungeon_Master",否则为 "observation"。\n3. 输出列表的最后一个元素一定是{"action": "taskend"}。\n4. 整个list是一个jsonstr,请输出jsonstr,不用输出markdown格式\n5. 
结合已有的步骤,每次只输出下一个步骤,即一个 {"action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}]}', 'name': '通知身份', 'accesscriteria': ''}),
- GNode(id='剧本杀/狼人杀/智能交互/向玩家通知消息', type='opsgptkg_task', attributes={'ID': -4014299322597660132, 'extra': '{"pattern": "react"}', 'teamids': '', 'gdb_timestamp': '1725092109310', 'executetype': '', 'description': '##角色##\n你正在参与狼人杀这个游戏,你的角色是[主持人]。你熟悉狼人杀游戏的完整流程,你需要完成[任务],保证狼人杀游戏的顺利进行。\n目前已经完成位置分配和角色分配。\n##任务##\n向所有玩家通知信息他们的座位信息和角色信息。\n发送格式是: 你是{player_name}, 你的位置是{位置号}号,你的身份是{角色名}\n##注意##\n1. 每条信息只能发送给对应的玩家,其他人无法看到。\n##输出##\n请以列表的形式,给出参与者的所有行动。每个行动表示为Python可解析的JSON,格式为\n\n[{"action": {player_name, agent_name}, "observation" or "Dungeon_Master": [{content, memory_tag}, ...]}]\n\n关键词含义如下:\n_ player_name (str): 行动方的 player_name,若行动方为主持人,为空,否则为玩家的 player_name;\n_ agent_name (str): 行动方的 agent_name,若为主持人,则 agent_name 为 "主持人",否则为玩家的 agent_name。\n_ content (str): 行动方的具体行为,若为主持人,content 为告知信息;否则,content 为玩家的具体行动。\n_ memory_tag (List[str]): 无论行动方是主持人还是玩家,memory_tag 固定为**所有**信息可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]\n\n##example##\n如果是玩家发言,则用 {"action": {"agent_name": "agent_name_c", "player_name":"player_name_d"}, "observation": [{"content": "str", "memory_tag":["agent_name_a","agent_name_b"]}]} 格式表示。content是玩家发出的信息;memory_tag是这条信息可见的对象,需要填写agent名。不要填写 agent_description\n\n如果agent_name是主持人,则无需输入player_name, 且observation变为 Dungeon_Master。即{"action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{"content": "str", memory_tag:["agent_name_a","agent_name_b"]}]}\n\n##注意事项##\n1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。\n2. "observation" or "Dungeon_Master"如何选择?若 agent_name 为"主持人",则为"Dungeon_Master",否则为 "observation"。\n3. 输出列表的最后一个元素一定是{"action": "taskend"}。\n4. 整个list是一个jsonstr,请直接输出jsonstr,不用输出markdown格式\n\n##结果##', 'name': '向玩家通知消息', 'accesscriteria': ''}),
+ GNode(id='剧本杀/谁是卧底/智能交互/通知身份', type='opsgptkg_task', attributes={'ID': 8901447933395410622, 'extra': '{"pattern": "react","dodisplay":"True"}', 'action':'react', 'teamids': '', 'gdb_timestamp': '1728912141060', 'executetype': '', 'description': '##角色##\n你正在参与“谁是卧底”这个游戏,你的角色是[主持人]。你熟悉“谁是卧底”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。\n目前已经完成 1)位置分配; 2)角色分配和单词分配。\n##任务##\n向所有玩家通知信息他们的 座位信息和单词信息。\n发送格式是: 【身份通知】你是{player_name}, 你的位置是{位置号}号, 你分配的单词是{单词}\n##详细步骤##\nstep1.依次向所有玩家通知信息他们的 座位信息和单词信息。发送格式是: 你是{player_name}, 你的位置是{位置号}号, 你分配的单词是{单词}\nstpe2.所有玩家信息都发送后,结束\n\n##注意##\n1. 每条信息只能发送给对应的玩家,其他人无法看到。\n2. 不要告诉玩家的角色信息,即不要告诉他是平民还是卧底角色\n3. 在将每个人的信息通知到后,本阶段任务结束\n4.本环节所有的对话都有主持人发起\n', 'name': '通知身份', 'accesscriteria': ''}),
GNode(id='剧本杀/谁是卧底/智能交互/关键信息_1', type='opsgptkg_task', attributes={'ID': 3196717310525578616, 'gdb_timestamp': '1728913619628', 'executetype': '', 'description': '关键信息', 'name': '关键信息', 'accesscriteria': '', 'extra': '{"ignorememory":"True","dodisplay":"True"}', 'teamids': ''}),
- GNode(id='剧本杀/狼人杀/智能交互/狼人时刻', type='opsgptkg_task', attributes={'ID': 8926130661368382825, 'accesscriteria': 'OR', 'extra': '{"pattern": "react"}', 'teamids': '', 'gdb_timestamp': '1725092131051', 'executetype': '', 'description': '##背景##\n在狼人杀游戏中,主持人通知当前存活的狼人玩家指认一位击杀对象,所有狼人玩家给出击杀目标,主持人确定最终结果。\n\n##任务##\n整个流程分为6个步骤:\n1. 存活狼人通知:主持人向所有的狼人玩家广播,告知他们当前存活的狼人玩家有哪些。\n2. 第一轮讨论:主持人告知所有存活的狼人玩家投票,从当前存活的非狼人玩家中,挑选一个想要击杀的玩家。\n3. 第一轮投票:按照座位顺序,每一位存活的狼人为自己想要击杀的玩家投票。\n4. 第一轮结果反馈:主持人统计所有狼人的票数分布,确定他们是否达成一致。若达成一致,告知所有狼人最终被击杀的玩家的player_name,流程结束;否则,告知他们票数的分布情况,并让所有狼人重新投票指定击杀目标,主持人需要提醒他们,若该轮还不能达成一致,则取票数最大的目标为最终击杀对象。\n5. 第二轮投票:按照座位顺序,每一位存活的狼人为自己想要击杀的玩家投票。\n6. 第二轮结果反馈:主持人统计第二轮投票中所有狼人的票数分布,取票数最大的玩家为最终击杀对象,如果存在至少两个对象的票数最大且相同,取座位号最大的作为最终击杀对象。主持人告知所有狼人玩家最终被击杀的玩家的player_name。\n\n该任务的参与者只有狼人玩家和主持人,信息可见对象是所有狼人玩家。\n\n##输出##\n请以列表的形式,给出参与者的所有行动。每个行动表示为Python可解析的JSON,格式为\n\n[{"action": {player_name, agent_name}, "observation" or "Dungeon_Master": [{content, memory_tag}, ...]}]\n\n关键词含义如下:\n_ player_name (str): 行动方的 player_name,若行动方为主持人,为空,否则为玩家的 player_name;\n_ agent_name (str): 行动方的 agent_name,若为主持人,则 agent_name 为 "主持人",否则为玩家的 agent_name。\n_ content (str): 行动方的具体行为,若为主持人,content 为告知信息;否则,content 为玩家的具体行动。\n_ memory_tag (List[str]): 无论行动方是主持人还是玩家,memory_tag 固定为**所有**信息可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]\n\n##example##\n如果是玩家发言,则用 {"action": {"agent_name": "agent_name_c", "player_name":"player_name_d"}, "observation": [{"content": "str", "memory_tag":["agent_name_a","agent_name_b"]}]} 格式表示。content是玩家发出的信息;memory_tag是这条信息可见的对象,需要填写agent名。不要填写 agent_description\n\n如果agent_name是主持人,则无需输入player_name, 且observation变为 Dungeon_Master。即{"action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{"content": "str", memory_tag:["agent_name_a","agent_name_b"]}]}\n\n##注意事项##\n1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。\n2. "observation" or "Dungeon_Master"如何选择?若 agent_name 为"主持人",则为"Dungeon_Master",否则为 "observation"。\n3. 输出列表的最后一个元素一定是{"action": "taskend"}。\n4. 
整个list是一个jsonstr,请直接输出jsonstr,不用输出markdown格式\n\n##结果##', 'name': '狼人时刻'}),
- GNode(id='剧本杀/谁是卧底/智能交互/开始新一轮的讨论', type='opsgptkg_task', attributes={'ID': -6077057339616293423, 'accesscriteria': 'OR', 'extra': '{"pattern": "react", "endcheck": "True",\n"memory_tag":"all",\n"dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728913634866', 'executetype': '', 'description': '###以上为本局游戏记录###\n\n\n##背景##\n你正在参与“谁是卧底”这个游戏,你的角色是[主持人]。你熟悉“谁是卧底”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。\n\n##任务##\n以结构化的语句来模拟进行 谁是卧底的讨论环节。 在这一个环节里,所有主持人先宣布目前存活的玩家,然后每位玩家按照座位顺序发言\n\n\n##详细步骤##\nstep1. 主持人根据本局游戏历史记录,感知最开始所有的玩家 以及 在前面轮数中已经被票选死亡的玩家。注意死亡的玩家不能参与本轮游戏。得到当前存活的玩家个数以及其player_name。 并告知所有玩家当前存活的玩家个数以及其player_name。\nstep2. 主持人确定发言规则并告知所有玩家,发言规则步骤如下: 存活的玩家按照座位顺序由小到大进行发言\n(一个例子:假设总共有5个玩家,如果3号位置处玩家死亡,则发言顺序为:1_>2_>4_>5)\nstep3. 存活的的玩家按照顺序依次发言\nstpe4. 在每一位存活的玩家都发言后,结束\n\n \n \n##注意##\n1.之前的游戏轮数可能已经投票选中了某位/某些玩家,被票选中的玩家会立即死亡,不再视为存活玩家,死亡的玩家不能参与本轮游戏 \n2.你要让所有存活玩家都参与发言,不能遗漏任何存活玩家。在本轮所有玩家只发言一次\n3.该任务的参与者为主持人和所有存活的玩家,信息可见对象为所有玩家。\n4.不仅要模拟主持人的发言,还需要模拟玩家的发言\n5.每一位存活的玩家均发完言后,本阶段结束\n\n\n\n##输出##\n请以列表的形式,给出参与者的所有行动。每个行动表示为JSON,格式为\n[ {"thought": str, "action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}] }, ...]\n\n\n\n\n关键词含义如下:\n_ thought (str): 主持人执行行动的一些思考,包括分析玩家的存活状态,对历史对话信息的理解,对当前任务情况的判断等。 \n_ player_name (str): 行动方的 player_name,若行动方为主持人,为空 ;否则为玩家的 player_name;\n_ agent_name (str): 行动方的 agent_name,若为主持人,则 agent_name 为 "主持人",否则为玩家的 agent_name。\n_ content (str): 行动方的具体行为,若为主持人,content 为告知信息;否则,content 为玩家的具体行动。\n_ memory_tag (List[str]): 无论行动方是主持人还是玩家,memory_tag 固定为本条信息的可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]\n\n##example##\n如果是玩家发言,则用 {"thought": "str", "action": {"agent_name": "agent_name_c", "player_name":"player_name_d"}, "observation": [{ "memory_tag":["agent_name_a","agent_name_b"],"content": "str"}]} 格式表示。content是玩家发出的信息;memory_tag是这条信息可见的对象,需要填写agent名。不要填写 agent_description\n\n如果agent_name是主持人,则无需输入player_name, 且observation变为 Dungeon_Master。即{"thought": "str", "action": {"agent_name": "主持人", "player_name":""}, 
"Dungeon_Master": [{ "memory_tag":["agent_name_a","agent_name_b"], "content": "str",}]}\n\n##注意事项##\n1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。\n2. "observation" or "Dungeon_Master"如何选择?若 agent_name 为"主持人",则为"Dungeon_Master",否则为 "observation"。\n3. 输出列表的最后一个元素一定是{"action": "taskend"}。\n4. 整个list是一个jsonstr,请输出jsonstr,不用输出markdown格式\n5. 结合已有的步骤,每次只输出下一个步骤,即一个 {"thought": str, "action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}]}\n6. 如果是人类玩家发言, 一定要选择类似 agent_人类玩家 这样的agent_name', 'name': '开始新一轮的讨论'}),
- GNode(id='剧本杀/狼人杀/智能交互/天亮讨论', type='opsgptkg_task', attributes={'ID': 274796810216558717, 'gdb_timestamp': '1725106348469', 'executetype': '', 'description': '##角色##\n你正在参与狼人杀这个游戏,你的角色是[主持人]。你熟悉狼人杀游戏的完整流程,你需要完成[任务],保证狼人杀游戏的顺利进行。\n##任务##\n你的任务如下: \n1. 告诉玩家昨晚发生的情况: 首先告诉玩家天亮了,然后你需要根据过往信息,告诉所有玩家,昨晚是否有玩家死亡。如果有,则向所有人宣布死亡玩家的名字,你只能宣布死亡玩家是谁如:"昨晚xx玩家死了",不要透露任何其他信息。如果没有,则宣布昨晚是平安夜。\n2. 确定发言规则并告诉所有玩家:\n确定发言规则步骤如下: \n第一步:确定第一个发言玩家,第一个发言的玩家为死者的座位号加1位置处的玩家(注意:最后一个位置+1的位置号为1号座位),如无人死亡,则从1号玩家开始。\n第二步:告诉所有玩家从第一个发言玩家开始发言,除了死亡玩家,每个人都需要按座位号依次讨论,只讨论一轮,所有人发言完毕后结束。注意不能遗忘指挥任何存活玩家发言!\n以下是一个例子:\n```\n总共有5个玩家,如果3号位置处玩家死亡,则第一个发言玩家为4号位置处玩家,因此从他开始发言,发言顺序为:4_>5_>1_>2\n```\n3. 依次指定存活玩家依次发言\n4. 被指定的玩家依次发言\n##注意##\n1. 你必须根据规则确定第一个发言玩家是谁,然后根据第一个发言玩家的座位号,确定所有人的发言顺序并将具体发言顺序并告知所有玩家,不要做任何多余解释\n2. 你要让所有存活玩家都参与发言,不能遗漏任何存活玩家\n##输出##\n请以列表的形式,给出参与者的所有行动。每个行动表示为Python可解析的JSON,格式为\n\n[{"action": {player_name, agent_name}, "observation" or "Dungeon_Master": [{content, memory_tag}, ...]}]\n\n关键词含义如下:\n_ player_name (str): 行动方的 player_name,若行动方为主持人,为空,否则为玩家的 player_name;\n_ agent_name (str): 行动方的 agent_name,若为主持人,则 agent_name 为 "主持人",否则为玩家的 agent_name。\n_ content (str): 行动方的具体行为,若为主持人,content 为告知信息;否则,content 为玩家的具体行动。\n_ memory_tag (List[str]): 无论行动方是主持人还是玩家,memory_tag 固定为**所有**信息可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]\n\n##example##\n如果是玩家发言,则用 {"action": {"agent_name": "agent_name_c", "player_name":"player_name_d"}, "observation": [{"content": "str", "memory_tag":["agent_name_a","agent_name_b"]}]} 格式表示。content是玩家发出的信息;memory_tag是这条信息可见的对象,需要填写agent名。不要填写 agent_description\n\n如果agent_name是主持人,则无需输入player_name, 且observation变为 Dungeon_Master。即{"action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{"content": "str", memory_tag:["agent_name_a","agent_name_b"]}]}\n\n##注意事项##\n1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。\n2. "observation" or "Dungeon_Master"如何选择?若 agent_name 为"主持人",则为"Dungeon_Master",否则为 "observation"。\n3. 输出列表的最后一个元素一定是{"action": "taskend"}。\n4. 
整个list是一个jsonstr,请直接输出jsonstr,不用输出markdown格式\n\n##结果(请直接在后面输出,如果后面已经有部分结果,请续写。一定要保持续写后的内容结合前者能构成一个合法的 jsonstr)##', 'name': '天亮讨论', 'accesscriteria': '', 'extra': '{"pattern": "react"}', 'teamids': ''}),
+ GNode(id='剧本杀/谁是卧底/智能交互/开始新一轮的讨论', type='opsgptkg_task', attributes={'ID': -6077057339616293423, 'accesscriteria': 'OR','action':'plan', 'extra': '{"pattern": "react", "endcheck": "True",\n"memory_tag":"all",\n"dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728913634866', 'executetype': '', 'description': '###以上为本局游戏记录###\n\n\n##背景##\n你正在参与“谁是卧底”这个游戏,你的角色是[主持人]。你熟悉“谁是卧底”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。\n\n##任务##\n以结构化的语句来模拟进行 谁是卧底的讨论环节。 在这一个环节里,所有主持人先宣布目前存活的玩家,然后每位玩家按照座位顺序发言\n\n\n##详细步骤##\nstep1. 主持人根据本局游戏历史记录,感知最开始所有的玩家 以及 在前面轮数中已经被票选死亡的玩家。注意死亡的玩家不能参与本轮游戏。得到当前存活的玩家个数以及其player_name。 并告知所有玩家当前存活的玩家个数以及其player_name。\nstep2. 主持人确定发言规则并告知所有玩家,发言规则步骤如下: 存活的玩家按照座位顺序由小到大进行发言\n(一个例子:假设总共有5个玩家,如果3号位置处玩家死亡,则发言顺序为:1_>2_>4_>5)\nstep3. 存活的的玩家按照顺序依次发言\nstpe4. 在每一位存活的玩家都发言后,结束\n\n \n \n##注意##\n1.之前的游戏轮数可能已经投票选中了某位/某些玩家,被票选中的玩家会立即死亡,不再视为存活玩家, 存活玩家为:#$#存活的玩家#$#,死亡的玩家不能参与本轮游戏 \n2.你要让所有存活玩家都参与发言,不能遗漏任何存活玩家。在本轮所有玩家只发言一次\n3.该任务的参与者为主持人和所有存活的玩家,信息可见对象为所有玩家。\n4.不仅要模拟主持人的发言,还需要模拟玩家的发言\n5.每一位存活的玩家均发完言后,本阶段结束\n\n\n\n', 'name': '开始新一轮的讨论'}),
GNode(id='剧本杀/谁是卧底/智能交互/关键信息_2', type='opsgptkg_task', attributes={'ID': -8309123437761850283, 'description': '关键信息', 'name': '关键信息', 'accesscriteria': '', 'extra': '{"ignorememory":"True","dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728913648645', 'executetype': ''}),
- GNode(id='剧本杀/狼人杀/智能交互/票选凶手', type='opsgptkg_task', attributes={'ID': 1492108834523573937, 'accesscriteria': '', 'extra': '{"pattern": "react"}', 'teamids': '', 'gdb_timestamp': '1725106389716', 'executetype': '', 'description': '##角色##\n你正在参与“谁是卧底”这个游戏,你的角色是[主持人]。你熟悉“谁是卧底”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。\n\n##任务##\n你的任务如下:\n1. 告诉玩家投票规则,规则步骤如下: \nstep1: 确定讨论阶段第一个发言的玩家A\nstep2: 从A玩家开始,按座位号依次投票,每个玩家只能对一个玩家进行投票,投票这个玩家表示认为该玩家是“卧底”。每个玩家只能投一次票。\nstep3: 将完整投票规则告诉所有玩家\n2. 指挥存活玩家依次投票。\n3. 被指定的玩家进行投票\n4. 主持人统计投票结果,并告知所有玩家,投出的角色是谁。\n\n该任务的参与者为主持人和所有存活的玩家,信息可见对象是所有玩家。\n\n##输出##\n请以列表的形式,给出参与者的所有行动。每个行动表示为Python可解析的JSON,格式为\n```\n{"action": {player_name, agent_name}, "observation" or "Dungeon_Master": [{content, memory_tag}, ...]}\n```\n关键词含义如下:\n_ player_name (str): 行动方的 player_name,若行动方为主持人,为空,否则为玩家的 player_name;\n_ agent_name (str): 行动方的 agent_name,若为主持人,则 agent_name 为 "主持人",否则为玩家的 agent_name。\n_ content (str): 行动方的具体行为,若为主持人,content 为告知信息;否则,content 为玩家的具体行动。\n_ memory_tag (List[str]): 无论行动方是主持人还是玩家,memory_tag 固定为**所有**信息可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]\n\n##example##\n如果是玩家发言,则用 {"action": {"agent_name": "agent_name_c", "player_name":"player_name_d"}, "observation": [{"content": "str", "memory_tag":["agent_name_a","agent_name_b"]}]} 格式表示。content是玩家发出的信息;memory_tag是这条信息可见的对象,需要填写agent名。不要填写 agent_description\n\n如果agent_name是主持人,则无需输入player_name, 且observation变为 Dungeon_Master。即{"action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{"content": "str", memory_tag:["agent_name_a","agent_name_b"]}]}\n\n##注意事项##\n1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。\n2. "observation" or "Dungeon_Master"如何选择?若 agent_name 为"主持人",则为"Dungeon_Master",否则为 "observation"。\n3. 输出列表的最后一个元素一定是{"action": "taskend"}。\n4. 整个list是一个jsonstr,请直接输出jsonstr,不用输出markdown格式\n\n##结果##\n', 'name': '票选凶手'}),
- GNode(id='剧本杀/谁是卧底/智能交互/票选卧底_1', type='opsgptkg_task', attributes={'ID': 267468674566989196, 'teamids': '', 'gdb_timestamp': '1728913670477', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n##角色##\n你是一个统计票数大师,你正在参与“谁是卧底”这个游戏,你的角色是[主持人]。你熟悉“谁是卧底”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。 现在是投票阶段。\n\n##任务##\n以结构化的语句来模拟进行 谁是卧底的投票环节, 也仅仅只模拟投票环节,投票环节结束后就本阶段就停止了,由后续的阶段继续进行游戏。 在这一个环节里,由主持人先告知大家投票规则,然后组织每位存活玩家按照座位顺序发言投票, 所有人投票后,本阶段结束。 \n##详细步骤##\n你的任务如下:\nstep1. 向所有玩家通知现在进入了票选环节,在这个环节,每个人都一定要投票指定某一个玩家为卧底\nstep2. 主持人确定投票顺序并告知所有玩家。 投票顺序基于如下规则: 1: 存活的玩家按照座位顺序由小到大进行投票(一个例子:假设总共有5个玩家,如果3号位置处玩家死亡,则投票顺序为:1_>2_>4_>5)2: 按座位号依次投票,每个玩家只能对一个玩家进行投票。每个玩家只能投一次票。3:票数最多的玩家会立即死亡\n\nstep3. 存活的的玩家按照顺序进行投票\nstep4. 所有存活玩家发言完毕,主持人宣布投票环节结束\n该任务的参与者为主持人和所有存活的玩家,信息可见对象是所有玩家。\n##注意##\n\n1.之前的游戏轮数可能已经投票选中了某位/某些玩家,被票选中的玩家会立即死亡,不再视为存活玩家 \n2.你要让所有存活玩家都参与投票,不能遗漏任何存活玩家。在本轮每一位玩家只投票一个人\n3.该任务的参与者为主持人和所有存活的玩家,信息可见对象为所有玩家。\n4.不仅要模拟主持人的发言,还需要模拟玩家的发言\n5.不允许玩家自己投自己,如果出现了这种情况,主持人会提醒玩家重新投票。\n\n\n\n##输出##\n请以列表的形式,给出参与者的所有行动。每个行动表示为JSON,格式为\n["thought": str, {"action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}]}, ...]\n关键词含义如下:\n_ thought (str): 主持人执行行动的一些思考,包括分析玩家的存活状态,对历史对话信息的理解,对当前任务情况的判断。 \n_ player_name (str): ***的 player_name,若行动方为主持人,为空,否则为玩家的 player_name;\n_ agent_name (str): ***的 agent_name,若为主持人,则 agent_name 为 "主持人",否则为玩家的 agent_name。\n_ content (str): 行动方的具体行为,若为主持人,content 为告知信息;否则,content 为玩家的具体行动。\n_ memory_tag (List[str]): 无论行动方是主持人还是玩家,memory_tag 固定为**所有**信息可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]\n##example##\n如果是玩家发言,则用 {"thought": "str", "action": {"agent_name": "agent_name_c", "player_name":"player_name_d"}, "observation": [{ "memory_tag":["agent_name_a","agent_name_b"],"content": "str"}]} 格式表示。content是玩家发出的信息;memory_tag是这条信息可见的对象,需要填写agent名。不要填写 agent_description\n如果agent_name是主持人,则无需输入player_name, 且observation变为 Dungeon_Master。即{"thought": "str", "action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{ 
"memory_tag":["agent_name_a","agent_name_b"], "content": "str",}]}\n##注意事项##\n1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。\n2. "observation" or "Dungeon_Master"如何选择?若 agent_name 为"主持人",则为"Dungeon_Master",否则为 "observation"。\n3. 输出列表的最后一个元素一定是{"action": "taskend"}。\n4. 整个list是一个jsonstr,请输出jsonstr,不用输出markdown格式\n5. 结合已有的步骤,每次只输出下一个步骤,即一个 {"thought": str, "action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}]}\n6. 如果是人类玩家发言, 一定要选择类似 人类agent 这样的agent_name', 'name': '票选卧底', 'accesscriteria': '', 'extra': '{"pattern": "react", "endcheck": "True", "memory_tag":"all","dodisplay":"True"}'}),
+ GNode(id='剧本杀/谁是卧底/智能交互/票选卧底_1', type='opsgptkg_task', attributes={'ID': 267468674566989196, 'teamids': '', 'action':'parallel','gdb_timestamp': '1728913670477', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n##角色##\n你是一个统计票数大师,你正在参与“谁是卧底”这个游戏,你的角色是[主持人]。你熟悉“谁是卧底”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。 现在是投票阶段。\n\n##任务##\n以结构化的语句来模拟进行 谁是卧底的投票环节, 也仅仅只模拟投票环节,投票环节结束后就本阶段就停止了,由后续的阶段继续进行游戏。 在这一个环节里,由主持人先告知大家投票规则,然后组织每位存活玩家按照座位顺序发言投票, 所有人投票后,本阶段结束。 \n##详细步骤##\n你的任务如下:\nstep1. 向所有玩家通知现在进入了票选环节,在这个环节,每个人都一定要投票指定某一个玩家为卧底\nstep2. 主持人确定投票顺序并告知所有玩家。 投票顺序基于如下规则: 1: 存活的玩家按照座位顺序由小到大进行投票(一个例子:假设总共有5个玩家,如果3号位置处玩家死亡,则投票顺序为:1_>2_>4_>5)2: 按座位号依次投票,每个玩家只能对一个玩家进行投票。每个玩家只能投一次票。3:票数最多的玩家会立即死亡\n\nstep3. 存活的的玩家按照顺序进行投票\nstep4. 所有存活玩家发言完毕,主持人宣布投票环节结束\n该任务的参与者为主持人和所有存活的玩家,信息可见对象是所有玩家。\n##注意##\n\n1.之前的游戏轮数可能已经投票选中了某位/某些玩家,被票选中的玩家会立即死亡,不再视为存活玩家, 存活玩家为:#$#存活的玩家#$#。\n2.你要让所有存活玩家都参与投票,不能遗漏任何存活玩家。在本轮每一位玩家只投票一个人\n3.该任务的参与者为主持人和所有存活的玩家,信息可见对象为所有玩家。\n4.不仅要模拟主持人的发言,还需要模拟玩家的发言\n5.不允许玩家自己投自己,如果出现了这种情况,主持人会提醒玩家重新投票。\n\n\n\n', 'name': '票选卧底', 'accesscriteria': '', 'extra': '{"pattern": "react", "endcheck": "True", "memory_tag":"all","dodisplay":"True"}'}),
GNode(id='剧本杀/谁是卧底/智能交互/关键信息_4', type='opsgptkg_task', attributes={'ID': -4669093152651945828, 'extra': '{"ignorememory":"True","dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728913685959', 'executetype': '', 'description': '关键信息_4', 'name': '关键信息_4', 'accesscriteria': ''}),
- GNode(id='剧本杀/谁是卧底/智能交互/统计票数', type='opsgptkg_task', attributes={'ID': -6836070348442528830, 'teamids': '', 'gdb_timestamp': '1728913701092', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n##角色##\n你是一个统计票数大师,你非常擅长计数以及统计信息。你正在参与“谁是卧底”这个游戏,你的角色是[主持人]。你熟悉“谁是卧底”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。 现在是票数统计阶段\n\n##任务##\n以结构化的语句来模拟进行 谁是卧底的票数统计阶段, 也仅仅只票数统计阶段环节,票数统计阶段结束后就本阶段就停止了,由后续的阶段继续进行游戏。 在这一个环节里,由主持人根据上一轮存活的玩家投票结果统计票数。 \n##详细步骤##\n你的任务如下:\nstep1. 主持人感知上一轮投票环节每位玩家的发言, 统计投票结果,格式为[{"player_name":票数}]. \nstep2 然后,主持人宣布死亡的玩家,以最大票数为本轮被投票的目标,如果票数相同,则取座位号高的角色死亡。并告知所有玩家本轮被投票玩家的player_name。(格式为【重要通知】本轮死亡的玩家为XXX)同时向所有玩家宣布,被投票中的角色会视为立即死亡(即不再视为存活角色)\nstep3. 在宣布死亡玩家后,本阶段流程结束,由后续阶段继续推进游戏\n该任务的参与者为主持人和所有存活的玩家,信息可见对象是所有玩家。\n##注意##\n1.如果有2个或者两个以上的被玩家被投的票数相同,则取座位号高的玩家死亡。并告知大家原因:票数相同,取座位号高的玩家死亡\n2.在统计票数时,首先确认存活玩家的数量,再先仔细回忆,谁被投了。 最后统计每位玩家被投的次数。 由于每位玩家只有一票,所以被投次数的总和等于存活玩家的数量 \n3.通知完死亡玩家是谁后,本阶段才结束,由后续阶段继续推进游戏。输出 {"action": "taskend"}即可\n4.主持人只有当通知本轮死亡的玩家时,才使用【重要通知】的前缀,其他情况下不要使用【重要通知】前缀\n5.只统计上一轮投票环节的情况\n##example##\n{"thought": "在上一轮中, 存活玩家有 小北,李光,赵鹤,张良 四个人。 其中 小北投了李光, 赵鹤投了小北, 张良投了李光, 李光投了张良。总结被投票数为: 李光:2票; 小北:1票,张良:1票. 
Check一下,一共有四个人投票了,被投的票是2(李光)+1(小北)+1(张良)=4,总结被投票数没有问题。 因此李光的票最多", "action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{ "memory_tag":["all"], "content": "李光:2票; 小北:1票,张良:1票 .因此李光的票最多.【重要通知】本轮死亡玩家是李光",}]}\n\n##example##\n{"thought": "在上一轮中, 存活玩家有 小北,人类玩家,赵鹤,张良 四个人。 其中 小北投了人类玩家, 赵鹤投了小北, 张良投了小北, 人类玩家投了张良。总结被投票数为:小北:2票,人类玩家:1票,张良:0票 .Check一下,一共有四个人投票了,被投的票是2(小北)+1(人类玩家)+张良(0)=3,总结被投票数有问题。 更正总结被投票数为:小北:2票,人类玩家:1票,张良:1票。因此小北的票最多", "action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{ "memory_tag":["all"], "content": "小北:2票,人类玩家:1票,张良:1票 .因此小北的票最多.【重要通知】本轮死亡玩家是小北",}]}\n\n\n##输出##\n请以列表的形式,给出参与者的所有行动。每个行动表示为JSON,格式为\n["thought": str, {"action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}]}, ...]\n关键词含义如下:\n_ thought (str): 主持人执行行动的一些思考,包括分析玩家的存活状态,对历史对话信息的理解,对当前任务情况的判断。 \n_ player_name (str): ***的 player_name,若行动方为主持人,为空,否则为玩家的 player_name;\n_ agent_name (str): ***的 agent_name,若为主持人,则 agent_name 为 "主持人",否则为玩家的 agent_name。\n_ content (str): 行动方的具体行为,若为主持人,content 为告知信息;否则,content 为玩家的具体行动。\n_ memory_tag (List[str]): 无论行动方是主持人还是玩家,memory_tag 固定为**所有**信息可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]\n##example##\n如果是玩家发言,则用 {"thought": "str", "action": {"agent_name": "agent_name_c", "player_name":"player_name_d"}, "observation": [{ "memory_tag":["agent_name_a","agent_name_b"],"content": "str"}]} 格式表示。content是玩家发出的信息;memory_tag是这条信息可见的对象,需要填写agent名。不要填写 agent_description\n如果agent_name是主持人,则无需输入player_name, 且observation变为 Dungeon_Master。即{"thought": "str", "action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{ "memory_tag":["agent_name_a","agent_name_b"], "content": "str",}]}\n##注意事项##\n1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。\n2. "observation" or "Dungeon_Master"如何选择?若 agent_name 为"主持人",则为"Dungeon_Master",否则为 "observation"。\n3. 输出列表的最后一个元素一定是{"action": "taskend"}。\n4. 整个list是一个jsonstr,请输出jsonstr,不用输出markdown格式\n5. 
结合已有的步骤,每次只输出下一个步骤,即一个 {"thought": str, "action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}]}\n6. 如果是人类玩家发言, 一定要选择类似 人类agent 这样的agent_name', 'name': '统计票数', 'accesscriteria': '', 'extra': '{"pattern": "react", "endcheck": "True", "memory_tag":"all","model_name":"gpt_4","dodisplay":"True"}'}),
+ GNode(id='剧本杀/谁是卧底/智能交互/统计票数', type='opsgptkg_task', attributes={'ID': -6836070348442528830, 'teamids': '', 'action':'react','gdb_timestamp': '1728913701092', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n##角色##\n你是一个统计票数大师,你非常擅长计数以及统计信息。你正在参与“谁是卧底”这个游戏,你的角色是[主持人]。你熟悉“谁是卧底”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。 现在是票数统计阶段\n\n##任务##\n以结构化的语句来模拟进行 谁是卧底的票数统计阶段, 也仅仅只票数统计阶段环节,票数统计阶段结束后就本阶段就停止了,由后续的阶段继续进行游戏。 在这一个环节里,由主持人根据上一轮存活的玩家投票结果统计票数。 \n##详细步骤##\n你的任务如下:\nstep1. 主持人感知上一轮投票环节每位玩家的发言, 统计投票结果,格式为[{"player_name":票数}]. \nstep2 然后,主持人宣布死亡的玩家,以最大票数为本轮被投票的目标,如果票数相同,则取座位号高的角色死亡。并告知所有玩家本轮被投票玩家的player_name。(格式为【重要通知】本轮死亡的玩家为XXX)同时向所有玩家宣布,被投票中的角色会视为立即死亡(即不再视为存活角色)\nstep3. 在宣布死亡玩家后,本阶段流程结束,由后续阶段继续推进游戏\n该任务的参与者为主持人和所有存活的玩家,信息可见对象是所有玩家。\n##注意##\n1.如果有2个或者两个以上的被玩家被投的票数相同,则取座位号高的玩家死亡。并告知大家原因:票数相同,取座位号高的玩家死亡\n2.在统计票数时,首先确认存活玩家的数量,再先仔细回忆,谁被投了。 最后统计每位玩家被投的次数。 由于每位玩家只有一票,所以被投次数的总和等于存活玩家的数量 \n3.通知完死亡玩家是谁后,本阶段才结束,由后续阶段继续推进游戏。输出 {"action": "taskend"}即可\n4.主持人只有当通知本轮死亡的玩家时,才使用【重要通知】的前缀,其他情况下不要使用【重要通知】前缀\n5.只统计上一轮投票环节的情况\n##example##\n{"thought": "在上一轮中, 存活玩家有 小北,李光,赵鹤,张良 四个人。 其中 小北投了李光, 赵鹤投了小北, 张良投了李光, 李光投了张良。总结被投票数为: 李光:2票; 小北:1票,张良:1票. Check一下,一共有四个人投票了,被投的票是2(李光)+1(小北)+1(张良)=4,总结被投票数没有问题。 因此李光的票最多", "action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{ "memory_tag":["all"], "content": "李光:2票; 小北:1票,张良:1票 .因此李光的票最多.【重要通知】本轮死亡玩家是李光",}]}\n\n##example##\n{"thought": "在上一轮中, 存活玩家有 小北,人类玩家,赵鹤,张良 四个人。 其中 小北投了人类玩家, 赵鹤投了小北, 张良投了小北, 人类玩家投了张良。总结被投票数为:小北:2票,人类玩家:1票,张良:0票 .Check一下,一共有四个人投票了,被投的票是2(小北)+1(人类玩家)+张良(0)=3,总结被投票数有问题。 更正总结被投票数为:小北:2票,人类玩家:1票,张良:1票。因此小北的票最多", "action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{ "memory_tag":["all"], "content": "小北:2票,人类玩家:1票,张良:1票 .因此小北的票最多.【重要通知】本轮死亡玩家是小北",}]}\n\n\n', 'name': '统计票数', 'accesscriteria': '', 'extra': '{"pattern": "react", "endcheck": "True", "memory_tag":"all","model_name":"gpt_4","dodisplay":"True"}','updaterule':'{"存活的玩家":""}'}),
GNode(id='剧本杀/谁是卧底/智能交互/关键信息_3', type='opsgptkg_task', attributes={'ID': -4800215480474522940, 'accesscriteria': '', 'extra': '{"ignorememory":"True","dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728913715255', 'executetype': '', 'description': '关键信息', 'name': '关键信息'}),
GNode(id='剧本杀/谁是卧底/智能交互/判断游戏是否结束', type='opsgptkg_task', attributes={'ID': -5959590132883379159, 'description': '判断游戏是否结束', 'name': '判断游戏是否结束', 'accesscriteria': '', 'extra': '{"memory_tag": "None","dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728913728308', 'executetype': ''}),
GNode(id='剧本杀/谁是卧底/智能交互/事实_1', type='opsgptkg_phenomenon', attributes={'ID': -525629912140732688, 'description': '是', 'name': '是', 'extra': '', 'teamids': '', 'gdb_timestamp': '1725089138724'}),
GNode(id='剧本杀/谁是卧底/智能交互/事实_2', type='opsgptkg_phenomenon', attributes={'ID': 4216433814773851843, 'teamids': '', 'gdb_timestamp': '1725089593085', 'description': '否', 'name': '否', 'extra': ''}),
GNode(id='剧本杀/谁是卧底/智能交互/给出每个人的单词以及最终胜利者', type='opsgptkg_task', attributes={'ID': 8878899410716129093, 'extra': '{"dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1728913745186', 'executetype': '', 'description': '给出每个人的单词以及最终胜利者', 'name': '给出每个人的单词以及最终胜利者', 'accesscriteria': ''}),
- GNode(id='剧本杀/狼人杀/智能交互/判断游戏是否结束', type='opsgptkg_task', attributes={'ID': -2316854558435035646, 'description': '判断游戏是否结束 ', 'name': '判断游戏是否结束 ', 'accesscriteria': '', 'extra': '{"memory_tag": "None"}', 'teamids': '', 'gdb_timestamp': '1725092210244', 'executetype': ''}),
- GNode(id='剧本杀/狼人杀/智能交互/事实_2', type='opsgptkg_phenomenon', attributes={'ID': -6298561983042120406, 'extra': '', 'teamids': '', 'gdb_timestamp': '1724816562165', 'description': '否', 'name': '否'}),
- GNode(id='剧本杀/狼人杀/智能交互/事实_1', type='opsgptkg_phenomenon', attributes={'ID': 6987562967613654408, 'gdb_timestamp': '1724816495297', 'description': '是', 'name': '是', 'extra': '', 'teamids': ''}),
- GNode(id='剧本杀/l狼人杀/智能交互/宣布游戏胜利者', type='opsgptkg_task', attributes={'ID': -758955621725402723, 'extra': '', 'teamids': '', 'gdb_timestamp': '1725097362872', 'executetype': '', 'description': '判断游戏是否结束', 'name': '判断游戏是否结束', 'accesscriteria': ''}),
GNode(id='剧本杀', type='opsgptkg_intent', attributes={'ID': -3388526698926684245, 'description': '文本游戏相关(如狼人杀等)', 'name': '剧本杀', 'extra': '', 'teamids': '', 'gdb_timestamp': '1724815537102'})]
+new_nodes_2 = new_nodes_2+ \
+[
+GNode(id='剧本杀/狼人杀', type='opsgptkg_intent', attributes={'ID': 5476827419397129797, 'description': '狼人杀', 'name': '狼人杀', 'extra': '', 'teamids': '', 'gdb_timestamp': '1724815561170'}),
+GNode(id='剧本杀/狼人杀/智能交互', type='opsgptkg_schedule', attributes={'ID': -5931163481230280444, 'extra': '', 'teamids': '', 'gdb_timestamp': '1724815624907', 'description': '智能交互', 'name': '智能交互', 'enable': 'False', 'envdescription': '{"存活的玩家": "梁军、朱丽、周欣怡、贺子轩、沈强、韩刚、周杰、人类玩家", "剩余毒药的数量": "1", "剩余解药的数量": "1"}'}),
+GNode(id='剧本杀/狼人杀/智能交互/狼人杀分配座位', type='opsgptkg_task', attributes={'ID': 2541178858602010284, 'description': '狼人杀分配座位', 'name': '狼人杀分配座位', 'accesscriteria': '', 'extra': '{"dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1724849735167', 'executetype': ''}),
+GNode(id='剧本杀/狼人杀/智能交互/狼人杀角色分配和单词分配', type='opsgptkg_task', attributes={'ID': -8695604495489305484, 'description': '狼人杀角色分配和单词分配', 'name': '狼人杀角色分配和单词分配', 'accesscriteria': '', 'extra': '{"memory_tag": "None","dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1724849085296', 'executetype': ''}),
+GNode(id='剧本杀/狼人杀/智能交互/向玩家通知消息', type='opsgptkg_task', attributes={'ID': -4014299322597660132, 'extra': '{"pattern": "react","dodisplay":"True"}', 'teamids': '', 'action':'react', 'gdb_timestamp': '1725092109310', 'executetype': '', 'description': '##角色##\n你正在参与狼人杀这个游戏,你的角色是[主持人]。你熟悉狼人杀游戏的完整流程,你需要完成[任务],保证狼人杀游戏的顺利进行。\n目前已经完成位置分配和角色分配。\n##任务##\n向所有玩家通知信息他们的座位信息和角色信息。\n发送格式是:你是{player_name}, 你的位置是{位置号}号,你的身份是{角色名}。\n##详细步骤##\nstep1.依次向所有玩家通知信息他们的座位信息和角色信息。发送格式是:你是{player_name}, 你的位置是{位置号}号, 你的身份是{角色名}。\nstpe2.所有玩家信息都发送后,结束。\n##注意##\n1. 每条信息只能发送给对应的玩家,其他人无法看到。\n2. 在将每个人的信息通知到后,本阶段任务结束。3.本环节所有的对话都有主持人发起。\n', 'name': '向玩家通知消息', 'accesscriteria': ''}),
+GNode(id='剧本杀/狼人杀/智能交互/狼人时刻_投票', type='opsgptkg_task', attributes={'ID': 8926130661368382825, 'accesscriteria': 'OR', 'extra': '{"pattern": "react", "endcheck": "True", "dodisplay":"True"}', 'action':'react', 'teamids': '', 'gdb_timestamp': '1725092131050', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n\n##背景##\n在狼人杀游戏中,主持人通知当前存活的狼人玩家击杀一位其他存活玩家,所有狼人玩家投票给出击杀目标。\n\n##任务##\n以结构化的语句来模拟进行狼人杀的狼人_投票环节。\n整个流程分为2个步骤:\n1. 存活狼人通知:主持人向所有的狼人玩家广播,告知他们当前存活的狼人玩家有哪些。\n2. 狼人投票:按照座位顺序,每一位存活的狼人明确选择要击杀一位目前存活的玩家。\n\n##example##\n{"thought": "第一晚没有什么有用信息,随机击杀一个除了狼人队友以外的玩家即可。", "action": {"agent_name": "agent_梁军", "player_name": "梁军"}, "observation": [{"memory_tag": ["agent_梁军", "agent_周欣怡", "agent_人类玩家"], "content": "我是3号梁军,我决定今晚击杀2号玩家周杰。"}]}\n\n{"thought": "第一晚没有什么有用信息,随机击杀一个除了狼人队友以外的玩家即可。", "action": {"agent_name": "agent_周欣怡", "player_name": "周欣怡"}, "observation": [{"memory_tag": ["agent_梁军", "agent_周欣怡", "agent_人类玩家"], "content": "我是8号周欣怡,我决定今晚击杀2号玩家周杰。"}]}\n\n##注意##\n1.之前的游戏轮数可能已经投票选中了某位/某些狼人玩家或者被女巫毒死,这些狼人玩家会立即死亡,不再视为存活的狼人玩家, 死亡的狼人玩家不能参与本轮游戏。\n2.你要让所有存活的狼人玩家都参与发言,不能遗漏任何存活的狼人玩家。在本轮所有的狼人玩家只发言一次。\n3.该任务的参与者为主持人和所有存活的狼人玩家,信息可见对象为所有狼人玩家。\n4.不仅要模拟主持人的发言,还需要模拟狼人玩家的发言。\n5.狼人发言不要分析前几轮游戏没有发生过的内容。\n6.每一位存活的狼人玩家均发完言后,主持人宣布狼人时刻_投票环节结束。\n7.不可击杀其他狼人玩家。\n\n该任务的参与者只有狼人玩家和主持人,信息可见对象是所有狼人玩家, 其他角色玩家不可见。', 'name': '狼人时刻_投票'}),
+GNode(id='剧本杀/狼人杀/智能交互/狼人时刻_统计票数', type='opsgptkg_task', attributes={'ID': 8926130661368382925, 'accesscriteria': 'OR', 'extra': '{"pattern": "react", "endcheck": "True", "dodisplay":"True"}', 'action':'react', 'teamids': '', 'gdb_timestamp': '1725092131051', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n\n##背景##\n你是一个统计票数大师,你正在参与“狼人杀”这个游戏,你的角色是[主持人]。\n在狼人杀游戏中,主持人通知当前存活的狼人玩家投票给出击杀目标,主持人确定最终结果。\n\n##任务##\nstep1. 主持人感知上一轮投票环节每位狼人玩家的发言,统计投票结果,格式为[{"player_name":票数}]. \nstep2. 然后,主持人宣布死亡的玩家,以最大票数为本轮被投票的目标,如果票数相同,则取座位号高的角色死亡。并告知所有狼人玩家本轮被投票玩家的player_name。(输出格式为【重要通知】本轮死亡的玩家为XXX)同时向所有玩家宣布,被投票中的角色会视为立即死亡(即不再视为存活角色)。\nstep3. 在宣布死亡玩家后,本阶段流程结束,由后续阶段继续推进游戏。\n\n##注意##\n1.如果有2个或者两个以上的被玩家被投的票数相同,则取座位号高的玩家死亡。并告知大家原因:票数相同,取座位号高的玩家死亡。\n2.在统计票数时,首先确认存活玩家的数量,再先仔细回忆,谁被投了。最后统计玩家被投的次数。由于每位存活的狼人玩家只有一票,所以被投次数的总和等于存活狼人玩家的数量。\n3.通知完死亡玩家是谁后,本阶段才结束,由后续阶段继续推进游戏。\n4.主持人只有当通知本轮死亡的玩家时,才使用【重要通知】的前缀,其他情况下不要使用【重要通知】前缀。\n5.只统计上一轮狼人投票环节的情况。\n6.输出格式为“【重要通知】本轮死亡的玩家为XXX”,不要输出票数统计情况([{"player_name":票数}])等其他内容。\n\n##example##\n{"thought": "在上一轮中, 存活的狼人玩家有8号朱丽、6号韩刚、5号人类玩家。其中朱丽投了2号周欣怡,韩刚投了3号梁军,人类玩家也投了2号周欣怡。总结被投票数为:2号周欣怡:2票; 3号梁军:1票. Check一下,一共有三个人投票了,被投的票是2(周欣怡)+1(梁军)=3,总结被投票数没有问题。因此2号周欣怡的票最多。", "action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{"memory_tag":["agent_朱丽","agent_韩刚","agent_人类玩家"], "content": "【重要通知】本轮死亡的玩家为周欣怡"}]}\n\n##example##\n{"thought": "在上一轮中, 存活的狼人玩家有1号朱丽、2号韩刚和6号人类玩家。其中朱丽投了3号周欣怡,韩刚投了7号周杰,人类玩家也投了7号周杰。总结被投票数为:3号周欣怡:1票; 7号周杰:2票. Check一下,一共有三个人投票了,被投的票是1(周欣怡)+2(周杰)=3,总结被投票数没有问题。因此7号周杰的票最多。", "action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{"memory_tag":["agent_朱丽","agent_韩刚","agent_人类玩家"], "content": "【重要通知】本轮死亡的玩家为周杰"}]}\n\n该任务的参与者为主持人和所有存活的狼人玩家,信息可见对象是所有狼人,其他角色不可见', 'name': '狼人时刻_统计票数', 'updaterule':'{"存活的玩家":""}'}),
+GNode(id='剧本杀/狼人杀/智能交互/预言家时刻', type='opsgptkg_task', attributes={'ID': 8926130661368382826, 'accesscriteria': 'OR', 'extra': '{"pattern": "react", "endcheck": "True", "dodisplay":"True"}', 'action':'react', 'teamids': '', 'gdb_timestamp': '1725092131051', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n\n##背景##\n在狼人杀游戏中,主持人询问“预言家”查验哪一位存活玩家的身份,“预言家”挑选自己想要查验的玩家,主持人说明被挑选玩家是“好人”还是“狼人”。\n\n##存活的玩家##\n存活的玩家:#$#存活的玩家#$#\n\n##任务##\n整个流程分为4个步骤:\n1. 主持人判断预言家是否存活,若预言家已经死亡,则直接结束这个阶段。\n2. 主持人询问“预言家”要查验哪一位存活玩家的身份。\n3. “预言家”挑选出一个自己想要查验的玩家,注意每次只能查验一个玩家。\n4. 主持人告知“预言家”被查验玩家的身份属性:若该玩家是狼人,则身份属性为“狼人”;否则,身份属性为“好人”,不能让预言家再次查验。\n\n##注意##\n1. 若预言家已经死亡,则直接结束这个阶段。\n2. 预言家发言后,一定要告知预言家被查验玩家的身份属性,只可告诉预言家被查验玩家是“狼人”还是“好人”,不可透露具体“好人”角色。告知身份属性后,本阶段结束。\n\n##example##\n{"thought": "根据5号韩刚预言家的选择,我需要告诉他8号周杰的身份。", "action": {"agent_name": "主持人", "player_name": ""}, "Dungeon_Master": [{"memory_tag": ["agent_韩刚"], "content": "8号周杰的身份是狼人。"}]}\n\n##example##\n{"thought": "根据8号周杰预言家的选择,我需要告诉他2号周欣怡的身份。", "action": {"agent_name": "主持人", "player_name": ""}, "Dungeon_Master": [{"memory_tag": ["agent_周杰"], "content": "2号欣怡的身份是好人。"}]}\n\n该任务的参与者只有“预言家”和主持人,信息可见对象是“预言家”,其他角色玩家不可见。', 'name': '预言家时刻'}),
+GNode(id='剧本杀/狼人杀/智能交互/女巫时刻', type='opsgptkg_task', attributes={'ID': 8926130661368382827, 'accesscriteria': 'OR', 'extra': '{"pattern": "react", "endcheck": "True", "dodisplay":"True"}', 'action':'react', 'teamids': '', 'gdb_timestamp': '1725092131053', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n\n##背景##\n在狼人杀游戏中,主持人告知“女巫”使用身上的药剂,“女巫”明确是否使用药剂以及使用的对象。\n\n##存活的玩家##\n存活的玩家:#$#存活的玩家#$#\n\n##剩余毒药和解药的数量##\n剩余毒药的数量:#$#剩余毒药的数量#$#\n剩余解药的数量:#$#剩余解药的数量#$#\n\n##任务##\n整个流程分为8个步骤:\n1. 若女巫之前已经死亡或者刚被狼人击杀则本阶段结束;否则主持人根据剩余毒药和解药的数量告知“女巫”:当前毒药和解药是否已经使用,询问该轮是否需要使用药剂。\n2. “女巫”根据这两瓶药剂的使用情况,告知主持人自己是否想要使用。\n3. 若“女巫”明确不使用药剂,流程结束;否则,如果“女巫”的解药还未使用,主持人告知“女巫”今晚哪位玩家被狼人击杀了,询问她是否要使用解药;如果“女巫”的解药已使用,进入步骤5。\n4. “女巫”告知主持人是否要使用身上的解药,来复活被狼人击杀的玩家。\n5. 如果“女巫”的毒药还未使用,主持人询问“女巫”是否要使用身上的毒药;否则,若毒药已使用,流程结束。\n6. “女巫”告知主持人:自己是否要使用毒药。\n7. 若“女巫”明确要使用毒药,主持人询问“女巫”要将毒药用在哪位玩家身上,从而击杀该玩家;否则,流程结束。\n8. “女巫”告知主持人:要将毒药用在哪位目前还存活的玩家身上。\n\n##注意##\n1. 若女巫已经死亡,则直接结束这个阶段。\n2. 若女巫仍然存活,一定要严格按照任务中的8个步骤执行。8个步骤顺利执行后,本阶段结束。\n\n该任务的参与者只有“女巫”和主持人,信息可见对象是“女巫”,其他角色玩家不可见。', 'name': '女巫时刻','updaterule':'{"存活的玩家":"", "剩余解药的数量":"", "剩余毒药的数量":""}'}),
+# GNode(id='剧本杀/狼人杀/智能交互/猎人时刻_1', type='opsgptkg_task', attributes={'ID': 8926130661368382828, 'accesscriteria': 'OR', 'extra': '{"pattern": "react", "endcheck": "True", "dodisplay":"True"}', 'action':'react', 'teamids': '', 'gdb_timestamp': '1725092131054', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n\n##背景##\n在狼人杀游戏中,“猎人”玩家发现自己被击杀出局,可以选择亮出身份并指定击杀一名玩家。\n\n##任务##\n整个过程如下:\n1. 主持人判断“猎人”是否死亡,如果猎人存活则本轮流程直接结束。\n2. 如果“猎人”死亡,“猎人”玩家考虑是否激活自己的能力,猎人玩家若激活自己的能力,则选择要击杀的玩家,注意只能选择目前存活的玩家。\n\n该任务的参与者只有“猎人”和主持人,信息可见对象是“猎人”,其他角色玩家不可见。', 'name': '猎人时刻_1'}),
+GNode(id='剧本杀/狼人杀/智能交互/天亮讨论', type='opsgptkg_task', attributes={'ID': 274796810216558717, 'action':'react','gdb_timestamp': '1725106348469', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n\n##角色##\n你正在参与狼人杀这个游戏,你的角色是[主持人]。你熟悉狼人杀游戏的完整流程,你需要完成[任务],保证狼人杀游戏的顺利进行。\n\n##目前存活的玩家##\n#$#存活的玩家#$#\n\n##任务##\n以结构化的语句来模拟进行狼人杀的讨论环节。在这一个环节里,所有主持人先宣布目前存活的玩家,然后每位玩家按照座位顺序发言。\n\n##详细步骤##\n1. 主持人根据本局游戏历史记录,感知最开始所有的玩家以及在前面轮数中已经被票选死亡的玩家、被狼人杀死、被女巫毒死、被狼人杀死但是被女巫救活了的玩家(该玩家视为存活的玩家)。注意死亡的玩家不能参与本轮游戏。得到当前存活的玩家个数以及其player_name。 并告知所有玩家当前存活的玩家个数以及其player_name。 若一个人先被狼人杀害了,然后被女巫用解药就回了,则该玩家是存活的,即不能宣布该玩家死亡了。\n2. 告诉玩家昨晚发生的情况: 首先告诉玩家天亮了,然后你需要根据过往信息,告诉所有玩家,昨晚是否有玩家死亡。如果有,则向所有人宣布死亡玩家的名字,你只能宣布死亡玩家是谁如:"昨晚xx玩家死了。",或者有多位玩家死亡则宣布"昨晚xx和xx玩家死了。",不要透露任何其他信息,不能透露是被狼人杀死还是被女巫毒死或者救回。如果没有,则宣布昨晚是平安夜。\n3. 确定发言规则并告诉所有玩家:\n确定发言规则步骤如下: \n第一步:所有存活玩家按照座位号从小到大开始发言。\n第二步:告诉所有玩家从第一个发言玩家开始发言,存活的玩家按照发言顺序依次讨论,只讨论一轮,所有人发言完毕后结束。注意不能遗忘指挥任何存活玩家发言!\n4. 依次指定存活玩家依次发言。\n5. 被指定的玩家依次发言,在每一位存活的玩家都发言后,本阶段结束。\n\n##注意##\n1. 你必须根据规则确定第一个发言玩家是谁,然后根据第一个发言玩家的座位号,确定所有人的发言顺序并将具体发言顺序并告知所有玩家,不要做任何多余解释。\n2. 不能透露任何玩家的角色信息。\n3. 你要让所有存活玩家都参与发言,不能遗漏任何存活玩家。所有玩家发言后,本阶段结束。', 'name': '天亮讨论', 'accesscriteria': '', 'extra': '{"pattern": "react", "endcheck": "True", "memory_tag":"all", "dodisplay":"True"}', 'teamids': ''}),
+GNode(id='剧本杀/狼人杀/智能交互/票选凶手', type='opsgptkg_task', attributes={'ID': 1492108834523573937, 'accesscriteria': '', 'extra': '{"pattern": "react", "endcheck": "True", "dodisplay":"True"}', 'action':'react','teamids': '', 'gdb_timestamp': '1725106389716', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n\n##角色##\n你是一个统计票数大师,你正在参与“狼人杀”这个游戏,你的角色是[主持人]。你熟悉“狼人杀”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。 现在是投票阶段。\n\n##目前存活的玩家##\n#$#存活的玩家#$#\n\n##任务##\n以结构化的语句来模拟进行“狼人杀”的投票环节, 也仅仅只模拟投票环节,投票环节结束后就本阶段就停止了,由后续的阶段继续进行游戏。 在这一个环节里,由主持人先告知大家投票规则,然后组织每位存活玩家按照座位顺序发言投票, 所有人投票后,本阶段结束。 \n\n##详细步骤##\n你的任务如下:\nstep1. 向所有玩家通知现在进入了票选环节,在这个环节,每个人都一定要投票指定某一个玩家为狼人。\nstep2. 主持人确定投票顺序并告知所有玩家。 1:投票顺序与讨论环节一致。2: 按座位号依次投票,每个玩家只能对一个玩家进行投票。每个玩家只能投一次票。3:票数最多的玩家会立即死亡。\nstep3. 存活的的玩家按照依次顺序进行投票。\nstep4. 所有存活玩家发言完毕,主持人宣布投票环节结束。\n该任务的参与者为主持人和所有存活的玩家,信息可见对象是所有玩家。\n\n##example##\n{"thought": "我认为我们应该更加关注那些行为模式突然改变的玩家,比如3号沈强,他的态度从最初的谨慎变得越来越自信,这让我有些怀疑。", "action": {"agent_name": "agent_贺子轩", "player_name": "贺子轩"}, "observation": [{"memory_tag": ["all"], "content": "我投给3号沈强。"}]}\n\n##注意##\n1.之前的游戏轮数可能已经投票选中了某位/某些玩家、被狼人杀死、被女巫毒死的玩家,这些玩家不再视为存活玩家。\n2.你要让所有存活玩家都参与投票,不能遗漏任何存活玩家。在本轮每一位玩家只投票一个人,并且只能投存活玩家。\n3.该任务的参与者为主持人和所有存活的玩家,信息可见对象为所有玩家。\n4.不仅要模拟主持人的发言,还需要模拟玩家的发言\n5.不允许玩家自己投自己,如果出现了这种情况,主持人会提醒玩家重新投票。\n6.若票数相同,则座位号大的死亡,无需重复投票。', 'name': '票选凶手'}),
+GNode(id='剧本杀/狼人杀/智能交互/统计票数', type='opsgptkg_task', attributes={'ID': 1492108834523573938, 'accesscriteria': '', 'extra': '{"pattern": "react", "endcheck": "True", "dodisplay":"True"}', 'action':'react','teamids': '', 'gdb_timestamp': '1725106389717', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n\n##角色##\n你是一个统计票数大师,你非常擅长计数以及统计信息。你正在参与“狼人杀”这个游戏,你的角色是[主持人]。你熟悉“狼人杀”游戏的完整流程,你需要完成[任务],保证游戏的顺利进行。现在是票数统计阶段\n\n##任务##\n以结构化的语句来模拟进行“狼人杀”的票数统计阶段,也仅仅只票数统计阶段环节,票数统计阶段结束后就本阶段就停止了,由后续的阶段继续进行游戏。在这一个环节里,由主持人根据上一轮存活的玩家投票结果统计票数。 \n\n##详细步骤##\n你的任务如下:\nstep1. 主持人感知上一轮投票环节每位玩家的发言, 统计投票结果,格式为[{"player_name":票数}]. \nstep2 然后,主持人宣布死亡的玩家,以最大票数为本轮被投票的目标,如果票数相同,则取座位号高的角色死亡。并告知所有玩家本轮被投票玩家的player_name。(格式为【重要通知】本轮死亡的玩家为XXX)同时向所有玩家宣布,被投票中的角色会视为立即死亡(即不再视为存活角色)。\nstep3. 在宣布死亡玩家后,本阶段流程结束,由后续阶段继续推进游戏。\n该任务的参与者为主持人和所有存活的玩家,信息可见对象是所有玩家。\n\n##注意##\n1.如果有2个或者两个以上的被玩家被投的票数相同,则取座位号高的玩家死亡。并告知大家原因:票数相同,取座位号高的玩家死亡\n2.在统计票数时,首先确认存活玩家的数量,再先仔细回忆,谁被投了。最后统计每位玩家被投的次数。由于每位玩家只有一票,所以被投次数的总和等于存活玩家的数量 \n3.通知完死亡玩家是谁后,本阶段才结束,由后续阶段继续推进游戏。\n4.主持人只有当通知本轮死亡的玩家时,才使用【重要通知】的前缀,其他情况下不要使用【重要通知】前缀\n5.只统计上一轮投票环节的情况\n\n##example##\n{"thought": "根据上一轮发言,7号、1号、2号、3号投票给4号周欣怡,而5号和8号投票给7号梁军。", "action": {"agent_name": "主持人", "player_name": ""}, "Dungeon_Master": [{"memory_tag": ["all"], "content": "【重要通知】经过整理,投票结果如下:4号周欣怡:4票;7号梁军:2票。因此周欣怡的票最多。本轮死亡玩家是4号周欣怡。"}]}', 'name': '统计票数','updaterule':'{"存活的玩家":""}'}),
+# GNode(id='剧本杀/狼人杀/智能交互/猎人时刻_2', type='opsgptkg_task', attributes={'ID': 8926130661368382829, 'accesscriteria': 'OR', 'extra': '{"pattern": "react", "endcheck": "True", "dodisplay":"True"}', 'action':'react', 'teamids': '', 'gdb_timestamp': '1725092131055', 'executetype': '', 'description': '##以上为本局游戏历史记录##\n\n##背景##\n在狼人杀游戏中,“猎人”玩家发现自己被投票出局,可以选择亮出身份并指定击杀一名玩家。\n\n##任务##\n整个过程如下:\n1. 主持人判断“猎人”是否死亡,如果猎人存活则本轮流程直接结束。\n2. 主持人判断“猎人”是否是刚被投票出局的,若之前已经死亡,则本轮流程直接结束。\n3. 如果“猎人”死亡,“猎人”玩家考虑是否激活自己的能力,“猎人”玩家若激活自己的能力,则选择要击杀的玩家,注意只能选择目前存活的玩家。\n4. 主持人告知所有玩家:被“猎人”击杀的玩家死亡。(输出格式为【重要通知】本轮猎人击杀的玩家为XXX)同时向所有玩家宣布,被投票中的角色会视为立即死亡(即不再视为存活角色)。\n5. 在宣布死亡玩家后,本阶段流程结束,由后续阶段继续推进游戏。\n\n##注意##\n1.通知完死亡玩家是谁后,本阶段才结束,由后续阶段继续推进游戏。在下一轮输出 {"action": "taskend"}即可。\n2.主持人只有当通知本轮死亡的玩家时,才使用【重要通知】的前缀,其他情况下不要使用【重要通知】前缀。\n\n##example##\n{"thought": "根据周杰的选择,他决定使用猎人的能力,并击杀1号贺子轩。现在需要告知所有玩家这一结果,并继续游戏。", "action": {"agent_name": "主持人", "player_name": ""}, "Dungeon_Master": [{"memory_tag": ["all"], "content": "【重要通知】本轮猎人击杀的玩家为1号贺子轩。"}]}\n\n该任务的参与者只有“猎人”和主持人,信息可见对象是“猎人”,其他角色玩家不可见。', 'name': '猎人时刻_2'}),
+GNode(id='剧本杀/狼人杀/智能交互/狼人杀判断游戏是否结束', type='opsgptkg_task', attributes={'ID': -2316854558435035646, 'description': '狼人杀判断游戏是否结束', 'name': '狼人杀判断游戏是否结束', 'accesscriteria': '', 'extra': '{"memory_tag": "None"}', 'teamids': '', 'gdb_timestamp': '1725092210244', 'executetype': ''}),
+GNode(id='剧本杀/狼人杀/智能交互/事实_2', type='opsgptkg_phenomenon', attributes={'ID': -6298561983042120406, 'extra': '', 'teamids': '', 'gdb_timestamp': '1724816562165', 'description': '否', 'name': '否'}),
+GNode(id='剧本杀/狼人杀/智能交互/事实_1', type='opsgptkg_phenomenon', attributes={'ID': 6987562967613654408, 'gdb_timestamp': '1724816495297', 'description': '是', 'name': '是', 'extra': '', 'teamids': ''}),
+GNode(id='剧本杀/狼人杀/智能交互/狼人杀给出每个人的单词以及最终胜利者', type='opsgptkg_task', attributes={'ID': -758955621725402723, 'extra': '{"dodisplay":"True"}', 'teamids': '', 'gdb_timestamp': '1725097362872', 'executetype': '', 'description': '狼人杀给出每个人的单词以及最终胜利者', 'name': '狼人杀给出每个人的单词以及最终胜利者', 'accesscriteria': ''}),
+GNode(id='剧本杀', type='opsgptkg_intent', attributes={'ID': -3388526698926684245, 'description': '文本游戏相关(如狼人杀等)', 'name': '剧本杀', 'extra': '', 'teamids': '', 'gdb_timestamp': '1724815537102'})
+]
new_edges_2 = \
[GEdge(start_id='剧本杀', end_id='剧本杀/谁是卧底', type='opsgptkg_intent_route_opsgptkg_intent', attributes={'SRCID': -3388526698926684245, 'DSTID': -5201231166222141228, 'gdb_timestamp': '1725088433347', 'extra': ''}),
@@ -166,15 +176,18 @@ def hash_id(nodeId, sessionId='', otherstr = None):
GEdge(start_id='剧本杀/谁是卧底', end_id='剧本杀/谁是卧底/智能交互', type='opsgptkg_intent_route_opsgptkg_schedule', attributes={'SRCID': -5201231166222141228, 'DSTID': 603563742932974030, 'gdb_timestamp': '1725088478251', 'extra': ''}),
GEdge(start_id='剧本杀/狼人杀', end_id='剧本杀/狼人杀/智能交互', type='opsgptkg_intent_route_opsgptkg_schedule', attributes={'SRCID': 5476827419397129797, 'DSTID': -5931163481230280444, 'gdb_timestamp': '1724815633494', 'extra': ''}),
GEdge(start_id='剧本杀/谁是卧底/智能交互', end_id='剧本杀/谁是卧底/智能交互/分配座位', type='opsgptkg_schedule_route_opsgptkg_task', attributes={'SRCID': 603563742932974030, 'DSTID': 2011080219630105469, 'gdb_timestamp': '1725088659469', 'extra': ''}),
- GEdge(start_id='剧本杀/狼人杀/智能交互', end_id='剧本杀/狼人杀/智能交互/位置选择', type='opsgptkg_schedule_route_opsgptkg_task', attributes={'SRCID': -5931163481230280444, 'DSTID': 2541178858602010284, 'gdb_timestamp': '1724815720186', 'extra': ''}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互', end_id='剧本杀/狼人杀/智能交互/狼人杀分配座位', type='opsgptkg_schedule_route_opsgptkg_task', attributes={'SRCID': -5931163481230280444, 'DSTID': 2541178858602010284, 'gdb_timestamp': '1724815720186', 'extra': ''}),
GEdge(start_id='剧本杀/谁是卧底/智能交互/分配座位', end_id='剧本杀/谁是卧底/智能交互/角色分配和单词分配', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 2011080219630105469, 'DSTID': -1817533533893637377, 'gdb_timestamp': '1725088761379', 'extra': ''}),
- GEdge(start_id='剧本杀/狼人杀/智能交互/位置选择', end_id='剧本杀/狼人杀/智能交互/角色选择', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 2541178858602010284, 'DSTID': -8695604495489305484, 'extra': '', 'gdb_timestamp': '1724815828424'}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/狼人杀分配座位', end_id='剧本杀/狼人杀/智能交互/狼人杀角色分配和单词分配', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 2541178858602010284, 'DSTID': -8695604495489305484, 'extra': '', 'gdb_timestamp': '1724815828424'}),
GEdge(start_id='剧本杀/谁是卧底/智能交互/角色分配和单词分配', end_id='剧本杀/谁是卧底/智能交互/通知身份', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': -1817533533893637377, 'DSTID': 8901447933395410622, 'gdb_timestamp': '1725088813780', 'extra': ''}),
- GEdge(start_id='剧本杀/狼人杀/智能交互/角色选择', end_id='剧本杀/狼人杀/智能交互/向玩家通知消息', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': -8695604495489305484, 'DSTID': -4014299322597660132, 'gdb_timestamp': '1724815943792', 'extra': ''}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/狼人杀角色分配和单词分配', end_id='剧本杀/狼人杀/智能交互/向玩家通知消息', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': -8695604495489305484, 'DSTID': -4014299322597660132, 'gdb_timestamp': '1724815943792', 'extra': ''}),
GEdge(start_id='剧本杀/谁是卧底/智能交互/通知身份', end_id='剧本杀/谁是卧底/智能交互/关键信息_1', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 8901447933395410622, 'DSTID': 3196717310525578616, 'extra': '', 'gdb_timestamp': '1725364881808'}),
- GEdge(start_id='剧本杀/狼人杀/智能交互/向玩家通知消息', end_id='剧本杀/狼人杀/智能交互/狼人时刻', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': -4014299322597660132, 'DSTID': 8926130661368382825, 'gdb_timestamp': '1724815952503', 'extra': ''}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/向玩家通知消息', end_id='剧本杀/狼人杀/智能交互/狼人时刻_投票', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': -4014299322597660132, 'DSTID': 8926130661368382825, 'gdb_timestamp': '1724815952503', 'extra': ''}),
GEdge(start_id='剧本杀/谁是卧底/智能交互/关键信息_1', end_id='剧本杀/谁是卧底/智能交互/开始新一轮的讨论', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 3196717310525578616, 'DSTID': -6077057339616293423, 'extra': '', 'gdb_timestamp': '1725364891197'}),
- GEdge(start_id='剧本杀/狼人杀/智能交互/狼人时刻', end_id='剧本杀/狼人杀/智能交互/天亮讨论', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 8926130661368382825, 'DSTID': 274796810216558717, 'gdb_timestamp': '1724911515908', 'extra': ''}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/狼人时刻_投票', end_id='剧本杀/狼人杀/智能交互/狼人时刻_统计票数', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 8926130661368382825, 'DSTID': 8926130661368382925, 'gdb_timestamp': '1724911515907', 'extra': ''}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/狼人时刻_统计票数', end_id='剧本杀/狼人杀/智能交互/预言家时刻', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 8926130661368382925, 'DSTID': 8926130661368382826, 'gdb_timestamp': '1724911515908', 'extra': ''}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/预言家时刻', end_id='剧本杀/狼人杀/智能交互/女巫时刻', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 8926130661368382826, 'DSTID': 8926130661368382827, 'gdb_timestamp': '1724911515909', 'extra': ''}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/女巫时刻', end_id='剧本杀/狼人杀/智能交互/天亮讨论', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 8926130661368382827, 'DSTID': 274796810216558717, 'gdb_timestamp': '1724911515911', 'extra': ''}),
GEdge(start_id='剧本杀/谁是卧底/智能交互/开始新一轮的讨论', end_id='剧本杀/谁是卧底/智能交互/关键信息_2', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': -6077057339616293423, 'DSTID': -8309123437761850283, 'extra': '', 'gdb_timestamp': '1725364966817'}),
GEdge(start_id='剧本杀/狼人杀/智能交互/天亮讨论', end_id='剧本杀/狼人杀/智能交互/票选凶手', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 274796810216558717, 'DSTID': 1492108834523573937, 'extra': '', 'gdb_timestamp': '1724816423574'}),
GEdge(start_id='剧本杀/谁是卧底/智能交互/关键信息_2', end_id='剧本杀/谁是卧底/智能交互/票选卧底_1', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': -8309123437761850283, 'DSTID': 267468674566989196, 'gdb_timestamp': '1725507894066', 'extra': ''}),
@@ -186,11 +199,12 @@ def hash_id(nodeId, sessionId='', otherstr = None):
GEdge(start_id='剧本杀/谁是卧底/智能交互/判断游戏是否结束', end_id='剧本杀/谁是卧底/智能交互/事实_2', type='opsgptkg_task_route_opsgptkg_phenomenon', attributes={'SRCID': -5959590132883379159, 'DSTID': 4216433814773851843, 'extra': '', 'gdb_timestamp': '1725089603500'}),
GEdge(start_id='剧本杀/谁是卧底/智能交互/事实_1', end_id='剧本杀/谁是卧底/智能交互/给出每个人的单词以及最终胜利者', type='opsgptkg_phenomenon_route_opsgptkg_task', attributes={'SRCID': -525629912140732688, 'DSTID': 8878899410716129093, 'gdb_timestamp': '1725089654391', 'extra': ''}),
GEdge(start_id='剧本杀/谁是卧底/智能交互/事实_2', end_id='剧本杀/谁是卧底/智能交互/开始新一轮的讨论', type='opsgptkg_phenomenon_route_opsgptkg_task', attributes={'SRCID': 4216433814773851843, 'DSTID': -6077057339616293423, 'extra': '', 'gdb_timestamp': '1725089612866'}),
- GEdge(start_id='剧本杀/狼人杀/智能交互/票选凶手', end_id='剧本杀/狼人杀/智能交互/判断游戏是否结束', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 1492108834523573937, 'DSTID': -2316854558435035646, 'extra': '', 'gdb_timestamp': '1724816464917'}),
- GEdge(start_id='剧本杀/狼人杀/智能交互/判断游戏是否结束', end_id='剧本杀/狼人杀/智能交互/事实_2', type='opsgptkg_task_route_opsgptkg_phenomenon', attributes={'SRCID': -2316854558435035646, 'DSTID': -6298561983042120406, 'gdb_timestamp': '1724816570641', 'extra': ''}),
- GEdge(start_id='剧本杀/狼人杀/智能交互/判断游戏是否结束', end_id='剧本杀/狼人杀/智能交互/事实_1', type='opsgptkg_task_route_opsgptkg_phenomenon', attributes={'SRCID': -2316854558435035646, 'DSTID': 6987562967613654408, 'gdb_timestamp': '1724816506031', 'extra': ''}),
- GEdge(start_id='剧本杀/狼人杀/智能交互/事实_2', end_id='剧本杀/狼人杀/智能交互/狼人时刻', type='opsgptkg_phenomenon_route_opsgptkg_task', attributes={'SRCID': -6298561983042120406, 'DSTID': 8926130661368382825, 'extra': '', 'gdb_timestamp': '1724816585403'}),
- GEdge(start_id='剧本杀/狼人杀/智能交互/事实_1', end_id='剧本杀/l狼人杀/智能交互/宣布游戏胜利者', type='opsgptkg_phenomenon_route_opsgptkg_task', attributes={'SRCID': 6987562967613654408, 'DSTID': -758955621725402723, 'gdb_timestamp': '1724911404270', 'extra': ''})]
+    GEdge(start_id='剧本杀/狼人杀/智能交互/票选凶手', end_id='剧本杀/狼人杀/智能交互/统计票数', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 1492108834523573937, 'DSTID': 1492108834523573938, 'extra': '', 'gdb_timestamp': '1724816464915'}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/统计票数', end_id='剧本杀/狼人杀/智能交互/狼人杀判断游戏是否结束', type='opsgptkg_task_route_opsgptkg_task', attributes={'SRCID': 1492108834523573938, 'DSTID': -2316854558435035646, 'extra': '', 'gdb_timestamp': '1724816464917'}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/狼人杀判断游戏是否结束', end_id='剧本杀/狼人杀/智能交互/事实_2', type='opsgptkg_task_route_opsgptkg_phenomenon', attributes={'SRCID': -2316854558435035646, 'DSTID': -6298561983042120406, 'gdb_timestamp': '1724816570641', 'extra': ''}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/狼人杀判断游戏是否结束', end_id='剧本杀/狼人杀/智能交互/事实_1', type='opsgptkg_task_route_opsgptkg_phenomenon', attributes={'SRCID': -2316854558435035646, 'DSTID': 6987562967613654408, 'gdb_timestamp': '1724816506031', 'extra': ''}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/事实_2', end_id='剧本杀/狼人杀/智能交互/狼人时刻_投票', type='opsgptkg_phenomenon_route_opsgptkg_task', attributes={'SRCID': -6298561983042120406, 'DSTID': 8926130661368382825, 'extra': '', 'gdb_timestamp': '1724816585403'}),
+ GEdge(start_id='剧本杀/狼人杀/智能交互/事实_1', end_id='剧本杀/狼人杀/智能交互/狼人杀给出每个人的单词以及最终胜利者', type='opsgptkg_phenomenon_route_opsgptkg_task', attributes={'SRCID': 6987562967613654408, 'DSTID': -758955621725402723, 'gdb_timestamp': '1724911404270', 'extra': ''})]
new_edges_3 = [GEdge(start_id='ekg_team_default', end_id='剧本杀', type='opsgptkg_intent_route_opsgptkg_intent', attributes={'SRCID': 9015207174144, 'DSTID': -3388526698926684245, 'gdb_timestamp': '1724816506031', 'extra': ''})]
diff --git a/muagent/connector/memory_manager.py b/muagent/connector/memory_manager.py
index 5c5bcd0..e536466 100644
--- a/muagent/connector/memory_manager.py
+++ b/muagent/connector/memory_manager.py
@@ -5,6 +5,7 @@
from collections import Counter
from loguru import logger
import numpy as np
+import logging
from langchain_community.docstore.document import Document
@@ -877,7 +878,7 @@ def get_msg_by_role_name(self, chat_index: str, role_name: str) -> Optional[Mess
return msg
return None
- def get_msg_content_by_role_name(self, chat_index: str, role_name: str) -> Optional[str]:
+ def get_msg_content_by_rule_name(self, chat_index: str, role_name: str) -> Optional[str]:
message = self.get_msg_by_role_name(chat_index, role_name)
if message == None:
return None
@@ -889,16 +890,33 @@ def update_msg_content_by_rule(self, chat_index: str, role_name: str, new_conten
if message == None:
return False
-
- prompt = f"{new_content}\n{role_name}:{message.role_content}\n{update_rule}"
-
+ if update_rule == '':
+ prompt = '任务:请根据游戏内容,更新变量,变量名为:' + role_name + ',变量更新前的内容为:' + message.role_content + '。本节点游戏记录:' + new_content + '。请根据游戏内容,输出更新后的变量内容,不要包含其他信息,不要重复变量名,只输出变量更新后的内容即可。。'
+ else:
+ prompt = '任务:请根据游戏内容,更新变量,变量名为:' + role_name + ',变量更新前的内容为:' + message.role_content + '。本节点游戏记录:' + new_content + '。变量更新规则为:' + update_rule + '。请根据游戏内容和变量更新规则,输出更新后的变量内容,不要包含其他信息,不要重复变量名,只输出变量更新后的内容即可。'
+ logging.info(f'变量更新的prompt:{prompt}')
model = getChatModelFromConfig(self.llm_config)
new_role_content = model.predict(prompt)
-
+ logging.info(f'变量更新的输出结果:{new_role_content}')
if new_role_content is not None:
message.role_content = new_role_content
self.append(message)
+ logging.info(f'输出结果:{self.get_msg_content_by_rule_name(chat_index, role_name)}')
+
+ return True
+ else:
+ return False
+
+ def update_global_msg_content(self, chat_index: str, role_name: str, new_content: str) -> bool:
+ message = self.get_msg_by_role_name(chat_index, role_name)
+        print(f' message is {message}')
+ if message == None:
+ return False
+
+ if new_content is not None:
+ message.role_content = new_content
+ self.append(message)
return True
else:
return False
\ No newline at end of file
diff --git a/muagent/db_handler/graph_db_handler/nebula_handler.py b/muagent/db_handler/graph_db_handler/nebula_handler.py
index cdbd825..9d94180 100644
--- a/muagent/db_handler/graph_db_handler/nebula_handler.py
+++ b/muagent/db_handler/graph_db_handler/nebula_handler.py
@@ -63,11 +63,13 @@ def __init__(self,gb_config : GBConfig = None):
self.nb_pw = '' or 'nebula'
self.space_name = "client"
else:
+
logger.info('NebulaGraph容器启动中,请等待')
if self.nebula_started(gb_config):
self.connection_pool.init([(gb_config.extra_kwargs.get("host"), gb_config.extra_kwargs.get("port"))], config)
+
self.username = gb_config.extra_kwargs.get("username")
self.nb_pw = gb_config.extra_kwargs.get("password")
self.space_name = gb_config.extra_kwargs.get("space")
@@ -317,16 +319,7 @@ def add_node(self, node: GNode) -> GbaseExecStatus:
if prop_name in {'extra', 'description', 'envdescription','updaterule'}:
# 转义换行符和双引号
value = value.replace("\n", "\\n").replace("\"", "\\\"")
- cypher += f'"{value}",'
- elif prop_name == 'description':
- value = value.replace("\n", "\\n").replace("\"", "\\\"")
- cypher += f'"{value}",'
- elif prop_name == 'envdescription':
- value = value.replace("\n", "\\n").replace("\"", "\\\"")
- cypher += f'"{value}",'
- else:
- cypher += f'"{value}",'
- #cypher += f'"{value}",'
+ cypher += f'"{value}",'
else:
cypher += f'{value},'
cypher = cypher.rstrip(',')
diff --git a/muagent/schemas/ekg/ekg_reason.py b/muagent/schemas/ekg/ekg_reason.py
new file mode 100644
index 0000000..11af385
--- /dev/null
+++ b/muagent/schemas/ekg/ekg_reason.py
@@ -0,0 +1,324 @@
+from pydantic import BaseModel, Field
+from typing import List, Dict, Optional, Union
+from enum import Enum
+import copy
+import json
+
+
+
+
+#####################################################################
+############################ LingSiResponse #############################
+#####################################################################
+class LingSiResponse(BaseModel):
+ '''
+ lingsi的输出值, 算法的输入值
+ '''
+ currentNodeId: Optional[str]=None
+ observation: Optional[Union[str,Dict]] # jsonstr
+ scene: str
+ sessionId: str
+ startRootNodeId: Optional[str] = None
+ intentionRule: Optional[Union[List,str] ]= None
+ intentionData: Optional[Union[List,str] ] = None
+ startFromRoot: Optional[str] = None
+ type: Optional[str] = None
+ userAnswer: Optional[str]=None
+ agentName:Optional[str]=None
+ usingRootNode:Optional[bool]=False
+
+
+
+
+
+
+
+#####################################################################
+############################ #定义PlanningRunning 和 parallel 模式下大模型返回格式 #############################
+#####################################################################
+
+class ActionOneStep(BaseModel):
+ '''
+ 指定下一步的动作由哪一个agent / player 完成
+ example {"player_name":str, "agent_name":str}
+ '''
+ agent_name: str
+ player_name:str
+
+class ActionPlan(BaseModel):
+ '''
+ 指定后续的动作由哪些agent / player 完成
+
+ [{"player_name":str, "agent_name":str}, {"player_name":str, "agent_name":str}, ... ]
+ '''
+ data: List[ActionOneStep]
+ def get_player_name_by_agent_name(self, agent_name:str)->str:
+ '''
+ 根据agent_name 返回 player_name
+
+ '''
+ for i in range(len(self.data)):
+ if self.data[i].agent_name == agent_name:
+ return self.data[i].player_name
+ return None #没找到合适的匹配
+
+
+
+
+class ObservationItem(BaseModel):
+ '''
+ 假设 agent 说话时返回的数据格式,除了content外,还有memory_tag, 指定这个信息有谁可见
+ '''
+ memory_tag: List[str]
+ content: str
+
+
+
+
+
+class PlanningRunningAgentReply(BaseModel):
+ '''
+ 示例数据
+ sss = {
+ "thought": "思考内容",
+ "action_plan": [
+ {"player_name": "player1", "agent_name": "agent1"},
+ {"player_name": "player2", "agent_name": "agent2"}
+ ],
+ "Dungeon_Master": [
+ {"memory_tag": ["agent_name_a", "agent_name_b"], "content": "DM 内容1"},
+ {"memory_tag": ["agent_name_c"], "content": "DM 内容2"}
+ ]
+ }
+ '''
+ thought: str = "None"
+ action_plan: ActionPlan=[]
+ observation: List[ObservationItem]=[]
+
+ def __init__(self, **kwargs):
+ # 处理 action_plan
+ action_steps = [ActionOneStep(**step) for step in kwargs.get('action_plan', [])]
+ action_plan = ActionPlan(data=action_steps)
+
+ # 处理 observation
+ observations = [ObservationItem(**item) for item in kwargs.get('Dungeon_Master', [])]
+
+ # 调用父类的初始化方法
+ super().__init__(
+ thought=kwargs.get('thought', "None"),
+ action_plan=action_plan,
+ observation=observations
+ )
+
+
+
+#####################################################################
+############################ #定义输出plan格式 #############################
+#####################################################################
+
+class QuestionContent(BaseModel):
+ '''
+ {'question': '请玩家根据当前情况发言', 'candidate': None }
+ '''
+ question:str
+ candidate:Optional[str]=None
+
+class QuestionDescription(BaseModel):
+ '''
+ {'questionType': 'essayQuestion',
+ 'questionContent': {'question': '请玩家根据当前情况发言','candidate': None }}
+ '''
+ questionType:str
+ questionContent:QuestionContent
+
+class ToolPlanOneStep(BaseModel):
+ '''
+ tool_plan_one_step = {'toolDescription': '请用户回答',
+ 'currentNodeId': nodeId,
+ 'memory': None,
+ 'type': 'userProblem',
+ 'questionDescription': {'questionType': 'essayQuestion',
+ 'questionContent': {'question': '请玩家根据当前情况发言',
+ 'candidate': None }}}
+ '''
+ toolDescription:str
+ currentNodeId: Optional[str] = None
+ currentNodeInfo:Optional[str] = None
+ memory:Optional[str] = None
+ type:str
+ questionDescription:Optional[QuestionDescription]=None
+
+#####################################################################
+############################ ResToLingsi #############################
+#####################################################################
+class ResToLingsi(BaseModel):
+ '''
+ lingsi的输入值, 算法的输出值
+
+{'intentionRecognitionSituation': 'None',
+ 'sessionId': 'c122401123504af09dbf80f94be0854d',
+ 'type': 'onlyTool',
+ 'summary': None,
+ 'toolPlan': [{'toolDescription': 'agent_李静',
+ 'currentNodeId': '26921eb05153216c5a1f585f9d318c77%%@@#agent_李静',
+ 'currentNodeInfo': 'agent_李静',
+ 'memory': '["{\\"content\\": \\"开始玩谁是卧底的游戏\\"}", "分配座位", "\\n\\n| 座位 | 玩家 |\\n|---|---|\\n| 1 | **李静** |\\n| 2 | **张伟** |\\n| 3 | **人类玩家** |\\n| 4 | **王鹏** |", "通知身份", "主持人 : 【身份通知】你是李静, 你的位置是1号, 你分配的单词是包子", "开始新一轮的讨论", "主持人 : 各位玩家请注意,现在所有玩家均存活,我们将按照座位顺序进行发言。发言顺序为1号李静、2号张伟、3号人类玩家、4号王鹏。现在,请1号李静开始发言。"]',
+ 'type': 'reactExecution',
+ 'questionDescription': None}],
+ 'userInteraction': '开始新一轮的讨论
**主持人:**
各位玩家请注意,现在所有玩家均存活,我们将按照座位顺序进行发言。发言顺序为1号李静、2号张伟、3号人类玩家、4号王鹏。现在,请1号李静开始发言。'}
+
+ '''
+ intentionRecognitionSituation: Optional[str]=None
+ sessionId: str
+ type: Optional[str] = None
+ summary:Optional[str] = None
+ toolPlan:Optional[List[ToolPlanOneStep]] = None
+ userInteraction:Optional[str]=None
+
+if __name__ == '__main__':
+ # response_data = LingSiResponse(
+ # currentNodeId="node_1",
+ # observation='{"key": "value"}',
+ # scene="example_scene",
+ # sessionId="session_123",
+ # startRootNodeId="root_node_1",
+ # type="example_type",
+ # userAnswer="user_answer_example"
+
+ # )
+
+ # print( response_data.type )
+ # 示例数据
+ # sss = {
+ # "thought": "思考内容",
+ # "action_plan": [
+ # {"player_name": "player1", "agent_name": "agent1"},
+ # {"player_name": "player2", "agent_name": "agent2"}
+ # ],
+ # "Dungeon_Master": [
+ # {"memory_tag": ["agent_name_a", "agent_name_b"], "content": "DM 内容1"},
+ # {"memory_tag": ["agent_name_c"], "content": "DM 内容2"}
+ # ]
+ # }
+
+ # # 直接使用 sss 创建 PlanningRunningAgentReply 对象
+ # reply = PlanningRunningAgentReply(**sss)
+
+
+ # #测试ToolPlanOneStep
+ # # 定义 Sss
+ # nodeId = 'somenodeid'
+ # Sss = {
+ # 'toolDescription': '请用户回答',
+ # 'currentNodeId': nodeId,
+ # 'memory': None,
+ # 'type': 'userProblem',
+ # 'questionDescription': {
+ # 'questionType': 'essayQuestion',
+ # 'questionContent': {
+ # 'question': '请玩家根据当前情况发言',
+ # 'candidate': None
+ # }
+ # }
+ # }
+
+ # # 将 Sss 转换为 ToolPlanOneStep 对象
+ # tool_plan_one_step = ToolPlanOneStep(**Sss)
+
+
+ ###测试 LingSiResponse
+ sessionId = '349579720439752097847_25'
+ #谁是卧底 通用
+ params_string = \
+ {
+
+ "scene": "NEXA",
+ "startRootNodeId": '4bf08f20487bfb3d34048ddf455bf5dd', #hash_id('剧本杀' ),
+ "intentionRule": ["nlp"],
+ "intentionData": "执行谁是卧底进程",
+ "observation": "{\"content\":\"开始游戏\"}",
+ "sessionId": sessionId
+ }
+
+ lingsi_response = LingSiResponse(**params_string)
+ print(lingsi_response)
+
+
+
+ # ###测试 LingSiResponse
+ # nodeId = 's'
+ # execute_agent_name = 'n'
+
+ # tool_one_step= ToolPlanOneStep(
+ # **{'toolDescription': '请用户回答',
+ # 'currentNodeId': nodeId + '%%@@#' + execute_agent_name,
+ # 'currentNodeInfo':execute_agent_name,
+ # 'memory': None,
+ # 'type': 'userProblem',
+ # 'questionDescription': {'questionType': 'essayQuestion',
+ # 'questionContent': {'question': '请玩家根据当前情况发言',
+ # 'candidate': None }}}
+ # )
+ # print(tool_one_step)
+
+
+
+ ###测试 ActionPlan
+
+
+
+ # ccc = {'data': [{'agent_name': 'agent_2', 'player_name': 'player_1'},
+ # {'agent_name': '人类agent_a', 'player_name': '李四(人类玩家)'},
+ # {'agent_name': 'agent_3', 'player_name': 'player_2'},
+ # {'agent_name': 'agent_1', 'player_name': 'player_3'}]}
+
+ # ccc = [{"player_name": "主持人", "agent_name": "主持人"},
+ # {"player_name": "player_1", "agent_name": "agent_2"},
+ # {"player_name": "李四(人类玩家)", "agent_name": "人类agent_a"},
+ # {"player_name": "player_2", "agent_name": "agent_3"},
+ # {"player_name": "player_3", "agent_name": "agent_1"}]
+ # ccc = {'data':ccc}
+
+ # action_plan =ActionPlan(**ccc)
+ # print(action_plan)
+
+ # agent_name= action_plan.get_player_name_by_agent_name('人类agent_a')
+ # print(agent_name)
+
+
+ ###测试 ResToLingsi ###
+ nodeId = 'somenodeid'
+ # sss = {
+ # 'toolDescription': '请用户回答',
+ # 'currentNodeId': nodeId,
+ # 'memory': None,
+ # 'type': 'userProblem',
+ # 'questionDescription': {
+ # 'questionType': 'essayQuestion',
+ # 'questionContent': {
+ # 'question': '请玩家根据当前情况发言',
+ # 'candidate': None
+ # }
+ # }
+ # }
+ # sss = {
+ # "toolDescription": "toolDescriptionA",
+ # "currentNodeId": "INT_1",
+ # "memory": '',
+ # "type":"onlyTool",
+ # }
+ # # 将 Sss 转换为 ToolPlanOneStep 对象
+ # tool_plan_one_step = ToolPlanOneStep(**sss)
+ # bbb = {'intentionRecognitionSituation': 'None',
+ # 'sessionId': 'c122401123504af09dbf80f94be0854d',
+ # 'type': 'onlyTool',
+ # 'summary': None,
+ # 'toolPlan': [tool_plan_one_step],
+ # 'userInteraction': '开始新一轮的讨论
**主持人:**
各位玩家请注意,现在所有玩家均存活,我们将按照座位顺序进行发言。发言顺序为1号李静、2号张伟、3号人类玩家、4号王鹏。现在,请1号李静开始发言。'}
+
+ # ResToLingsi_one = ResToLingsi(**bbb)
+ # print(ResToLingsi_one.dict())
+
+
+
+
\ No newline at end of file
diff --git a/muagent/service/ekg_reasoning/src/geabase_handler/geabase_handlerplus.py b/muagent/service/ekg_reasoning/src/geabase_handler/geabase_handlerplus.py
index 68c9000..2edcc68 100644
--- a/muagent/service/ekg_reasoning/src/geabase_handler/geabase_handlerplus.py
+++ b/muagent/service/ekg_reasoning/src/geabase_handler/geabase_handlerplus.py
@@ -7,6 +7,7 @@
#路径增加
import sys
import os
+from typing import List, Dict, Optional, Union
src_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
@@ -63,14 +64,14 @@ def geabase_is_react_node(self, start_nodeid, start_nodetype):
extra = json.loads(extra)
if 'pattern' in extra.keys():
- if extra['pattern'] == 'one-tool':
+ if extra['pattern'] == 'one-tool' or extra['pattern'] == 'single':
return False
- elif extra['pattern'] == 'react':
+ elif extra['pattern'] == 'react' or extra['pattern'] == 'parallel' or extra['pattern'] == 'plan':
return True
else:
return False
- else: #默认为 one-tool
+ else: #默认为 single
return False
@@ -95,6 +96,76 @@ def geabase_search_return_all_nodeandedge_(self, start_nodeid = 'None',
for i in range(len(t.edges)):
edge_in_subtree.append({ "startNodeId": t.edges[i].start_id, "endNodeId": t.edges[i].end_id })
return nodeid_in_subtree, edge_in_subtree
+
+
+ def geabase_search_reture_nodeslist(self, start_nodeid:str,start_nodetype:str, block_search_nodetype:List=[] ):
+
+ '''
+ #返回start_nodeid后续子树上的所有节点id , start_nodeid 是一个意图节点的末端节点, 可以设置停止探索的类型,比如
+ #block_search_nodetype = ['opsgptkg_schedule', 'opsgptkg_task'] 时,代表这两种类型的节点不往后继续探索
+ #同时返回边
+
+ 不直接使用 get_hop_infos
+
+ 一个意图节点(非叶子意图节点)下可能有多个叶子节点,每个叶子节点也可能有多个意图节点。 在并行的时候,需要将某个意图节点单独成一个 nodeid_in_subtree
+ 如果直接调用 geabase_search_return_all_nodeandedge ,会把某个节点下所有的内容全放到一个list里
+ '''
+
+ nodeid_in_subtree = [{'nodeId':start_nodeid, 'nodeType':start_nodetype, 'nodeDescription':None, 'nodeName':None}]
+ edge_in_subtree = []
+ nodeid_in_search = [{'nodeId':start_nodeid, 'nodeType':start_nodetype}]
+ count = 0
+
+ reslist = []
+
+
+ while len(nodeid_in_search)!= 0:
+ # print(f'count is {count}')
+ nodedict_now = nodeid_in_search.pop()
+ nodeid_now = nodedict_now['nodeId']
+ nodetype_now = nodedict_now['nodeType']
+
+
+ neighborNodes = self.geabase_handler.get_neighbor_nodes(attributes= {"id": nodeid_now,}
+ , node_type= nodetype_now )
+
+ for i in range(len(neighborNodes)):
+
+ nodeid_new = neighborNodes[i].id
+ nodetype_new = neighborNodes[i].type
+ nodedescription_new = neighborNodes[i].attributes['description']
+ nodename_new = neighborNodes[i].attributes['name']
+ if nodetype_new in block_search_nodetype: #遇到阻塞的节点类型就终止, 不继续探索,也不纳入到结果中
+ continue
+ if nodetype_new == 'opsgptkg_schedule':
+
+ one_subtree, _ = self.geabase_search_return_all_nodeandedge(start_nodeid = nodeid_new,
+ start_nodetype = nodetype_new, block_search_nodetype = [])
+
+ one_subtree.append({'nodeId':nodeid_now, 'nodeType':nodetype_now, 'nodeDescription':None, 'nodeName':None})
+ reslist.append(one_subtree)
+
+
+ if { "startNodeId": nodeid_now, "endNodeId": nodeid_new } not in edge_in_subtree:#避免重复导入相同的元素
+ nodeid_in_subtree.append({'nodeId':nodeid_new, 'nodeType':nodetype_new,
+ 'nodeDescription':nodedescription_new,
+ 'nodeName':nodename_new})
+ edge_in_subtree.append({ "startNodeId": nodeid_now, "endNodeId": nodeid_new })
+ nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
+ else:
+ continue
+ count = count +1
+
+ #去重复
+ unique_set = set(tuple(sorted(d.items())) for d in nodeid_in_subtree)
+ # 将去重后的元组转换回字典形式,得到去重后的list
+ nodeid_in_subtree = [dict(t) for t in unique_set]
+
+ unique_set = set(tuple(sorted(d.items())) for d in edge_in_subtree)
+ # 将去重后的元组转换回字典形式,得到去重后的list
+ edge_in_subtree = [dict(t) for t in unique_set]
+
+ return reslist
def geabase_search_return_all_nodeandedge(self, start_nodeid = 'None',
start_nodetype = 'opsgptkg_intent', block_search_nodetype = []):
@@ -443,6 +514,8 @@ def geabase_getnodetype(self, rootNodeId = 'None', rootNodeType = 'opsgptkg_tas
else:
return 'onlyTool'
+
+
def get_extra_tag(self, rootNodeId = 'None', rootNodeType = 'opsgptkg_task', key = 'ignorememory'):
# print(f'rootNodeId is {rootNodeId}, rootNodeType is {rootNodeType}')
try:
@@ -463,8 +536,14 @@ def get_extra_tag(self, rootNodeId = 'None', rootNodeType = 'opsgptkg_task', key
return None
else:
return extra[key]
+
+
+ def get_tag_(self, rootNodeId = 'None', rootNodeType = 'opsgptkg_task', key = 'ignorememory')->str:
+ '''
+ 得到一个节点的属性值, 只判断属性
+ '''
- def get_tag(self, rootNodeId = 'None', rootNodeType = 'opsgptkg_task', key = 'ignorememory'):
+
# print(f'rootNodeId is {rootNodeId}, rootNodeType is {rootNodeType}')
try:
oneNode = self.geabase_handler.get_current_node(attributes={"id": rootNodeId,},
@@ -482,8 +561,23 @@ def get_tag(self, rootNodeId = 'None', rootNodeType = 'opsgptkg_task', key = 'ig
return oneNode.attributes[key]
-
-
+ def get_tag(self, rootNodeId = 'None', rootNodeType = 'opsgptkg_task', key = 'ignorememory')->str:
+ '''
+ 得到一个节点的属性值。 优先判断属性值;如果属性值为None,则判断extra中是否有值。都没有值返回None
+ '''
+ if key == 'action':
+ key_ = 'pattern'
+ else:
+ key_ = key
+ get_tag_result = self.get_tag_(rootNodeId , rootNodeType , key )
+ get_extra_tag_result = self.get_extra_tag(rootNodeId , rootNodeType , key_ )
+ logging.info(f'get_tag_result is {get_tag_result}, get_extra_tag_result is {get_extra_tag_result}')
+ if get_tag_result != None:
+ return get_tag_result
+ elif get_tag_result==None and get_extra_tag_result!=None:
+ return get_extra_tag_result
+ else:
+ return None
@@ -496,6 +590,8 @@ def user_input_memory_tag(self, rootNodeId = 'None', rootNodeType = 'opsgptkg_t
logging.info('user_input_memory_tag 没有找到合适的数据, 可能原因是当前查找对象不是 opsgptkg_task 类型的节点' )
return None
# print(oneNode)
+ if oneNode.attributes['extra'] == None:
+ return None
if oneNode.attributes['extra'] == '':
return None
extra = oneNode.attributes['extra']
diff --git a/muagent/service/ekg_reasoning/src/graph_search/geabase_search_plus.py b/muagent/service/ekg_reasoning/src/graph_search/geabase_search_plus.py
index 12706b1..d55c8e7 100644
--- a/muagent/service/ekg_reasoning/src/graph_search/geabase_search_plus.py
+++ b/muagent/service/ekg_reasoning/src/graph_search/geabase_search_plus.py
@@ -4,6 +4,7 @@
#路径增加
import sys
import os
+import re
src_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
@@ -45,68 +46,18 @@
from muagent.schemas.db import GBConfig
from muagent.service.ekg_construct import EKGConstructService
from muagent.service.ekg_inference import IntentionRouter
+from muagent.schemas.ekg.ekg_reason import LingSiResponse
# from loguru import logger as logging
from src.utils.call_llm import call_llm, extract_final_result , robust_call_llm
from src.geabase_handler.geabase_handlerplus import GB_handler
from src.utils.normalize import hash_id
from src.memory_handler.ekg_memory_handler import memory_handler_ekg
+from src.graph_search.task_node_agent import TaskNodeAgent
-def is_valid_json(string):
- try:
- json.loads(string)
- return True
- except ValueError:
- return False
-def robust_json_loads(llm_result):
- '''
- 将llm的输出转换为json, 有一定的概率这个json 在格式上是不完备的。不然少了 } 或者]
- '''
-
- try:
- llm_result_json = json.loads(llm_result)
- except:
- # for _ in range(2):
- try:
- logging.info('大模型的输出转换json报错, 现在再用大模型修正一遍')
- input_query = f'''
- ##输入##
- {llm_result}
- ##任务##
- 上述**输入**可能是一个不完整的dict,如果是这种情况,请将上述转换**输入**为完整的 dict。 不要新增任何内容,只将格式补全/修正为一个完整dict格式即可
- 上述**输入**也可能是包含多个dict,如果是这种情况,只保留第一个dict即可。 不要新增任何内容,只将格式补全/修正为一个完整dict格式即可
- ##直接输出结果##
- ''' + '以{开头,任何其他内容都是不允许的!'
- ouput_result = robust_call_llm(input_query, 'gpt_4')
- logging.info(f'修正后的输出为{ouput_result}')
- llm_result_json = json.loads(ouput_result)
-
-
- except:
- logging.info('大模型的输出转换json报错, 现在再用大模型修正一遍')
- input_query = f'''
- ##输入##
- {llm_result}
- ##任务##
- 上述**输入**可能是一个不完整的dict,如果是这种情况,请将上述转换**输入**为完整的 dict。 不要新增任何内容,只将格式补全/修正为一个完整dict格式即可
- 上述**输入**也可能是包含多个dict,如果是这种情况,只保留第一个dict即可。 不要新增任何内容,只将格式补全/修正为一个完整dict格式即可
- ##直接输出结果##
- ''' + '以{开头,任何其他内容都是不允许的!'
- ouput_result = robust_call_llm(input_query, 'gpt_4',temperature = 0.1)
- logging.info(f'修正后的输出为{ouput_result}')
- llm_result_json = json.loads(ouput_result)
- return llm_result_json
-
-def agent_respond_extract_output(input_str):
- if 'output' not in input_str:
- return input_str
- input_str = input_str.split('output')[-1] #取output后面的值
- input_str = input_str.replace('"', '').replace(':', '').replace('}', '').replace('{', '') #去除掉可能的符号
- return input_str
-
class graph_search_tool():
@@ -120,675 +71,9 @@ def __init__(self, geabase_handler, memory_manager, llm_config=None):
self.gb_handler = GB_handler(self.geabase_handler) #gb_handler 以 geabase_handler 为基础,封装了一些处理逻辑
self.memory_handler = memory_handler_ekg(memory_manager, geabase_handler)
self.llm_config = llm_config
-
+ self.task_node_agent = TaskNodeAgent(self.geabase_handler, self.memory_manager, self.llm_config)
- def robust_call_llm_with_llmname(self, input_query, rootNodeId, stop = None, temperature = 0, presence_penalty=0):
-
- #logging.info('using a gpt_4')
- res = call_llm(input_content = input_query, llm_model = 'gpt_4', stop = stop,temperature=temperature, presence_penalty=presence_penalty,
- llm_config=self.llm_config)
- return res
-
-
- model_name = self.gb_handler.get_extra_tag( rootNodeId = rootNodeId, rootNodeType = 'opsgptkg_task', key = 'model_name')
- logging.info(f'model_name is {model_name}')
- if model_name == None:
- model_name = 'gpt_4'
- if model_name == 'gpt_4':
- try:
- logging.info('using a gpt_4')
- res = call_llm(input_content = input_query, llm_model = 'gpt_4', stop = stop,temperature=temperature, presence_penalty=presence_penalty)
- except:
- logging.info('using Qwen2_72B_Instruct_OpsGPT')
- res = call_llm(input_content = input_query, llm_model = 'Qwen2_72B_Instruct_OpsGPT',stop = stop, temperature=temperature,presence_penalty=presence_penalty)
- return res
- else:
- try:
- logging.info('using Qwen2_72B_Instruct_OpsGPT')
- res = call_llm(input_content = input_query, llm_model = 'Qwen2_72B_Instruct_OpsGPT',stop = stop, temperature=temperature,presence_penalty=presence_penalty)
- except:
- logging.info('using a gpt_4')
- res = call_llm(input_content = input_query, llm_model = 'gpt_4', stop = stop,temperature=temperature, presence_penalty=presence_penalty)
- return res
-
- def stop_at_observation(self, historytext, llm_text):
-
-
- # 检查llm_text是否完全不包含"observation"
- if "observation" not in llm_text:
- return llm_text
-
- # 检查historytext中是否有"observation"
- if "observation" not in historytext:
- return llm_text.split("observation", 1)[0]
-
- # 统计historytext中的"observation"数量
- n_hist = historytext.count("observation")
-
- # 统计llm_text中的"observation"数量
- n_llm = llm_text.count("observation")
-
- # 如果两者数量相等,返回第n个observation之后的文本
- if n_hist == n_llm:
- parts = llm_text.rsplit("observation", n_hist)
- return parts[-1] # 返回最后一个部分(即最后一个"observation"之后的文本)
-
- # 如果上述条件都不满足,找到第n个和第n+1个"observation"之间的文本
- else:
- parts = llm_text.split("observation", n_hist + 1) # 分割出n+1个部分
- if len(parts) > n_hist + 1: # 确保有足够多的部分来获取所需范围
- return parts[n_hist] # 返回第n个和第n+1个observation之间的文本
- else:
- return "" # 如果没有找到合适的范围,则返回空字符串或其他适当处理
-
-
-
- def sort_messages_by_time(self, messages):
- # 使用 sorted() 对列表进行排序,key 参数指定排序依据
- return sorted(messages, key=lambda msg: msg.start_datetime)
-
- def endcheck(self, nodeId, nodeType, oneNodeName='None', oneNodeDescription='None', current_node_history_json='None'):
- '''
- 借助gpt4 来帮忙判断本阶段任务是否结束
- '''
- oneNode = self.geabase_handler.get_current_node(attributes={"id": nodeId,},
- node_type=nodeType)
- extra = oneNode.attributes['extra']
- print(extra)
- try:
-
- extra_json = json.loads(extra)
- if extra_json['endcheck'] == 'True':
- endcheck_flag = True
- else:
- endcheck_flag = False
- except:
- endcheck_flag= False
-
- if endcheck_flag == False:
- return True #endcheck 通过
-
- else:
- endcheck_llm_input = oneNodeName + '\n' +oneNodeDescription+ '\n##已有步骤##\n' + json.dumps(current_node_history_json,ensure_ascii=False) + \
- '\n##请结合已有步骤,判断本阶段任务是否结束,只返回中文的 是 或者 否即可,不要输出其他内容:##\n'
-
- logging.info('=============endcheck_llm_result==================')
- logging.info(endcheck_llm_input)
- llm_result = self.robust_call_llm_with_llmname(endcheck_llm_input, nodeId)
- logging.info('=============endcheck_llm_result==================')
- logging.info(llm_result)
-
- if '是' in llm_result:
- return False
- else:
- return True
-
-
- def react_running(self, sessionId, nodeId, nodeType, agent_respond = ''):
- '''
- react 模块 运行
-
- '''
- if agent_respond == None:
- agent_respond = ''
- if type(agent_respond) == str:
- agent_respond = agent_respond.replace('"', '').replace("'", "") #需要去除agent返回中的 " 和 '
- agent_respond = agent_respond_extract_output(agent_respond) # 去除 agent_respond 中的 thought 和 output
- #stpe1 判断当前状态
- get_messages_res = self.memory_handler.nodecount_get( sessionId, nodeId)
-
- if get_messages_res == [] :
- logging.info('当前这个{sessionId} react节点 是第一次运行')
- first_run_react_flag = True
- else:
- if json.loads(get_messages_res[0].role_content)['nodestage'] == 'end' :#在上一轮已经结束了,这一轮还未开始
- logging.info('当前这个{sessionId} react节点在上一轮已经结束了,这一轮还未开始,在这一轮也算是第一次执行')
- first_run_react_flag = True
- else:
- logging.info('当前这个{sessionId} react节点 不是第一次执行')
- first_run_react_flag = False
-
- if first_run_react_flag == True:
- # 当react的状态是 end 或者为空的时候调用此函数,进行初始化 或者 chapter + 1
- self.memory_handler.init_react_count(sessionId, nodeId)
-
- #step2 获取节点名字 + 节点描述
- oneNode = self.geabase_handler.get_current_node(attributes={"id": nodeId,},
- node_type=nodeType)
-
- oneNodeName = oneNode.attributes['name']
- oneNodeDescription = oneNode.attributes['description']
-
- #step3.1 获取memory, 构成给大模型的输入
- #获取memory, 主持人能看到的memory, 和获取tool的memory类似
-
- # tool_ancestor = self.get_tool_ancestor(sessionId, nodeId, nodeType)
- # get_messages_res_sorted = self.get_memory_from_ancestor( tool_ancestor, sessionId, role_tags = None) #此时是主持人,所以需要看到所有的memory,无需加tag。 对于在我这一侧需要运行的llm,一定是看到所有信息,因为我就是主持人
- # assembled_memory = self.assemble_memory(nodeId, nodeType, get_messages_res_sorted)
- assembled_memory = self.get_memory_for_dm(sessionId, nodeId)
- assembled_memory = json.dumps(assembled_memory, ensure_ascii=False)
- logging.info(f'assembled_memory is {assembled_memory}')
-
- #step3.2 获取当前节点的历史运行情况。如果是第一次运行,需要将react node 的name 存入到 memory中
- if first_run_react_flag == True:
- current_node_history = ''
- #第一次运行,对于react模块,只将标题放置在memory里, 因为对于react模块,description太多了,循环的情况下,很罗嗦且超过上下文
- self.memory_handler.react_nodedescription_save(sessionId, nodeId, oneNodeName)
-
- else:
- #不是第一次运行。那么肯定历史history进来
- logging.info(f'#不是第一次运行。那么肯定历史history进来{sessionId}, {nodeId}')
- current_node_history = self.memory_handler.react_current_history_get(sessionId, nodeId)
- # llm_result_truncation + '": [{"content": ' + user_responds
-
- '''
- history 里存储 observation的截断,不包含observation,
- llm_output 输出整个完整的流程(如果是gpt_4, 不能有停用词,因为每次都是从头开始录的),
- self.stop_at_observation,需要接受 current_node_history ,先分析里面有几个observation(N个), 然后我可以往后扩展一个observation, 不包含observation
-
- jsonstring 就转全量的, 但是录入到memory中的时候,注意只录入 N+1 个 observation的信息。
-
-
-
- '''
- #step4 执行 llm,
- if first_run_react_flag == True:
- llm_input = assembled_memory + '\n' + oneNodeName + '\n' + oneNodeDescription + '\n##已有步骤##\n无' + '\n##请输出下一个步骤,切记只输出一个步骤,它应该只是一个dict ##\n'
- logging.info('=============llm_input==================')
- logging.info(llm_input)
- llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
- current_node_history_json = []
- logging.info('=============llm_result==================')
- logging.info(llm_result)
- llm_result_json = robust_json_loads(llm_result)
- if type(llm_result_json)!=dict:
- llm_result_json = llm_result_json[0]
- logging.info('llm的输出应该是一个dict才对, 有时候出现[{one step}], 所以尝试选其中一个元素转换为dict')
- # raise ValueError(f'llm的输出应该是一个dict才对 ')
- current_node_history_json.append(llm_result_json)
- else:
- # current_node_history[-1]["observation"]['content'] = agent_respond
- # llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription+ '\n' + current_node_history + '": [{"content":" ' + agent_respond + '"'
- # llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription+ '\n' + current_node_history + '": [{"content":" ' + agent_respond + '", "memory_tag:' \
- # + '\n '
-
- current_node_history_json = json.loads(current_node_history) #历史记录里可能包含虚假信息
- logging.info(f'current_node_history_json is {current_node_history_json}')
- if current_node_history_json[-1]['action']['agent_name'] != '主持人':
- current_node_history_json[-1]["observation"][0]['content'] = agent_respond #将历史中最后一次主持人幻觉的输出,转换为用户补充的输入
- try:
- current_node_history_json[-1]["thought"] = '' #在非主持人环节时,应该将thought 设置为''
- except:
- pass
- llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription+ '\n##已有步骤##\n' + json.dumps(current_node_history_json,ensure_ascii=False) + \
- '\n##请输出下一个步骤,切记只输出一个步骤,它应该只是一个dict ##\n'
- logging.info('=============llm_input==================')
- logging.info(llm_input)
- llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
- logging.info('=============llm_result==================')
- logging.info(llm_result)
- llm_result_json = robust_json_loads(llm_result)
- if type(llm_result_json)!=dict:
- llm_result_json = llm_result_json[0]
- logging.info('llm的输出应该是一个dict才对, 有时候出现[{one step}], 所以尝试选其中一个元素转换为dict')
- # raise ValueError(f'llm的输出应该是一个dict才对 ')
- current_node_history_json.append(llm_result_json)
-
- retry_llm = 0
- while(( retry_llm <= 8) and ("taskend" not in llm_result) and (llm_result_json['action']['agent_name'] == '主持人' )):
- logging.info('由于是主持人发言,情况继续')
-
- endcheck_res = self.endcheck( nodeId, nodeType, oneNodeName, oneNodeDescription, current_node_history_json)
- if endcheck_res== False:
- logging.info('endchek没有通过,主持人发言终止, 强行将 llm_result == {"action": "taskend"}')
- llm_result = json.dumps({"action": "taskend"})
- llm_result_json = robust_json_loads(llm_result)
- current_node_history_json.append(llm_result_json)
- break
-
-
-
- llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription+ '\n##已有步骤##\n' + json.dumps(current_node_history_json,ensure_ascii=False) + \
- '\n##请输出下一个步骤,切记只输出一个步骤,它应该只是一个dict ##\n'
- logging.info('=============llm_input==================')
- logging.info(llm_input)
- llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
- logging.info('=============llm_result==================')
- logging.info(llm_result)
- llm_result_json = robust_json_loads(llm_result)
- current_node_history_json.append(llm_result_json)
- if type(llm_result_json)!=dict:
- llm_result_json = llm_result_json[0]
- logging.info('llm的输出应该是一个dict才对, 有时候出现[{one step}], 所以尝试选其中一个元素转换为dict')
- raise ValueError(f'llm的输出应该是一个dict才对 ')
- retry_llm = retry_llm + 1
-
- logging.info('大模型调用结束')
-
-
-
- #step5 分析 llm_result 执行结果
- #llm_result 为最后一次llm的输出
- if 'taskend' in llm_result:
-
-
- react_flag = 'end'
- logging.info(f'当前为{react_flag}, 将本次节点的count设置为end ')
- self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'end')
- elif 'observation' in llm_result:
- react_flag = 'waiting_other_agent'
-
- logging.info(f'当前为{react_flag}, 尝试补充字符使得llm_result_truncation能转为json格式 ')
- # llm_result_truncation_json = json.loads(current_node_history + llm_result_truncation + '":[]}]')
- # llm_result_truncation_json = json.loads(current_node_history + llm_result_truncation + '":[]}]')
-
- #提取此时应该执行的agent_name
- execute_agent_name = current_node_history_json[-1]['action']['agent_name']
- execute_player_name = current_node_history_json[-1]['action']['player_name']
-
- #将该节点的count 设置为 runninng
- self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'running')
-
-
-
-
- #step6 存储 history # for DM
- logging.info(f'存储 history # for DM')
- if react_flag == 'waiting_other_agent' and first_run_react_flag == True:
- #step6.1 存储 llm_result_truncation
-
- self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False) )
-
- elif react_flag == 'waiting_other_agent' and first_run_react_flag == False:
- self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False))
-
- elif react_flag == 'end' and first_run_react_flag == True: #第一次运行就运行到结尾了
-
- self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False))
-
- elif react_flag == 'end' and first_run_react_flag == False: #第N次运行 运行到结尾了
- self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False))
-
-
- #step7 存储 memory # for other agent
- logging.info(f'存储 memory # for other agent')
- if react_flag == 'waiting_other_agent' and first_run_react_flag == True:
- logging.info('#第一次运行 等待agent返回')
- self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
- if react_flag == 'waiting_other_agent' and first_run_react_flag == False:
- logging.info('#第N次运行 等待agent返回')
- self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
-
- elif react_flag == 'end' and first_run_react_flag == True: #第一次运行就运行到结尾了:
- logging.info('#第一次运行就运行到结尾了:')
- self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
-
- elif react_flag == 'end' and first_run_react_flag == False: #第N次运行 运行到结尾了
- logging.info('#第N次运行 运行到结尾了')
- self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
-
-
- #step8 返回 question_plan
- if react_flag == 'end':
- question_plan = []
- elif react_flag == 'waiting_other_agent':
- question_plan = self.react_get_question_plan(sessionId, nodeId, execute_agent_name)
- else:
- question_plan = []
- return react_flag, question_plan
-
-
-
- def get_memory_for_tool(self,sessionId, nodeId):
- '''
- react 节点中 对于一个 主持人, 构建memory的函数。
- 只需要将祖先节点弄好即可,不需要加自己,因为自己有 history进行维护
- '''
- nodeType = 'opsgptkg_task' #假设一定是task节点
- tool_ancestor = self.get_tool_ancestor(sessionId, nodeId, nodeType)
- get_messages_res_sorted = self.get_memory_from_ancestor( tool_ancestor, sessionId, role_tags = None) #对于tool,假设都是主持人的工具,所以需要看到所有的memory,无需加tag。
- assembled_memory = self.assemble_memory_for_tool(nodeId, nodeType, get_messages_res_sorted) # tool 的memory需要兼顾以前的格式
- return assembled_memory
-
-
- def get_memory_for_dm(self,sessionId, nodeId):
- '''
- react 节点中 对于一个 主持人, 构建memory的函数。
- 只需要将祖先节点弄好即可,不需要加自己,因为自己有 history进行维护
- '''
- nodeType = 'opsgptkg_task' #假设一定是task节点
- tool_ancestor = self.get_tool_ancestor(sessionId, nodeId, nodeType)
- get_messages_res_sorted = self.get_memory_from_ancestor( tool_ancestor, sessionId, role_tags = None) #此时是主持人,所以需要看到所有的memory,无需加tag。 对于在我这一侧需要运行的llm,一定是看到所有信息,因为我就是主持人
- assembled_memory = self.assemble_memory_for_reactagent(nodeId, nodeType, get_messages_res_sorted)
- return assembled_memory
-
- def get_memory_for_computer_agent(self,sessionId, nodeId, execute_agent_name):
- '''
- react 节点中 对于一个 agent_x (电脑agent), 构建memory的函数
- '''
- nodeType = 'opsgptkg_task' #假设一定是task节点
- tool_ancestor = self.get_tool_ancestor(sessionId, nodeId, nodeType)
-
- if nodeId not in [nodeinancestor['nodeId'] for nodeinancestor in tool_ancestor]:
- tool_ancestor = tool_ancestor + [{'nodeId': nodeId, 'nodeType':nodeType}]
-
- #需要将自己也加上,方便在下一步memory检索的时候把本节点的历史也得到,由于在生成str的时候,第一时间就save本届点的memory,所以这样做是可以的
- #需要注意的是,给agent和给主持人看到的输入是不一样的。 主持人看到的是 memory + node_text + currentnodehistory, currentnodehistory 是文本,因为主持人需要维持一个 结构化的输出。
- #agent看到的是 memory,agent只需要说出一句话即可
- get_messages_res_sorted = self.get_memory_from_ancestor(tool_ancestor, sessionId, execute_agent_name) #此时是调用外部agent,所以需要加tag
- assembled_memory = self.assemble_memory_for_reactagent(nodeId, nodeType, get_messages_res_sorted)
- return assembled_memory
-
- def react_get_question_plan(self, sessionId, nodeId, execute_agent_name):
- '''
- 如果react模块 react_flag==waiting_other_agent, 则需要返回 question_plan
- 可能需要区分人来回答还是大模型来回答
- '''
- if '人类' in execute_agent_name: #需要提交给人
- '''
- example: {'toolDescription': '请用户回答',
- 'currentNodeId': 'INT_3',
- 'memory': None,
- 'type': 'userProblem',
- 'questionDescription': {'questionType': 'essayQuestion',
- 'questionContent': {'question': '请输入',
- 'candidate': None }}}
-
- 一定是一个问答题, 无需提问,这里question变成一个固定值了。 最重要的是把memory 也是空, 因为历史信息在对话里已经显示了。
- '''
- toolPlan = [ {'toolDescription': '请用户回答',
- 'currentNodeId': nodeId,
- 'memory': None,
- 'type': 'userProblem',
- 'questionDescription': {'questionType': 'essayQuestion',
- 'questionContent': {'question': '请玩家根据当前情况发言',
- 'candidate': None }}} ]
-
- return toolPlan
- else: #需要执行agent
- '''
- example :[{
- "toolDescription": "toolDescriptionA",
- "currentNodeId": "INT_1",
- "memory": JsonStr,
- "type":"onlyTool",
- }]
- '''
-
-
- assembled_memory = self.get_memory_for_computer_agent(sessionId, nodeId, execute_agent_name)
- react_memory = assembled_memory
- if type(react_memory)!= str:
- react_memory = json.dumps(react_memory, ensure_ascii=False)
-
- # logging.info(f'react_memory is {react_memory}')
- toolPlan = [{
- "toolDescription": execute_agent_name,
- "currentNodeId": nodeId,
- "memory": react_memory,
- "type":"reactExecution",
- }]
- return toolPlan
-
-
-
- def get_tool_ancestor(self, sessionId, start_nodeid = '为什么余额宝没收到收益_complaint', start_nodetype = 'opsgptkg_task'):
-
- #1 对每个nodeid,得到其memory, 首先需要遍历其所有的祖先task节点,将相关信息记录下来
- tool_ancestor = []
- nodeid_in_search = [{'nodeId':start_nodeid, 'nodeType':start_nodetype}]
- nodeid_in_search_all = [{'nodeId':start_nodeid, 'nodeType':start_nodetype}]
-
- while len(nodeid_in_search)!= 0:
- nodedict_now = nodeid_in_search.pop()
- nodeid_now = nodedict_now['nodeId']
- nodetype_now = nodedict_now['nodeType']
-
-
- #查祖先节点 reverse=True
- neighborNodes = self.geabase_handler.get_neighbor_nodes(attributes={"id": nodeid_now,}, node_type=nodetype_now, reverse=True)
- #print(nodeid_now, nodetype_now, neighborNodes, '=========')
-
-
- for i in range(len(neighborNodes) ):
- # if res['resultSet']['rows'][i]['columns'][0] == {}:
- # continue
-
- # else:
- nodeid_new = neighborNodes[i].id
- nodetype_new = neighborNodes[i].type
- if nodeid_new in [kk['nodeId'] for kk in nodeid_in_search]: #已经探索过了,不再探索
- continue
-
- elif nodetype_new == 'opsgptkg_task': #如果是task节点,则加入到tool_plan中,同时继续往前延展。
-
- #查询该任务节点有没有收到过response,直接查询count,不用在意count的个数
- message_res = self.memory_handler.nodecount_get( sessionId, nodeid_new) #查看这个节点的count计数
-
- if len(message_res) == 0: #这个task节点没有memory 或者没有收到response,则不再往前延展,减少geabase查询个数
- print(f'#这个task节点{nodeid_new}没有memory 或者没有收到response,则不再往前延展,减少geabase查询个数')
- continue
- else:
- print('#如果是task节点,则加入到tool_plan中,同时继续往前延展。 get_tool_ancestor')
- tool_ancestor.insert(0, {'nodeId':nodeid_new, 'nodeType':nodetype_new}) # 倒叙插入到图谱中
- if {'nodeId':nodeid_new, 'nodeType':nodetype_new} not in nodeid_in_search_all :
- nodeid_in_search_all.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
- nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
-
-
-
-
- elif nodetype_now != 'opsgptkg_intent' and nodetype_new == 'opsgptkg_intent':
- #第一次出现意图节点,需要尝试
- #print('#第一次出现意图节点,需要尝试')
- tool_ancestor.insert(0, {'nodeId':nodeid_new, 'nodeType':nodetype_new}) # 倒叙插入到图谱中
- if {'nodeId':nodeid_new, 'nodeType':nodetype_new} not in nodeid_in_search_all :
- nodeid_in_search_all.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
- nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
- # nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
- elif nodetype_now == 'opsgptkg_intent' and nodetype_new == 'opsgptkg_intent':
- #从意图节点再次碰到意图节点,终止
- #print('#从意图节点再次碰到意图节点,终止')
- pass
- elif nodetype_new == 'opsgptkg_phenomenon':
- #如果是事实节点,则继续
- tool_ancestor.insert(0, {'nodeId':nodeid_new, 'nodeType':nodetype_new}) # 倒叙插入到图谱中
- if {'nodeId':nodeid_new, 'nodeType':nodetype_new} not in nodeid_in_search_all :
- nodeid_in_search_all.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
- nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
- # nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
-
- else:##如果是不是task节点,也不是意图节点,不加入到tool_plan中,继续延展
- #print('#如果是不是task节点,也不是意图节点,不加入到tool_plan中,继续延展')
- if {'nodeId':nodeid_new, 'nodeType':nodetype_new} not in nodeid_in_search_all :
- nodeid_in_search_all.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
- nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
- # nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
-
- tool_ancestor_new = []
- for i in range(len(tool_ancestor)):
- item_i = tool_ancestor[i]
- if item_i not in tool_ancestor_new:
- tool_ancestor_new.append(item_i)
- logging.info(f'geabase_getmemory tool_ancestor_new 的个数为{len(tool_ancestor_new)}')
- logging.info(f'geabase_getmemory tool_ancestor 的个数为{len(tool_ancestor)}')
- #print('tool_ancestor_new', tool_ancestor_new)
- tool_ancestor = tool_ancestor_new
- return tool_ancestor
-
- def get_memory_from_ancestor(self, tool_ancestor, sessionId, role_tags = None):
- '''
- 给定了一堆祖先节点 + 当前节点,从中提取出memory
- 祖先节点
- 对于祖先tool 而言,提取出 nodedescription、tool responds
- 对于祖先react 而言,提取出 nodedescription(name), 每一条message
- 将这个list 按照时间顺序排好
- 当前节点
- 对于tool 而言,没有当前节点的memory
- 对于react节点而言, 有运行到中间状态的memory
- 将这个list 按照时间顺序排好
- 按照固定格式输出 memory_list_output
-
- role_tags 是一系列list, 如果指定为空,则没有约束
-
- '''
- if role_tags == None:
- role_tags = None
- else:
- role_tags = ['all'] + [role_tags]
- # print(role_tags)
- message_res_list = []
- for i in range(len(tool_ancestor)):
- logging.info(f'geabase_getmemory 查询第{i}个祖先节点')
- # logging.info(tool_ancestor[i])
- nodeId = tool_ancestor[i]['nodeId']
- nodeType = tool_ancestor[i]['nodeType']
- logging.info(f'【查询】memory message_index {nodeId}; sessionId {sessionId} ')
- # if self.gb_handler.geabase_is_react_node(nodeId, nodeType) == False:
- #当前节点为tool or react 节点,一次性获得该节点所有的 chapter的memory数据
- if nodeType == 'opsgptkg_task':
- memory_res = self.memory_manager.get_memory_pool_by_all({
- # "message_index": hash_id(nodeId, sessionId), #nodeId.replace(":", "_").replace("-", "_"),
- 'user_name': hash_id(nodeId),
- "chat_index": sessionId,
- "role_tags": role_tags,
- }
- )
- message_res = memory_res.get_messages()
- message_res_list = message_res_list + message_res
-
-
-
- elif nodeType == 'opsgptkg_intent':
- #如果祖先节点是意图节点, 意图节点的memory 暂时不分 tag
- memory_res = self.memory_manager.get_memory_pool_by_all({
- "message_index": hash_id(nodeId, sessionId), #nodeId.replace(":", "_").replace("-", "_"),
- "chat_index": sessionId,
- "role_type": "user"})
-
- message_res = memory_res.get_messages()
- message_res_list = message_res_list + message_res
-
- #根据时间排序
- # message_res_list = message_res_list[::-1] #倒转message, 因为发现tbase存数据是类似堆栈的格式存的。 后来者在上; 似乎不对
- get_messages_res_sorted = self.sort_messages_by_time(message_res_list)
- return get_messages_res_sorted
-
-
- def assemble_memory_for_reactagent(self, nodeId, nodeType, get_messages_res_sorted):
- '''
- 假设 祖先节点已经选择好了,而且 节点中相关的message也选择好了, 也经过时间排序了
- react 节点中 对于一个 agent_x (电脑agent), 组装memory的函数
- '''
-
- if self.gb_handler.geabase_is_react_node(nodeId, nodeType) == False:
- raise ValueError(f'当前应该是 react 节点才对 ')
- else: #react 节点
- memory_list = []
- for i in range(len( get_messages_res_sorted ) ):
- if get_messages_res_sorted[i].role_name in ['firstUserInput', 'function_caller', 'user' ]:
- # # 第一次输入, tool返回, tool描述,
- # memory_list.append({
- # 'role_type': get_messages_res_sorted[i].role_type,
- # 'role_name': get_messages_res_sorted[i].role_name,
- # 'role_content': get_messages_res_sorted[i].role_content}
- # )#此处存疑,需要实验后才知道效果如何,注释后,相当于主持人和agent只能看到tool的标题和执行结果,且以list的形式呈现
- memory_list.append(get_messages_res_sorted[i].role_content)
- elif get_messages_res_sorted[i].role_type in ['react_memory_save']:
- # react 模块各个角色说的话,
- memory_list.append(get_messages_res_sorted[i].role_content)
- return memory_list
-
- def assemble_memory_for_tool(self, nodeId, nodeType, get_messages_res_sorted):
- if self.gb_handler.geabase_is_react_node(nodeId, nodeType) == False:
- '''
- '''
- memory_list = []
- for i in range(len( get_messages_res_sorted ) ):
- if get_messages_res_sorted[i].role_name == 'firstUserInput':
- memory_list.append({
- 'role_type': 'user',
- 'role_name': 'firstUserInput',
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- if get_messages_res_sorted[i].role_name == 'user':
- memory_list.append({
- 'role_type': 'user',
- 'role_name': 'None',
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- if get_messages_res_sorted[i].role_name == 'function_caller':
- memory_list.append({
- 'role_type': 'observation',
- 'role_name': 'function_caller',
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- if get_messages_res_sorted[i].role_type in ['react_memory_save']:
- # react 模块各个角色说的话,
- memory_list.append({
- 'role_type': get_messages_res_sorted[i].role_type,
- 'role_name': get_messages_res_sorted[i].role_name,
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- else: #react 节点
- raise ValueError(f'当前应该是 tool task 节点才对 ')
-
- return memory_list
-
- def assemble_memory(self, nodeId, nodeType, get_messages_res_sorted):
- '''
- 组装memory
- get_messages_res_sorted 已经根据时间排序好了。 但是对于tool 和 react模块的memory拼装做法有所不同
- '''
- if self.gb_handler.geabase_is_react_node(nodeId, nodeType) == False:
- '''
-
- '''
- memory_list = []
- for i in range(len( get_messages_res_sorted ) ):
- if get_messages_res_sorted[i].role_name == 'firstUserInput':
- memory_list.append({
- 'role_type': 'user',
- 'role_name': 'firstUserInput',
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- if get_messages_res_sorted[i].role_name == 'user':
- memory_list.append({
- 'role_type': 'user',
- 'role_name': 'None',
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- if get_messages_res_sorted[i].role_name == 'function_caller':
- memory_list.append({
- 'role_type': 'observation',
- 'role_name': 'function_caller',
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- if get_messages_res_sorted[i].role_type in ['react_memory_save']:
- # react 模块各个角色说的话,
- memory_list.append({
- 'role_type': get_messages_res_sorted[i].role_type,
- 'role_name': get_messages_res_sorted[i].role_name,
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- else: #react 节点
- memory_list = []
- for i in range(len( get_messages_res_sorted ) ):
- if get_messages_res_sorted[i].role_name in ['firstUserInput', 'function_caller', 'user' ]:
- # 第一次输入, tool返回, tool描述,
- memory_list.append({
- 'role_type': get_messages_res_sorted[i].role_type,
- 'role_name': get_messages_res_sorted[i].role_name,
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- elif get_messages_res_sorted[i].role_type in ['react_memory_save']:
- # react 模块各个角色说的话,
- memory_list.append({
- 'role_type': get_messages_res_sorted[i].role_type,
- 'role_name': get_messages_res_sorted[i].role_name,
- 'role_content': get_messages_res_sorted[i].role_content}
- )
- return memory_list
+
def search_node_type(self, nodeid_in_subtree, nodeId ):
'''
#返回nodeid_in_subtree 中 nodeId 的nodeType
@@ -829,6 +114,7 @@ def geabase_judgeNodeReachability(self, sessionId,
# else:
nodeid_new = neighborNodes[i].id
nodetype_new = neighborNodes[i].type
+ logging.info(f'======nodeid_new is {nodeid_new} , nodetype_new is {nodetype_new}=====')
if nodetype_new == 'opsgptkg_task' or nodetype_new == 'opsgptkg_phenomenon': #如果是task 或者 phenomenon节点,则加入到tool_plan中,但是不往后续延展了。
tool_parent.append({'nodeId':nodeid_new, 'nodeType': nodetype_new})
elif nodetype_new == 'opsgptkg_intent':
@@ -837,19 +123,23 @@ def geabase_judgeNodeReachability(self, sessionId,
else:##如果是不是task节点,也不是意图节点,不加入到tool_plan中,往后续延展
nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
-
+ logging.info(f'===tool_parent is {tool_parent} ===')
#tool_parent = list(set(tool_parent))
# 将字典转化为元组,然后放入set中去重
unique_set = set(tuple(d.items()) for d in tool_parent)
# 再将元组转回字典
tool_parent = [{k: v for k, v in t} for t in unique_set]
+ if len(tool_parent) == 0:
+ logging.info(f'======geabase_judgeNodeReachability 没有有效的父节点 则一定为可达=====')
+ return True
node_description = self.gb_handler.geabase_getDescription( rootNodeId = start_nodeid, rootNodeType = start_nodetype)
if gb_handler.geabaseGetOnlyOneNodeInfoWithKey( rootNodeId = start_nodeid, rootNodeType = start_nodetype,key = 'accesscriteria') == 'AND' \
or gb_handler.geabaseGetOnlyOneNodeInfoWithKey( rootNodeId = start_nodeid, rootNodeType = start_nodetype,key = 'accesscriteria') == '{"type":"AND"}' \
or gb_handler.geabaseGetOnlyOneNodeInfoWithKey( rootNodeId = start_nodeid, rootNodeType = start_nodetype,key = 'accesscriteria') == None or \
- gb_handler.geabaseGetOnlyOneNodeInfoWithKey( rootNodeId = start_nodeid, rootNodeType = start_nodetype,key = 'accesscriteria') == '':
+ gb_handler.geabaseGetOnlyOneNodeInfoWithKey( rootNodeId = start_nodeid, rootNodeType = start_nodetype,key = 'accesscriteria') == '' or \
+ gb_handler.geabaseGetOnlyOneNodeInfoWithKey( rootNodeId = start_nodeid, rootNodeType = start_nodetype,key = 'accesscriteria') == '{}':
logging.info('#此节点为and处理逻辑')
#此节点为and处理逻辑
logging.info(f'{start_nodeid}的tool_parent为{tool_parent}')
@@ -974,7 +264,7 @@ def fact_branch_judgment(self, current_task, neighbor_node_id_list, next_node_de
thought(尽量不要超过40个字):
'''
logging.info(prompt_temp)
- response = call_llm(input_content = prompt_temp, llm_model = 'Qwen2_72B_Instruct_OpsGPT',llm_config=self.llm_config)# qwen_chat_14b #Qwen_72B_Chat_vLLM
+ response = call_llm(input_content = prompt_temp, llm_model = 'Qwen2_72B_Instruct_OpsGPT' , llm_config=self.llm_config)# qwen_chat_14b #Qwen_72B_Chat_vLLM
logging.info(f'大模型的结果为:{response}')
#final_choice = extract_final_result(json.loads(response.text)['data'], special_str = "最终结果为:" )
final_choice = extract_final_result(response, special_str = "最终结果为:" )
@@ -1055,7 +345,27 @@ def write_analysis_memory(self, sessionId, neighbor_node_id_list, chosen_nodeid)
-
+ def toolResponseError_check(self, lingsi_response:LingSiResponse)->bool:
+ #判断当前tool 执行是否成功, 主要根据 observation中是否有toolResponse字段来判断
+ print('=======print(lingsi_response)===========')
+ print(lingsi_response.dict())
+ try:
+ lingsi_response.observation = json.loads(lingsi_response.observation)
+ except:
+ pass
+ if lingsi_response.type == None:
+ return False #第一次输入,不涉及tool 的返回
+ elif lingsi_response.observation == None:
+ return True
+ elif 'toolResponse' not in list(lingsi_response.observation.keys()):
+ return True
+ elif 'toolKey' in list(lingsi_response.observation.keys()) and lingsi_response.observation['toolKey'] == 'COMMON_QA_QWEN:MAIN_SITE:HttpClient.execute':
+ #灵思兜底逻辑
+ logging.info(f'当前为灵思兜底逻辑,toolResponseError_check == True')
+ return True
+ else:
+ return False
+ #raise ValueError('toolResponseError_check 出现未知情况')
def geabase_summary_check(self, sessionId, nodeid_in_subtree):
logging.info('geabase_summary_check start 判断当前状态下是不是已经走到了需要进行summary的情况')
@@ -1182,7 +492,8 @@ def geabase_summary_check(self, sessionId, nodeid_in_subtree):
def geabase_nodediffusion_plus(self, sessionId,
- start_nodeid = '为什么余额宝没收到收益_complaint', start_nodetype = 'opsgptkg_intent', agent_respond = None):
+ start_nodeid = 'complaint', start_nodetype = 'opsgptkg_intent',
+ agent_respond = None, lingsi_response = None):
'''
增加了react模块的逻辑
进行单步的向后扩散, 得到下一步应该执行的计划
@@ -1192,6 +503,7 @@ def geabase_nodediffusion_plus(self, sessionId,
logging.info('================geabase_nodediffusion_plus start===================')
logging.info('===================================')
logging.info('===================================')
+ logging.info(f'agent_respond is {agent_respond}; lingsi_response is {lingsi_response}')
# 0 创建 GB_handler的实例
gb_handler = self.gb_handler
self.memory_handler = memory_handler_ekg(self.memory_manager, self.geabase_handler)
@@ -1207,8 +519,8 @@ def geabase_nodediffusion_plus(self, sessionId,
if node_completed_flag == False: #没有执行完,需要继续执行
#
- logging.info(f'react节点{start_nodeid}没有执行完,需要继续执行')
- runningFlag, reactPlan = self.react_running( sessionId, start_nodeid, start_nodetype, agent_respond )
+ logging.info(f'类react节点{start_nodeid}没有执行完,需要继续执行')
+ runningFlag, reactPlan = self.task_node_agent.task_running(sessionId, start_nodeid, start_nodetype, lingsi_response )
#继续执行还是没有执行完
if runningFlag == 'waiting_other_agent':
@@ -1238,6 +550,7 @@ def geabase_nodediffusion_plus(self, sessionId,
nodetype_now = nodedict_now['nodeType']
neighborNodes = self.geabase_handler.get_neighbor_nodes(attributes={"id": nodeid_now,}, node_type=nodetype_now, reverse=False)
+ logging.info(f'neighborNodes is {neighborNodes}')
if self.gb_handler.all_nodetype_check(rootNodeId = nodeid_now, rootNodeType = nodetype_now,
neighborNodeType = 'opsgptkg_phenomenon') == True:
@@ -1287,7 +600,10 @@ def geabase_nodediffusion_plus(self, sessionId,
# logging.info(f'{nodeid_new}#是task 节点中的 react, 节点尝试执行')
#是task 节点中的 react, 节点尝试执行
# logging.info(f'sessionId {sessionId}, nodeid_new {nodeid_new}, nodetype_new {nodetype_new}')
- runningFlag, reactPlan = self.react_running( sessionId, nodeid_new, nodetype_new, None )#这种时候是react节点第一次运行,一定是主持人,一定要看到全局信息
+
+ runningFlag, reactPlan = self.task_node_agent.task_running( sessionId, nodeid_new, nodetype_new, None )
+ logging.info(f'前面节点执行完毕后,在向后探索的过程中,执行了task_running,执行结果为runningFlag is {runningFlag}, reactPlan is {reactPlan}' )
+ #这种时候是react节点第一次运行,一定是主持人,一定要看到全局信息
# logging.info(f'{nodeid_new}#继续执行还是没有执行完, 需要留下 reactPlan,且不往后面扩展')
#继续执行还是没有执行完, 需要留下 reactPlan,且不往后面扩展
@@ -1300,7 +616,11 @@ def geabase_nodediffusion_plus(self, sessionId,
#这个react 执行了一下执行完了. 继续, tool_plan中不标记这个节点,往后探索即可。
nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
-
+ #
+ elif nodetype_new == 'opsgptkg_analysis': #如果是opsgptkg_analysis节点, 将其标注为已到达
+ self.write_analysis_memory( sessionId, [nodeid_new], None)
+
+
else:##如果是不是task节点,不加入到tool_plan中,往后续延展
nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
@@ -1308,19 +628,20 @@ def geabase_nodediffusion_plus(self, sessionId,
# unique_set = set(tuple(sorted(d.items())) for d in tool_plan) #暂时去掉去重,但不知有何问题
# 将去重后的元组转换回字典形式,得到去重后的list
# tool_plan = [dict(t) for t in unique_set] #暂时去掉去重,但不知有何问题
- # logging.info('====tool_plan===在去重之后===')
- # logging.info(f'tool_plan {tool_plan}')
- # logging.info('====tool_plan======')
+ logging.info('====tool_plan===在去重之后===')
+ logging.info(f'tool_plan {tool_plan}')
+ logging.info('====tool_plan======')
#2 判断这些tool的父节点是否都已经有observation, 即判断可达性, 进行筛选
- tool_plan_2 = []
+ tool_plan_reachable = []
for i in range(len(tool_plan)):
nodeId = tool_plan[i]['nodeId']
nodeType = tool_plan[i]['nodeType']
if self.geabase_judgeNodeReachability( sessionId,
nodeId, nodeType) == True:
- tool_plan_2.append({'nodeId':nodeId, 'nodeType':nodeType})
- # logging.info(f'tool_plan_2 经过可达性判断删选后的 {tool_plan_2}')
+ tool_plan_reachable.append( tool_plan[i] )
+ # logging.info(f'tool_plan_reachable 经过可达性判断删选后的 {tool_plan_reachable}')
+
#3 获取每个tool的memory
tool_plan_return = []
@@ -1331,10 +652,12 @@ def geabase_nodediffusion_plus(self, sessionId,
# }
- for i in range(len(tool_plan_2)):
- nodeId = tool_plan[i]['nodeId']
- nodeType = tool_plan[i]['nodeType']
- if 'reactFlag' not in tool_plan[i].keys():
+ for i in range(len(tool_plan_reachable)):
+ nodeId = tool_plan_reachable[i]['nodeId']
+ nodeType = tool_plan_reachable[i]['nodeType']
+
+ if 'reactFlag' not in tool_plan_reachable[i].keys():
+ # 这里表示是tool 执行
# memory = self.geabase_getmemory( sessionId, nodeId, nodeType)
#获取memory, 这个是task tool的memory,
# tool_ancestor = self.get_tool_ancestor( sessionId, nodeId, nodeType)
@@ -1342,7 +665,7 @@ def geabase_nodediffusion_plus(self, sessionId,
# get_messages_res_sorted = self.get_memory_from_ancestor( tool_ancestor, sessionId, role_tags = None) #此时是nodeId是一个tool,假设tool一定是主持人的工具,能看到所有tag的memory
# memory = self.assemble_memory( nodeId, nodeType, get_messages_res_sorted)
- memory = self.get_memory_for_tool(sessionId, nodeId)
+ memory = self.task_node_agent.get_memory_for_tool(sessionId, nodeId)
# logging.info(f'{nodeId}的 memory is {memory}')
toolDescription = self.gb_handler.geabase_getDescription( rootNodeId = nodeId, rootNodeType = nodeType)
@@ -1358,12 +681,19 @@ def geabase_nodediffusion_plus(self, sessionId,
)
else:
#对于react 模块的 memory,另有方法提取,在制定plan的时候就提取了。
+ #这里也包括 parallel 和 plan类型节点
+ #
+ for m in range(len(tool_plan_reachable[i]['reactPlan'])) :#tool_plan_reachable[i]['reactPlan'] 本身就是一个list, 必须先取元素,否则格式会出错
+ tool_plan_return.append( tool_plan_reachable[i]['reactPlan'][m] )
- tool_plan_return.append( tool_plan[i]['reactPlan'][0] ) #tool_plan[i]['reactPlan'] 本身就是一个list, 必须先取元素,否则格式会出错
-
-
-
- return tool_plan, tool_plan_return
+ logging.info('===================================')
+ logging.info('===================================')
+ logging.info('================geabase_nodediffusion_plus end===================')
+ logging.info('===================================')
+ logging.info('===================================')
+ logging.info(f'tool_plan_reachable is {tool_plan_reachable}, tool_plan_return is {tool_plan_return}')
+
+ return tool_plan_reachable, tool_plan_return
if __name__ == "__main__":
diff --git a/muagent/service/ekg_reasoning/src/graph_search/graph_search_main.py b/muagent/service/ekg_reasoning/src/graph_search/graph_search_main.py
index 92b9696..2051bcc 100644
--- a/muagent/service/ekg_reasoning/src/graph_search/graph_search_main.py
+++ b/muagent/service/ekg_reasoning/src/graph_search/graph_search_main.py
@@ -44,6 +44,11 @@
from muagent.schemas.db import GBConfig
from muagent.service.ekg_construct import EKGConstructService
from muagent.service.ekg_inference import IntentionRouter
+from muagent.schemas.ekg.ekg_reason import LingSiResponse, ResToLingsi
+if os.environ['operation_mode'] == 'antcode': # 'open_source' or 'antcode'
+ from muagent.service.web_operation.web_act_ant import WebAgent
+
+
#内部其他函数
from src.utils.call_llm import call_llm, extract_final_result
@@ -54,11 +59,13 @@
if os.environ['operation_mode'] == 'antcode': # 'open_source' or 'antcode'
#内部的意图识别接口调用函数
from src.intention_recognition.intention_recognition_tool import intention_recognition_ekgfunc, intention_recognition_querypatternfunc, intention_recognition_querytypefunc
+ from src.generalization_reasoning.generalization_reason import GeneralizationReason
from src.question_answer.qa_function import qa_class
from src.memory_handler.ekg_memory_handler import memory_handler_ekg
from src.graph_search.call_old_fuction import call_old_fuction
+
# 配置logging模块
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s - %(lineno)d', level=logging.INFO)
@@ -69,7 +76,7 @@ class graph_search_process():
'''
图谱推理主流程class
'''
- def __init__(self, geabase_handler, memory_manager, intention_router, scene , sessionId, currentNodeId,
+ def __init__(self, geabase_handler, memory_manager, intention_router, lingsi_response, scene , sessionId, currentNodeId,
observation, userAnswer, inputType, startRootNodeId, intentionRule, intentionData,
startFromRoot = True,
index_name = 'ekg_migration_new', unique_name="EKG",
@@ -77,6 +84,7 @@ def __init__(self, geabase_handler, memory_manager, intention_router, scene , s
):
self.memory_manager = memory_manager #memory_init(index_name = 'ekg_migration', unique_name="EKG")
+ self.lingsi_response = lingsi_response
self.scene = scene
self.sessionId = sessionId
self.currentNodeId = currentNodeId
@@ -111,8 +119,11 @@ def __init__(self, geabase_handler, memory_manager, intention_router, scene , s
#计算起始时间
self.start_datetime = int(time.time()*1000)
-
- def state_judgement(self):
+ # def scene_judgement(self, scene:str)->str:
+ # if scene == 'generalizationReasoning':
+ # return 'generalizationReasoning'
+
+ def state_judgement(self, inputType:str)->str:
'''
根据当前情况,判断当前算法输入所处的状态
{
@@ -123,24 +134,24 @@ def state_judgement(self):
"tool_QUESTION_RETURN_ANSWER" #task阶段,不执行tool,而是向用户问填空题/选择题
}
'''
- if self.inputType == None:
- self.algorithm_State = 'FIRST_INPUT'
+ if inputType == None:
+ #self.algorithm_State = 'FIRST_INPUT'
return 'FIRST_INPUT'
- elif self.inputType == 'intentQuestion':#INTENT_QUESTION
- self.algorithm_State = 'INTENT_QUESTION_RETURN_ANSWER'
+ elif inputType == 'intentQuestion':#INTENT_QUESTION
+ #self.algorithm_State = 'INTENT_QUESTION_RETURN_ANSWER'
return "INTENT_QUESTION_RETURN_ANSWER"
- elif self.inputType == 'onlyTool':
- self.algorithm_State = "TOOL_EXECUTION_RESULT"
+ elif inputType == 'onlyTool':
+ #self.algorithm_State = "TOOL_EXECUTION_RESULT"
return "TOOL_EXECUTION_RESULT"
- elif self.inputType == 'reactExecution':#REACT_EXECUTION_RESULT
- self.algorithm_State = "REACT_EXECUTION_RESULT"
+ elif inputType == 'reactExecution':#REACT_EXECUTION_RESULT
+ #self.algorithm_State = "REACT_EXECUTION_RESULT"
return "REACT_EXECUTION_RESULT"
- elif self.inputType == 'userProblem':
- self.algorithm_State = "TOOL_QUESTION_RETURN_ANSWER"
+ elif inputType == 'userProblem':
+ #self.algorithm_State = "TOOL_QUESTION_RETURN_ANSWER"
return "TOOL_QUESTION_RETURN_ANSWER"
def intentionRecongnition(self):
@@ -214,13 +225,17 @@ def intentionRecongnitionProcess(self):
if os.environ['operation_mode'] == 'antcode':
node_id = '剧本杀/狼人杀'
Designated_intent = [node_id]
+ self.queryPattern = 'executePattern'
+ self.intentionRecognitionSituation = 'success'
+ self.intention_recognition_path = Designated_intent
+ self.currentNodeId = '剧本杀/狼人杀'
else:
node_id = hash_id('剧本杀/狼人杀')
Designated_intent = [node_id]
- self.queryPattern = 'executePattern'
- self.intentionRecognitionSituation = 'success'
- self.intention_recognition_path = Designated_intent
- self.currentNodeId = hash_id('剧本杀/狼人杀')
+ self.queryPattern = 'executePattern'
+ self.intentionRecognitionSituation = 'success'
+ self.intention_recognition_path = Designated_intent
+ self.currentNodeId = hash_id('剧本杀/狼人杀')
return 1 #直接结束
if self.scene == 'UNDERCOVER' and self.algorithm_State == "FIRST_INPUT":
logging.info(f'现在直接到谁是卧底模式')
@@ -228,14 +243,31 @@ def intentionRecongnitionProcess(self):
if os.environ['operation_mode'] == 'antcode':
node_id = '剧本杀/谁是卧底'
Designated_intent = [node_id]
+ logging.info(f'意图识别结果为:{Designated_intent}')
+ self.queryPattern = 'executePattern'
+ self.intentionRecognitionSituation = 'success'
+ self.intention_recognition_path = Designated_intent
+ self.currentNodeId = '剧本杀/谁是卧底'
else:
node_id = hash_id('剧本杀/谁是卧底')
Designated_intent = [node_id]
+ logging.info(f'意图识别结果为:{Designated_intent}')
+ self.queryPattern = 'executePattern'
+ self.intentionRecognitionSituation = 'success'
+ self.intention_recognition_path = Designated_intent
+ self.currentNodeId = hash_id('剧本杀/谁是卧底')
+ return 1 #直接结束
+ if self.lingsi_response.usingRootNode == True and self.algorithm_State == "FIRST_INPUT":
+ logging.info(f'现在直接到fatherIntention {self.startRootNodeId}')
+
+ node_id = self.startRootNodeId
+ Designated_intent = [node_id]
logging.info(f'意图识别结果为:{Designated_intent}')
self.queryPattern = 'executePattern'
self.intentionRecognitionSituation = 'success'
self.intention_recognition_path = Designated_intent
- self.currentNodeId = hash_id('剧本杀/谁是卧底')
+ self.currentNodeId = node_id
+
return 1 #直接结束
@@ -246,8 +278,13 @@ def intentionRecongnitionProcess(self):
#step 2 调用 ekg func 主函数, self.intentionRecognitionSituation , self.intentionRecognitionRes
#step 3 写入memory
#不是第一次输入, 接受的是意图识别问题用户给的答案
+
+ self.queryPattern 有两种 'executePattern' or 'qaPattern'
+ self.queryType 整体计划查询; 'allPlan', 下一步任务查询;'nextStep', 闲聊; 'justChat'
+
+
'''
- #step 1 判断是执行式还是答疑式输入
+ #step 1.1 判断是执行式还是答疑式输入
if self.intentionRule != ['nlp']:
self.queryPattern = 'executePattern'
else:
@@ -261,11 +298,13 @@ def intentionRecongnitionProcess(self):
self.queryPattern = 'executePattern'
else:
self.queryPattern = 'qaPattern'
- if os.environ['operation_mode'] == 'antcode' :
- self.queryType = intention_recognition_querytypefunc( self.intentionData )
- elif os.environ['operation_mode'] == 'open_source' :
- self.queryType = self.intention_router.get_intention_consult_which( self.intentionData )
-
+
+ #step 1.2 判断是 整体计划查询; 'allPlan', 下一步任务查询;'nextStep', 闲聊; 'justChat'
+ if os.environ['operation_mode'] == 'antcode' :
+ self.queryType = intention_recognition_querytypefunc( self.intentionData )
+ elif os.environ['operation_mode'] == 'open_source' :
+ self.queryType = self.intention_router.get_intention_consult_which( self.intentionData )
+
logging.info(f'意图分析的结果为 queryType is {self.queryType}, self.queryPattern is {self.queryPattern}')
@@ -302,6 +341,8 @@ def intentionRecongnitionProcess(self):
#raise ValueError('意图识别noMatch ,退出')
elif intentionRecognitionRes['intentionRecognitionSituation'] == 'toChoose':
self.intentionRecognitionSituation = 'toChoose'
+ #raise ValueError('反问逻辑尚未构建,退出')
+ return 'intention_error'
else:
return 'intention_error'
#raise ValueError('意图识别得到了意料之外的状态值,退出')
@@ -407,6 +448,49 @@ def summary_check(self, nodeid_in_subtree):
return False
return True #所有task节点都有observation,则需要summary
+
+ def initialize_replacements(self, nodeId: str, nodeType: str) -> bool:
+ """
+ 初始化变量,调用self.memory_manager.init_global_msg实现
+ """
+ # nodeId, nodeType = "剧本杀/谁是卧底/智能交互", "opsgptkg_schedule"
+ #cur_node = self.geabase_handler.get_current_node(attributes={"id": nodeId}, node_type=nodeType)
+ #cur_node_envdescription = json.loads(cur_node.attributes['envdescription'])
+
+ try:
+ node_envdescription = self.gb_handler.get_tag(rootNodeId = nodeId, rootNodeType = nodeType, key = 'envdescription')
+ cur_node_envdescription = json.loads(node_envdescription)
+ except Exception as e:
+ logging.info(f"发生了一个错误:{e}")
+ logging.info(f"变量初始化失败,略过")
+ #logging.info(f"node_envdescription:{node_envdescription}")
+ return False
+
+ #raise ValueError(f"node_envdescription is {node_envdescription}")
+
+
+ # cur_node_envdescription = json.loads('{"witch_poision": "当前女巫的毒药个数为1"}')
+
+ try:
+ node_envdescription = self.gb_handler.get_tag(rootNodeId = nodeId, rootNodeType = nodeType, key = 'envdescription')
+ cur_node_envdescription = json.loads(node_envdescription)
+ except Exception as e:
+ logging.info(f"发生了一个错误:{e}")
+ logging.info(f"输入不是json格式或者为空,变量初始化失败,略过")
+ logging.info(f"node_envdescription: {node_envdescription}")
+ return False
+
+
+ init_flag = False
+ try:
+ for role_name, role_content in cur_node_envdescription.items():
+ if role_name and role_content:
+ init_flag = self.memory_manager.init_global_msg(self.sessionId, role_name, role_content)
+ except Exception as e:
+ logging.info(f"变量初始化错误!{e}")
+ return init_flag
+
+
def first_user_memory_write(self):
#如果当前是第一次输入,memory如何填写
@@ -428,8 +512,23 @@ def first_user_memory_write(self):
# logging.info(f'type(geabase_search_return_all_node) is {gsran}')
nodeid_in_subtree, _ = self.gb_handler.geabase_search_return_all_nodeandedge( last_intention_nodeid, 'opsgptkg_intent')
- logging.info(f'整条链路上的节点个数是 len(nodeid_in_subtree) is {len(nodeid_in_subtree)}')
nodeid_in_subtree_str = json.dumps(nodeid_in_subtree, ensure_ascii=False)
+ logging.info(f'整条链路上的节点个数是 len(nodeid_in_subtree) is {len(nodeid_in_subtree)}')
+ logging.info(f'整条连路上的节点信息是:{nodeid_in_subtree}')
+
+ #nodeid_in_subtree_list = self.gb_handler.geabase_search_reture_nodeslist( last_intention_nodeid, 'opsgptkg_intent')
+ #nodeid_in_subtree_list_str = json.dumps(nodeid_in_subtree_list, ensure_ascii=False)
+
+ # 获取 `nodeType` 为 `opsgptkg_schedule` 的 `nodeId`
+ try:
+ nodeId = [item['nodeId'] for item in nodeid_in_subtree if item['nodeType'] == 'opsgptkg_schedule'][0]
+ except Exception as e:
+ logging.info("不存在opsgptkg_schedule节点")
+ init_flag = self.initialize_replacements(nodeId, nodeType='opsgptkg_schedule')
+ if init_flag:
+ logging.info('变量初始化完成!')
+ else:
+ logging.info('变量初始化失败!')
# logging.info(f'=============================================')
# logging.info(f'nodeid_in_subtree_str {nodeid_in_subtree_str}')
# logging.info(f'=============================================')
@@ -502,8 +601,10 @@ def memorywrite(self):
def get_summary(self):
#后续待优化,当前只输出所有激活的summary节点
summary_list = []
- nodeid_in_subtree_memory = self.memory_manager.get_memory_pool_by_all({ "chat_index": self.sessionId, "role_type": "nodeid_in_subtree"})
- nodeid_in_subtree = json.loads( nodeid_in_subtree_memory.get_messages()[0].role_content )
+ #nodeid_in_subtree_memory = self.memory_manager.get_memory_pool_by_all({ "chat_index": self.sessionId, "role_type": "nodeid_in_subtree"})
+ #nodeid_in_subtree = json.loads( nodeid_in_subtree_memory.get_messages()[0].role_content )
+
+ nodeid_in_subtree = self.get_nodeid_in_subtree(self.sessionId, self.currentNodeId)
for i in range(len(nodeid_in_subtree)):
if nodeid_in_subtree[i]['nodeType'] == 'opsgptkg_analysis': #从nodeid_in_subtree中找到analysis的节点
nodeId = nodeid_in_subtree[i]['nodeId']
@@ -555,17 +656,35 @@ def get_summary(self):
return summary_str
- def get_nodeid_in_subtree(self):
- logging.info(f'self.sessionId is {self.sessionId}')
- # nodeid_in_subtree_memory= self.memory_manager.get_memory_pool_by_all({ "chat_index": self.sessionId, "role_type": "nodeid_in_subtree"})
- nodeid_in_subtree_memory= self.memory_manager.get_memory_pool_by_all({ "chat_index": self.sessionId, "role_type": "nodeid_in_subtree"})
+ def get_nodeid_in_subtree(self, sessionId, nodeId):
+ #logging.info(f' sessionId is {self.sessionId}, nodeId is {nodeId}')
+ nodeid_in_subtree_memory = self.memory_manager.get_memory_pool_by_all({ "chat_index": self.sessionId, "role_type": "nodeid_in_subtree"})
+ nodeid_in_subtree = json.loads( nodeid_in_subtree_memory.get_messages()[0].role_content )
+ return nodeid_in_subtree
+
- logging.info(f'nodeid_in_subtree_memory is {nodeid_in_subtree_memory}')
+ # if nodeId == None:
+ # logging.info(f' nodeId == None, 为第一次输入,调用 geabase_search_return_all_nodeandedge 取 nodeid_in_subtree的值' )
+ # nodeid_in_subtree, _ = self.gb_handler.geabase_search_return_all_nodeandedge( last_intention_nodeid, 'opsgptkg_intent')
+ # return nodeid_in_subtree
+
+
+ # # nodeid_in_subtree_memory= self.memory_manager.get_memory_pool_by_all({ "chat_index": self.sessionId, "role_type": "nodeid_in_subtree"})
+ # nodeid_in_subtree_memory= self.memory_manager.get_memory_pool_by_all({ "chat_index": sessionId, "role_type": "nodeid_in_subtree"})
- nodeid_in_subtree = json.loads( nodeid_in_subtree_memory.get_messages()[0].role_content )
- # logging.info(f'nodeid_in_subtree is {nodeid_in_subtree}')
- return nodeid_in_subtree, nodeid_in_subtree_memory
+
+ # logging.info(f'nodeid_in_subtree_memory is {nodeid_in_subtree_memory}')
+
+ # nodeid_in_subtree_list = json.loads( nodeid_in_subtree_memory.get_messages()[0].role_content )
+ # if len(nodeid_in_subtree_list) == 0:
+ # return nodeid_in_subtree_list[0]
+ # for nodeid_in_subtree in nodeid_in_subtree_list:
+ # for one_node_info in nodeid_in_subtree:
+ # if one_node_info['nodeId'] == nodeId:
+ # return nodeid_in_subtree
+
+ # raise ValueError('len(nodeid_in_subtree_list)>0 但是当前节点不在nodeid_in_subtree_list 中')
def qaProcess(self, nodeid_in_subtree):
'''
@@ -592,6 +711,53 @@ def qaProcess(self, nodeid_in_subtree):
res = '输入为闲聊,暂不做回复'
return res
+
+ def grProcess(self, scene:str, sessionId:str, currentNodeId:str, algorithm_State:bool,
+ lingsi_response:LingSiResponse,
+ geabase_handler,
+ memory_handler, llm_config)->dict:
+ '''
+ 调用 泛化推理
+ '''
+ logging.info(f'当前scene为{scene}')
+ if scene == 'generalizationReasoning_nonretrieval':
+ generalizaiton_reasoning = GeneralizationReason( sessionId = sessionId,
+ currentNodeId = currentNodeId,
+ memory_handler = memory_handler,
+ geabase_handler = geabase_handler,
+ llm_config=llm_config,
+ retrieval_flag = False)
+ elif scene == 'generalizationReasoning_test':
+ generalizaiton_reasoning = GeneralizationReason( sessionId = sessionId,
+ currentNodeId = currentNodeId,
+ memory_handler = memory_handler,
+ geabase_handler = geabase_handler,
+ llm_config=llm_config,
+ retrieval_flag = True)
+ elif scene == 'generalizationReasoning' or scene =='NEXA':
+ generalizaiton_reasoning = GeneralizationReason( sessionId = sessionId,
+ currentNodeId = currentNodeId,
+ memory_handler = memory_handler,
+ geabase_handler = geabase_handler,
+ llm_config=llm_config,
+ retrieval_flag = True)
+ else:
+ raise ValueError(f'scene if {scene}')
+ if algorithm_State == 'FIRST_INPUT':
+ if type(lingsi_response.observation) == str:
+ lingsi_response.observation = json.loads(lingsi_response.observation)
+ input_str_gr = lingsi_response.observation['content']
+ else:
+ if type(lingsi_response.observation) == str:
+ lingsi_response.observation = json.loads(lingsi_response.observation)
+ input_str_gr = lingsi_response.observation['toolResponse']
+
+ res_to_lingsi = generalizaiton_reasoning.process(
+ sessionId = sessionId,
+ currentNodeId = 'generalization_reason',
+ input_str = input_str_gr)
+ return res_to_lingsi
+
def outputFuc(self):
try:
tool_plan = self.tool_plan
@@ -608,7 +774,7 @@ def outputFuc(self):
if self.gb_handler.get_extra_tag(rootNodeId = self.currentNodeId, rootNodeType = currentNodeType, key = 'dodisplay') == 'True' \
or self.gb_handler.get_extra_tag(rootNodeId = self.currentNodeId, rootNodeType = currentNodeType, key = 'dodisplay') == 'Ture' \
or self.gb_handler.get_tag(rootNodeId = self.currentNodeId, rootNodeType = currentNodeType, key = 'dodisplay') == 'True' :
- outputinfo_str = self.memory_handler.get_output(self.sessionId, self.start_datetime, self.end_datetime)
+ outputinfo_str = self.memory_handler.get_output(self.sessionId, self.start_datetime, self.end_datetime, self.lingsi_response.agentName)
else:
dodisplaystr = self.gb_handler.get_tag(rootNodeId = self.currentNodeId, rootNodeType = currentNodeType, key = 'dodisplay')
logging.info(f" 查询dodisplay字段结果为空, 或者为{dodisplaystr},本次不对外输出")
@@ -696,6 +862,16 @@ def outputFuc(self):
"toolPlan": None ,
"userInteraction": outputinfo_str,
}
+ elif self.toolResponseError_flag == True:
+ logging.info("出现了tool 执行错误,终止, 当前能返回什么summary就返回什么summary")
+ res_to_lingsi = {
+ 'intentionRecognitionSituation': self.intentionRecognitionSituation,
+ "sessionId": self.sessionId,
+ "type": "summary",
+ "summary" : self.get_summary(),
+ "toolPlan": None ,
+ "userInteraction": outputinfo_str,
+ }
else:
raise ValueError('图谱扩散得到了预料之外的情况,退出')
@@ -705,26 +881,115 @@ def outputFuc(self):
def process(self):
#step1 根据当前情况,判断当前算法输入所处的状态
logging.info(f'#step1 根据当前情况,判断当前算法输入所处的状态 self.inputType is {self.inputType}')
- self.state_judgement()
+ self.algorithm_State = self.state_judgement(self.inputType)
logging.info(f'#step1 over,当前算法状态为{self.algorithm_State}')
+ #step1.1 其他情况, 泛化推理 或者 WebAgent
+ html = None
+ task_query = None
+
+ gr_flag = self.memory_handler.message_get(
+ sessionId = self.sessionId,
+ nodeId = 'gr',
+ hashpostfix='gr',
+ role_name='gr',
+ role_type='gr')
+ logging.info(f'gr_flag is {gr_flag}')
+
+ if self.scene == 'WebAgent':
+ logging.info(f'当前scene为{self.scene}')
+
+ if type(self.lingsi_response.observation) == str:
+ self.lingsi_response.observation = json.loads(self.lingsi_response.observation)
+
+ if self.algorithm_State == 'FIRST_INPUT':
+ task_query = self.lingsi_response.observation['content']
+ html = self.lingsi_response.observation['toolResponse']
+
+ else:
+ html = self.lingsi_response.observation['toolResponse']
+ #html = html[0:50000]
+
+ agent = WebAgent(memory_manager=self.memory_manager, llm_model_name = 'Qwen2_72B_Instruct_OpsGPT')
+ logging.info(f'self.sessionId is {self.sessionId}')
+ res_to_lingsi = agent.web_action(chat_index=self.sessionId ,
+ html = html, task_query = task_query)
+
+ return res_to_lingsi
+ # example :[{
+ # "toolDescription": "toolDescriptionA",
+ # "currentNodeId": "INT_1",
+ # "memory": JsonStr,
+ # "type":"onlyTool",
+ # }]
+
+
+
+ elif self.scene == 'generalizationReasoning_test' or \
+ self.scene == 'generalizationReasoning_nonretrieval' or \
+ self.scene == 'generalizationReasoning' or \
+ gr_flag == 'gr':
+ #print(f'scene is {self.scene}')
+ #根据scene 直接触发 grProcess
+
+ self.memory_handler.message_save(
+ sessionId = self.sessionId,
+ nodeId = 'gr',
+ role_content='gr',
+ hashpostfix='gr',
+ user_name='gr',
+ role_name='gr',
+ role_type='gr')
+
+ res_to_lingsi = self.grProcess(scene = self.scene, sessionId = self.sessionId,
+ currentNodeId = self.currentNodeId, algorithm_State = self.algorithm_State,
+ lingsi_response = self.lingsi_response,
+ geabase_handler = self.geabase_handler,
+ memory_handler = self.memory_handler,
+ llm_config = self.llm_config)
+
+ return res_to_lingsi
+
+
+
#step2 意图识别
logging.info('#step2 意图识别')
intention_error_flag = self.intentionRecongnitionProcess()
if intention_error_flag == 'intention_error':
-
-
- logging.info("所有分支到达终点,summary")
- res_to_lingsi = {
- 'intentionRecognitionSituation': self.intentionRecognitionSituation,
- "sessionId": self.sessionId,
- "type": "summary",
- "summary" : '意图识别未检验到相关数据,终止',
- "toolPlan": None ,
- "userInteraction": None,
- }
- return res_to_lingsi
+ # 意图识别查询失败
+
+ if self.queryType != 'justChat' and os.environ['operation_mode'] == 'antcode':
+ logging.info(f'意图识别查询失败, 表示没有数据,同时现在不是闲聊 需要泛化推理')
+
+ self.memory_handler.message_save(
+ sessionId = self.sessionId,
+ nodeId = 'gr',
+ role_content='gr',
+ hashpostfix='gr',
+ user_name='gr',
+ role_name='gr',
+ role_type='gr')
+
+ res_to_lingsi = self.grProcess(scene = self.scene, sessionId = self.sessionId,
+ currentNodeId = self.currentNodeId, algorithm_State = self.algorithm_State,
+ lingsi_response= self.lingsi_response,
+ geabase_handler = self.geabase_handler,
+ memory_handler = self.memory_handler, llm_config = self.llm_config)
+ return res_to_lingsi
+ else:
+ logging.info('意图识别查询失败, 表示没有数据,且现在是闲聊。直接终止')
+ res_to_lingsi = {
+ 'intentionRecognitionSituation': self.intentionRecognitionSituation,
+ "sessionId": self.sessionId,
+ "type": "summary",
+ "summary" : '意图识别未检验到相关数据,且提问和已沉淀知识无关,终止',
+ "toolPlan": None ,
+ "userInteraction": None,
+ }
+ return res_to_lingsi
+
+
logging.info(f'#step2 意图识别 over,')
#step3 memory 写入
@@ -734,7 +999,7 @@ def process(self):
#step4 #get_nodeid_in_subtree
logging.info('#step4 get_nodeid_in_subtree')
- nodeid_in_subtree, nodeid_in_subtree_memory = self.get_nodeid_in_subtree()
+ nodeid_in_subtree = self.get_nodeid_in_subtree(self.sessionId, self.currentNodeId)
self.nodeid_in_subtree = nodeid_in_subtree
logging.info('#step4 get_nodeid_in_subtree')
@@ -742,6 +1007,11 @@ def process(self):
logging.info(f'step5 #summary_flag 判断')
self.summary_flag = self.gst.geabase_summary_check(self.sessionId, nodeid_in_subtree)
logging.info(f'step5 over, summary_flag is {self.summary_flag}')
+
+ #step5.1 #toolResponseError_flag 判断
+ logging.info(f'step5 #toolResponseError_flag 判断')
+ self.toolResponseError_flag = self.gst.toolResponseError_check(self.lingsi_response)
+ logging.info(f'step5 over, toolResponseError_flag is {self.summary_flag}')
#step 6 self.currentNodeId 更新 得到 start_nodetype
logging.info(f'step 6 self.currentNodeId 更新 得到 start_nodetype')
@@ -783,7 +1053,7 @@ def process(self):
logging.info(f'图谱扩散的输入 self.sessionId {self.sessionId}; currentNodeId {currentNodeId}; start_nodetype {start_nodetype}')
tool_plan, tool_plan_3 = self.gst.geabase_nodediffusion_plus(self.sessionId,
-currentNodeId, start_nodetype, agent_respond )
+currentNodeId, start_nodetype, agent_respond , self.lingsi_response)
self.tool_plan = tool_plan
self.tool_plan_3 = tool_plan_3
logging.info(f'step 8 图谱扩散 over')
@@ -813,12 +1083,24 @@ def main(params_string, memory_manager, geabase_handler, intention_router = No
params = params_string
if type(params) == str:
params = json.loads(params)
+ logging.info(f'=======开始新一轮输入=============')
+ logging.info(f'=======开始新一轮输入=============')
+ logging.info(f'=======开始新一轮输入=============')
+ logging.info(f'=======开始新一轮输入=============')
+ logging.info(f'=======开始新一轮输入=============')
logging.info(f'params={params}')
+ logging.info(f'llm_config={llm_config}')
+ lingsi_response = LingSiResponse(**params)
+ lingsi_response = lingsi_response_process(lingsi_response) # process,currentnodeid 和 agentname都放到currentnodeid里,需要分割开来
+ logging.info(f'lingsi_response is {lingsi_response}')
+ #params = lingsi_response.dict()
+
scene = params.get('scene', None)
sessionId = params.get('sessionId', None) #
- currentNodeId = params.get('currentNodeId', None) #
+ #currentNodeId = params.get('currentNodeId', None) #
+ currentNodeId = lingsi_response.currentNodeId
observation = params.get('observation', None) #
userAnswer = params.get('userAnswer', None) #
inputType = params.get('type', None) #
@@ -827,9 +1109,13 @@ def main(params_string, memory_manager, geabase_handler, intention_router = No
intentionData = params.get('intentionData', None) #
startFromRoot = params.get('startFromRoot', True) #
+
+
- if scene in ['WEREWOLF' , 'NEXA', 'UNDERCOVER']:
+
+ if scene in ['WEREWOLF' , 'NEXA', 'UNDERCOVER',
+ 'generalizationReasoning_nonretrieval', 'generalizationReasoning', 'WebAgent']:
#标注这个sessionId属于新逻辑
message = Message(
@@ -901,10 +1187,11 @@ def main(params_string, memory_manager, geabase_handler, intention_router = No
else:
logging.info(f'当前不为graphStructureSearch模式, 正常EKG')
- state, last_res_to_lingsi = abnormal_and_retry(inputType, observation, sessionId, memory_manager)
- if state == 'retry_now':
- logging.info('现在进行重试,返回上一次在memory存储的结果')
- return last_res_to_lingsi
+ # #重试逻辑,如果返回值 observation中没有toolresponse, 则进行重试
+ # state, last_res_to_lingsi = abnormal_and_retry(inputType, observation, sessionId, memory_manager)
+ # if state == 'retry_now':
+ # logging.info('现在进行重试,返回上一次在memory存储的结果')
+ # return last_res_to_lingsi
@@ -912,6 +1199,7 @@ def main(params_string, memory_manager, geabase_handler, intention_router = No
geabase_handler = geabase_handler,
memory_manager=memory_manager,
intention_router = intention_router,
+ lingsi_response = lingsi_response,
scene= scene,
sessionId=sessionId, currentNodeId = currentNodeId,
observation = observation, userAnswer = userAnswer, inputType = inputType,
@@ -928,6 +1216,28 @@ def main(params_string, memory_manager, geabase_handler, intention_router = No
return res_to_lingsi
+
+
+def lingsi_response_process(lingsi_response:LingSiResponse)->LingSiResponse:
+    '''
+    Split a combined "currentNodeId%%@@#agentName" value back into the
+    currentNodeId and agentName fields of the LingSiResponse.
+    Temporary workaround while the agentName field is not yet live upstream;
+    can be removed later.
+    '''
+
+    currentNodeId_add_agentName = lingsi_response.currentNodeId
+    if currentNodeId_add_agentName == None:
+        # nothing to split — return unchanged
+        return lingsi_response
+    elif '%%@@#' not in currentNodeId_add_agentName:
+        # no separator: currentNodeId is already a plain node id
+        #lingsi_response.currentNodeId = lingsi_response.currentNodeId
+        #lingsi_response.agentName = None
+        return lingsi_response
+    else:
+        # NOTE(review): assumes the separator appears exactly once — more than
+        # one occurrence would raise ValueError on unpacking; confirm upstream.
+        currentNodeId, agentName = currentNodeId_add_agentName.split('%%@@#')
+        lingsi_response.currentNodeId = currentNodeId
+        lingsi_response.agentName = agentName
+        return lingsi_response
def save_res_to_memory(res_to_lingsi, sessionId, memory_manager):
'''
diff --git a/muagent/service/ekg_reasoning/src/graph_search/task_node_agent.py b/muagent/service/ekg_reasoning/src/graph_search/task_node_agent.py
new file mode 100644
index 0000000..4578604
--- /dev/null
+++ b/muagent/service/ekg_reasoning/src/graph_search/task_node_agent.py
@@ -0,0 +1,1487 @@
+# -*- coding: utf-8 -*-
+#此代码为在aistudio上运行的代码
+
+#路径增加
+import sys
+import os
+import re
+from typing import List, Dict, Optional, Tuple, Literal,Union
+src_dir = os.path.join(
+ os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+)
+# print(src_dir)
+sys.path.append(src_dir)
+
+
+src_dir = os.path.join(
+ os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
+)
+sys.path.append(src_dir)
+# print(src_dir)
+
+
+
+
+
+
+
+#一般依赖包
+import json
+import requests
+import time
+import logging
+import copy
+import sys
+import os, base64
+from loguru import logger
+import uuid
+
+
+
+#muagent 依赖包
+from muagent.connector.schema import Message
+from muagent.schemas.db import TBConfig
+from muagent.db_handler import *
+from muagent.connector.memory_manager import TbaseMemoryManager
+from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
+from muagent.schemas.db import GBConfig
+from muagent.service.ekg_construct import EKGConstructService
+from muagent.service.ekg_inference import IntentionRouter
+from muagent.schemas.ekg.ekg_reason import LingSiResponse, ToolPlanOneStep, PlanningRunningAgentReply, ActionOneStep, ActionPlan
+
+from src.graph_search.task_node_prompt import REACT_RUNNING_PROMPT, PLANNING_RUNNING_PROMPT, \
+ PLANNING_RUNNING_PROMPT_SUFFIX_1,PLANNING_RUNNING_PROMPT_SUFFIX_2, PLANNING_RUNNING_AGENT_REPLY_TEMPLATE, \
+ PLANNING_RUNNING_PROMPT_DM_SPEECH,PARALLEL_RUNNING_PROMPT
+# from loguru import logger as logging
+from src.utils.call_llm import call_llm, extract_final_result , robust_call_llm
+from src.geabase_handler.geabase_handlerplus import GB_handler
+from src.utils.normalize import hash_id
+from src.memory_handler.ekg_memory_handler import memory_handler_ekg
+
+
+
+
+
+def is_valid_json(string):
+    """Return True if *string* parses as JSON, False otherwise.
+
+    NOTE(review): only ValueError is caught; a non-str input would raise
+    TypeError from json.loads and propagate — callers should pass strings.
+    """
+    try:
+        json.loads(string)
+        return True
+    except ValueError:
+        return False
+
+def remove_duplicate_keys(data):
+    """Recursively rebuild a dict, keeping the first value seen for each key.
+
+    Non-dict values are returned unchanged.
+    NOTE(review): iterating a Python dict never yields the same key twice, so
+    the `else` branch below is unreachable — the function is effectively a
+    recursive copy; confirm whether the original intent was something else.
+    """
+    if not isinstance(data, dict):
+        return data
+
+    result = {}
+    for key, value in data.items():
+        if key not in result:
+            result[key] = remove_duplicate_keys(value)
+        else:
+            result[key] = value
+
+    return result
+
+def robust_json_loads(llm_result):
+    '''
+    Parse LLM output into JSON. The output is sometimes malformed (e.g. a
+    missing } or ]); in that case ask the LLM itself to repair the text,
+    with a second repair attempt at temperature=0.1 if the first fails.
+    '''
+
+    try:
+        llm_result_json = json.loads(llm_result)
+    except:
+        # for _ in range(2):
+        try:
+            # first repair attempt: ask the LLM to complete/fix the dict
+            logging.info('大模型的输出转换json报错, 现在再用大模型修正一遍')
+            input_query = f'''
+            ##输入##
+            {llm_result}
+            ##任务##
+            上述**输入**可能是一个不完整的dict,如果是这种情况,请将上述转换**输入**为完整的 dict。 不要新增任何内容,只将格式补全/修正为一个完整dict格式即可
+            上述**输入**也可能是包含多个dict,如果是这种情况,只保留第一个dict即可。 不要新增任何内容,只将格式补全/修正为一个完整dict格式即可
+            ##直接输出结果##
+            ''' + '以{开头,任何其他内容都是不允许的!'
+            ouput_result = call_llm(input_query )
+            logging.info(f'修正后的输出为{ouput_result}')
+            llm_result_json = json.loads(ouput_result)
+
+
+        except:
+            # second repair attempt, slightly higher temperature
+            logging.info('大模型的输出转换json报错, 现在再用大模型修正一遍')
+            input_query = f'''
+            ##输入##
+            {llm_result}
+            ##任务##
+            上述**输入**可能是一个不完整的dict,如果是这种情况,请将上述转换**输入**为完整的 dict。 不要新增任何内容,只将格式补全/修正为一个完整dict格式即可
+            上述**输入**也可能是包含多个dict,如果是这种情况,只保留第一个dict即可。 不要新增任何内容,只将格式补全/修正为一个完整dict格式即可
+            ##直接输出结果##
+            ''' + '以{开头,任何其他内容都是不允许的!'
+            ouput_result = call_llm(input_query, temperature = 0.1)
+            logging.info(f'修正后的输出为{ouput_result}')
+            llm_result_json = json.loads(ouput_result)
+        # NOTE(review): when the repaired value is a list this salvages [0] but
+        # then raises unconditionally, discarding the salvage — the raise was
+        # probably meant to fire only when the value is *still* not a dict.
+        if type(llm_result_json)!=dict:
+            llm_result_json = llm_result_json[0]
+            logging.info('llm的输出应该是一个dict才对, 有时候出现[{one step}], 所以尝试选其中一个元素转换为dict')
+            raise ValueError(f'llm的输出应该是一个dict才对 ,经过修正仍然报错')
+    return llm_result_json
+
+def agent_respond_extract_output(input_str):
+    """Best-effort extraction of the value after an 'output' key in a loosely
+    JSON-like agent reply.
+
+    Returns None for None, the input unchanged when no 'output' marker is
+    present, otherwise the text after the last 'output' with JSON punctuation
+    stripped.
+    """
+    if input_str == None:
+        return None
+    if 'output' not in input_str:
+        return input_str
+    input_str = input_str.split('output')[-1] # keep only the text after the last 'output'
+    input_str = input_str.replace('"', '').replace(':', '').replace('}', '').replace('{', '') # strip leftover JSON punctuation
+    return input_str
+
+
+
+class TaskNodeAgent():
+ '''
+ 在图谱推理过程中task node 的运行agent
+ 需要同时 使用memory and geabase
+ '''
+    def __init__(self, geabase_handler, memory_manager, llm_config=None):
+        """Wire up the graph-base and memory handlers used while running task nodes.
+
+        geabase_handler: raw graph DB accessor; memory_manager: conversation
+        memory store; llm_config: optional LLM configuration forwarded to call_llm.
+        """
+        self.geabase_handler = geabase_handler
+        self.memory_manager = memory_manager
+        self.gb_handler = GB_handler(self.geabase_handler) # GB_handler wraps geabase_handler with extra processing logic
+        self.memory_handler = memory_handler_ekg(memory_manager, geabase_handler)
+        self.llm_config = llm_config
+
+
+
+
+    def robust_call_llm_with_llmname(self, input_query, rootNodeId, stop = [], temperature = 0, presence_penalty=0):
+        """Forward *input_query* to call_llm using this instance's llm_config.
+
+        NOTE(review): rootNodeId is currently unused — the per-node model
+        routing below is commented out and kept for reference only.
+        NOTE(review): mutable default `stop=[]` is shared across calls; it is
+        only passed through here, but confirm call_llm never mutates it.
+        """
+        #logging.info('using a gpt_4')
+        res = call_llm(input_content = input_query,
+                       llm_model = None ,
+                       stop = stop,temperature=temperature, presence_penalty=presence_penalty,
+                       llm_config=self.llm_config)
+        return res
+
+
+        # model_name = self.gb_handler.get_extra_tag( rootNodeId = rootNodeId, rootNodeType = 'opsgptkg_task', key = 'model_name')
+        # logging.info(f'model_name is {model_name}')
+        # if model_name == None:
+        #     model_name = 'gpt_4'
+        # if model_name == 'gpt_4':
+        #     try:
+        #         logging.info('using a gpt_4')
+        #         res = call_llm(input_content = input_query, llm_model = 'gpt_4', stop = stop,temperature=temperature, presence_penalty=presence_penalty)
+        #     except:
+        #         logging.info('using Qwen2_72B_Instruct_OpsGPT')
+        #         res = call_llm(input_content = input_query, llm_model = 'Qwen2_72B_Instruct_OpsGPT',stop = stop, temperature=temperature,presence_penalty=presence_penalty)
+        #     return res
+        # else:
+        #     try:
+        #         logging.info('using Qwen2_72B_Instruct_OpsGPT')
+        #         res = call_llm(input_content = input_query, llm_model = 'Qwen2_72B_Instruct_OpsGPT',stop = stop, temperature=temperature,presence_penalty=presence_penalty)
+        #     except:
+        #         logging.info('using a gpt_4')
+        #         res = call_llm(input_content = input_query, llm_model = 'gpt_4', stop = stop,temperature=temperature, presence_penalty=presence_penalty)
+        #     return res
+
+    def stop_at_observation(self, historytext, llm_text):
+        """Truncate *llm_text* at the appropriate "observation" marker.
+
+        Compares how many "observation" markers already exist in historytext
+        (n_hist) with how many the LLM emitted (n_llm) and returns only the
+        newly generated segment.
+        """
+
+        # llm_text contains no "observation" at all: return it unchanged
+        if "observation" not in llm_text:
+            return llm_text
+
+        # historytext has no "observation": keep everything before the first one
+        if "observation" not in historytext:
+            return llm_text.split("observation", 1)[0]
+
+        # count "observation" occurrences in historytext
+        n_hist = historytext.count("observation")
+
+        # count "observation" occurrences in llm_text
+        n_llm = llm_text.count("observation")
+
+        # equal counts: return the text after the n-th (last) observation
+        if n_hist == n_llm:
+            parts = llm_text.rsplit("observation", n_hist)
+            return parts[-1] # the final piece, i.e. text after the last "observation"
+
+        # otherwise return the text between the n-th and (n+1)-th "observation"
+        else:
+            parts = llm_text.split("observation", n_hist + 1) # split into at most n+2 pieces
+            if len(parts) > n_hist + 1: # make sure the target segment exists
+                return parts[n_hist] # text between the n-th and (n+1)-th markers
+            else:
+                return "" # no suitable segment found: return empty string
+
+
+
+    def sort_messages_by_time(self, messages):
+        """Return *messages* sorted ascending by their start_datetime attribute."""
+        # sorted() with a key leaves the input list untouched
+        return sorted(messages, key=lambda msg: msg.start_datetime)
+
+
+
+    def endcheck(self, nodeId, nodeType, oneNodeName='None', oneNodeDescription='None', current_node_history_json='None'):
+        '''
+        Ask the LLM whether this stage's task has finished — but only for
+        nodes whose `extra` JSON sets "endcheck": "True"; otherwise the check
+        passes trivially.
+        Returns True when the check passes, False when the LLM answers 是
+        (finished). NOTE(review): these semantics look inverted at first
+        glance — confirm against the caller before relying on them.
+        '''
+        logging.info(f'endcheck nodeId is {nodeId} nodeType is {nodeType}')
+        oneNode = self.geabase_handler.get_current_node(attributes={"id": nodeId,},
+                                              node_type=nodeType)
+        extra = oneNode.attributes['extra']
+        print(extra)  # NOTE(review): debug leftover — consider removing
+        try:
+
+            extra_json = json.loads(extra)
+            if extra_json['endcheck'] == 'True':
+                endcheck_flag = True
+            else:
+                endcheck_flag = False
+        except:
+            # extra absent / not JSON / missing the key: treat end-check as disabled
+            endcheck_flag= False
+
+        if endcheck_flag == False:
+            return True # end-check disabled: passes trivially
+
+        else:
+            endcheck_llm_input = oneNodeName + '\n' +oneNodeDescription+ '\n##已有步骤##\n' + json.dumps(current_node_history_json,ensure_ascii=False) + \
+                '\n##请结合已有步骤,判断本阶段任务是否结束,只返回中文的 是 或者 否即可,不要输出其他内容:##\n'
+
+            logging.info('=============endcheck_llm_result==================')
+            logging.info(endcheck_llm_input)
+            llm_result = self.robust_call_llm_with_llmname(endcheck_llm_input, nodeId)
+            logging.info('=============endcheck_llm_result==================')
+            logging.info(llm_result)
+
+            if '是' in llm_result:
+                return False
+            else:
+                return True
+
+
+    def naive_agent_input_prompt(self, histroy:str, node_name:str, node_decription:str, running_type_prompt:str,
+             suffix_1:str = '', suffix_2:str = '') -> str:
+        """Concatenate history, node name/description and prompt fragments into one LLM input string."""
+        llm_input = histroy + '\n' + node_name + '\n' + node_decription + '\n' + running_type_prompt\
+            + suffix_1 + suffix_2
+        return llm_input
+
+
+    def task_running(self, sessionId : str, nodeId: str, nodeType:str, lingsi_respond: LingSiResponse):
+        '''
+        Generic task-node runner: dispatches to react / parallel / plan mode
+        according to the node's 'action' tag in the graph base.
+        Raises ValueError for any other tag value.
+        '''
+
+        if self.gb_handler.get_tag(rootNodeId = nodeId, rootNodeType = nodeType, key = 'action') == 'react':
+            logging.info(f'======对于节点{nodeId},当前节点为 react 模式======')
+            return self.react_running( sessionId ,nodeId , nodeType , lingsi_respond )
+        elif self.gb_handler.get_tag(rootNodeId = nodeId, rootNodeType = nodeType, key = 'action') == 'parallel':
+            logging.info(f'======对于节点{nodeId}, 当前节点为 parallel 模式======')
+            return self.parallel_running( sessionId ,nodeId , nodeType , lingsi_respond )
+        elif self.gb_handler.get_tag(rootNodeId = nodeId, rootNodeType = nodeType, key = 'action') == 'plan':
+            logging.info(f'======对于节点{nodeId}, 当前节点为 plan 模式======')
+            return self.planning_running( sessionId ,nodeId , nodeType , lingsi_respond )
+        else:
+            action = self.gb_handler.get_tag(rootNodeId = nodeId, rootNodeType = nodeType, key = 'action')
+            logging.info(f'action当前字段为{action}')
+            raise ValueError('action 字段格式不合法,当前仅支持 react parallel plan 三种模式')
+
+
+    def judge_running_state(self, sessionId: str, nodeId: str)-> bool :
+        '''
+        Decide whether this node runs for the first time in this session:
+        True when no count record exists yet, or when the previous round
+        already reached the 'end' stage; False while a round is in progress.
+        '''
+        get_messages_res = self.memory_handler.nodecount_get( sessionId, nodeId)
+
+        if get_messages_res == [] :
+            # NOTE(review): the three log strings below lack the f-prefix, so
+            # {sessionId} is logged literally — presumably meant to be f-strings.
+            logging.info('当前这个{sessionId} react节点 是第一次运行')
+            first_run_flag = True
+        else:
+            if json.loads(get_messages_res[0].role_content)['nodestage'] == 'end' :# ended in the previous round; the new round has not started yet
+                logging.info('当前这个{sessionId} react节点在上一轮已经结束了,这一轮还未开始,在这一轮也算是第一次执行')
+                first_run_flag = True
+            else:
+                logging.info('当前这个{sessionId} react节点 不是第一次执行')
+                first_run_flag = False
+        return first_run_flag
+
+
+    def parallel_running(self, sessionId : str, nodeId: str, nodeType:str, lingsi_respond: LingSiResponse):
+        '''
+        Parallel-mode execution of a task node: the first run makes one LLM
+        call that produces the full action plan and dispatches all agents at
+        once; subsequent calls only record each agent's reply until the plan
+        is drained. Returns (react_flag, question_plan).
+        '''
+        #step0 extract the actual agent output from the reply
+
+        logging.info(f'react_running 接受到的 lingsi_respond is {lingsi_respond}')
+        try:
+            if lingsi_respond == None:# on the node's first invocation the caller passes lingsi_respond as None
+                agent_respond = None
+            else:
+                if type(lingsi_respond.observation) ==str:
+                    agent_respond = json.loads(lingsi_respond.observation)['toolResponse']
+                else:
+                    agent_respond = lingsi_respond.observation['toolResponse']
+                agent_respond = agent_respond_extract_output(agent_respond)
+        except Exception as e:
+            logging.info(f'lingsi_respond is {lingsi_respond}' )
+            raise ValueError(f'从lingsi_respond中提取 agent_respond报错, 报错信息:{e}')
+
+
+
+        #step1 determine whether this is the node's first run; if so, initialise it
+        first_run_flag = self.judge_running_state( sessionId,nodeId)
+        if first_run_flag == True:
+            # called when the react state is 'end' or empty: initialise, or chapter + 1
+            self.memory_handler.init_react_count(sessionId, nodeId)
+
+
+        #step2 fetch node name + description, then substitute variables
+        oneNode = self.geabase_handler.get_current_node(attributes={"id": nodeId,},
+                                              node_type=nodeType)
+        oneNodeName = oneNode.attributes['name']
+        oneNodeDescription = oneNode.attributes['description']
+        oneNodeDescription = self.fill_replacements(oneNodeDescription, sessionId)
+
+
+
+
+        #step3.1 assemble the memory visible to the Dungeon Master (similar to tool memory) as LLM input
+        assembled_memory = self.get_memory_for_dm(sessionId, nodeId)
+        assembled_memory = json.dumps(assembled_memory, ensure_ascii=False)
+        logging.info(f'assembled_memory is {assembled_memory}')
+
+        #step3.2 fetch this node's run history; on first run store the react node's name into memory
+        # on later runs a history and an action plan must already exist
+        if first_run_flag == True:
+            current_node_history = ''
+            current_node_history_json = []
+            # first run: store only the title for react nodes — the full description is too verbose to repeat every loop and can exceed the context
+            self.memory_handler.react_nodedescription_save(sessionId, nodeId, oneNodeName)
+        else:
+            # not the first run, so a history must have been stored
+            logging.info(f'#不是第一次运行。那么肯定存储了plan')
+            current_node_history = self.memory_handler.react_current_history_get(sessionId, nodeId)
+            current_node_history_json = json.loads(current_node_history)
+
+            action_plan = json.loads( self.memory_handler.current_plan_get(sessionId, nodeId ) )
+            #action_plan = json.loads(json.dumps(action_plan, ensure_ascii=False)) # strip mojibake
+            logging.info(f'对于parallel, 之前存储的action_plan is {action_plan} ,type(action_plan) is {type(action_plan) } ')
+            action_plan =ActionPlan(**action_plan)
+
+
+        #step4 on the first call run the LLM to generate the action_plan;
+        # otherwise just record the agent_name that has finished
+        if first_run_flag == True:
+
+            llm_input = self.naive_agent_input_prompt(assembled_memory, oneNodeName, oneNodeDescription ,PARALLEL_RUNNING_PROMPT
+                 , PLANNING_RUNNING_PROMPT_SUFFIX_1 , PLANNING_RUNNING_PROMPT_SUFFIX_2)
+
+            logging.info('=============llm_input==================')
+            logging.info(llm_input)
+
+            llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
+
+            logging.info('=============llm_result==================')
+            logging.info(llm_result)
+            llm_result_json = robust_json_loads(llm_result)
+            llm_result_PRAR = PlanningRunningAgentReply(**llm_result_json)
+
+            current_node_history_json.append(llm_result_json) # append this record to current_node_history_json
+
+            logging.info('planning_running 第一次大模型调用结束')
+
+
+
+            # at this point the raw action_plan would be a string
+            #action_plan = llm_result_json["action_plan"]
+            # convert the dicts into ActionOneStep objects
+            #action_steps = [ActionOneStep(**step) for step in sss]
+
+            # take the typed ActionPlan from the parsed reply
+            action_plan = llm_result_PRAR.action_plan
+
+
+
+        else:
+            logging.info('不是第一次调用,无需执行大模型, 将已运行的agent_name 存下来')
+            self.memory_handler.processed_agentname_save(sessionId, nodeId, lingsi_respond.agentName )
+
+            #if lingsi_respond.agentName == '主持人'
+
+
+
+        #step5 analyse the result: on first run extract execute_agent_names from action_plan;
+        # otherwise compute remain_action_plan to decide whether the node task is done
+
+        if first_run_flag == True and len(action_plan.data) >0 :
+            react_flag = 'waiting_other_agent'
+
+            logging.info(f'当前为{react_flag}, 因为初次运行且plan不为空')
+            execute_agent_names = [action_plan.data[i].agent_name for i in range(len(action_plan.data))]
+            # mark this node's count as running
+            self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'running')
+
+
+        else:
+            # NOTE(review): this branch is also reached when first_run_flag is
+            # True but the generated plan is empty — processed_agentname would
+            # then be [] and the ValueError below fires; confirm intended.
+            processed_agentname = self.memory_handler.processed_agentname_get(sessionId, nodeId)#list
+            logging.info(f'已经处理的agent_name 为 {processed_agentname}')
+            if processed_agentname == []:
+                raise ValueError(f'此时不是第一次运行,在memory中应该已经有processed_agentname才对,但是processed_agentname is {processed_agentname} ')
+
+            remain_action_plan = []
+            for i in range(len(action_plan.data)):
+                if action_plan.data[i].agent_name not in processed_agentname:
+                    remain_action_plan.append(action_plan.data[i])
+
+            logging.info(f'剩余待处理的的agent_name 为 {remain_action_plan}')
+            if len(remain_action_plan) == 0:
+                execute_agent_names = []
+                react_flag = 'end'
+                logging.info(f'当前为{react_flag}, 将本次节点的count设置为end, 因为remain_action_plan 为空 ')
+                self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'end')
+            else:
+                react_flag = 'waiting_other_agent'
+                logging.info(f'当前为{react_flag}, 因为第一次大模型生成的plan还没有执行成完,还需等待其他步骤执行完毕 ')
+                execute_agent_names = [] # in parallel mode mid-run results give no next-step instruction: the whole tool_plan was already dispatched in parallel on the first pass
+
+                # mark this node's count as running
+                self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'running')
+
+
+        #step6 persist the result
+
+        if first_run_flag == True:
+            action_plan_json = llm_result_json["action_plan"]
+            # convert the plan dicts into ActionOneStep objects
+            action_steps = [ActionOneStep(**step) for step in action_plan_json]
+
+            # build the ActionPlan object with its data attribute
+            action_plan = ActionPlan(data=action_steps)
+            action_plan_json_str = json.dumps(action_plan.dict() ,ensure_ascii=False)
+
+            logging.info(f'将要存储action_plan, action_plan_json_str is {action_plan_json_str}')
+            #save the plan
+            self.memory_handler.current_plan_save(sessionId, nodeId, json.dumps(action_plan.dict() ,ensure_ascii=False) )
+            #save the dialogue
+            self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False) )
+
+        else:
+            #save the dialogue entry for the agent's reply
+            #agent_respond_template = self.plan_running_agent_responds_build()
+
+
+            agent_respond_template = copy.deepcopy(PLANNING_RUNNING_AGENT_REPLY_TEMPLATE)
+
+            agent_respond_template['action']['agent_name'] = lingsi_respond.agentName
+            agent_respond_template['action']['player_name'] = action_plan.get_player_name_by_agent_name(lingsi_respond.agentName)
+            #agent_respond_template['observation'] = [{ "memory_tag":[nodeId],"content": agent_respond}] #
+            first_memory_tag = current_node_history_json[0]['Dungeon_Master'][0]['memory_tag'] # reuse the memory_tag the LLM chose on the first turn for this whole round
+            agent_respond_template['observation'] = [{ "memory_tag":first_memory_tag,"content": agent_respond}]
+
+            ## TODO: "memory_tag":["all"] is a temporary workaround; for both planning and parallel the visibility scope should ideally be known immediately rather than only after a player speaks, so it can be shown on the front end.
+            ## It is currently set to all, which is not right; one option is to reuse the DM's initial memory_tag for everything, at the risk of conflating information visibility with plan membership.
+            ## Being able to see these memories does not necessarily mean taking part in the action.
+
+
+            current_node_history_json.append(agent_respond_template)
+            self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False) )
+            logging.info(f'current_node_history_json is {current_node_history_json}##### lingsi_respond is {lingsi_respond}; ##### agent_respond is {agent_respond} ##### agent_respond_template is {agent_respond_template}' )
+
+
+        #step7 store memory for other agents
+        self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
+
+        update_flag = self.update_replacement(sessionId, nodeId)
+        if react_flag == 'end':
+            if update_flag:
+                logging.info("变量更新成功!")
+            else:
+                logging.info("无变量更新或变量更新失败")
+        #step8 build and return question_plan
+        if react_flag == 'end':
+            question_plan = []
+        elif react_flag == 'waiting_other_agent':
+            question_plan = self.react_get_question_plan(sessionId, nodeId, execute_agent_names)
+            logging.info(f'parallel question_plan is {question_plan}')
+        else:
+            question_plan = []
+        return react_flag, question_plan
+
+
+    def planning_running(self, sessionId : str, nodeId: str, nodeType:str, lingsi_respond: LingSiResponse):
+
+        '''
+        Planning-mode execution of a task node: the first run produces an
+        ordered action plan and agents are executed one step at a time;
+        steps owned by the DM (主持人) are played out inline via the LLM.
+        Returns (react_flag, question_plan).
+        '''
+        #step0 extract the actual agent output from the reply
+
+        logging.info(f'planning_running 接受到的 lingsi_respond is {lingsi_respond}')
+        try:
+            if lingsi_respond == None:# on the node's first invocation the caller passes lingsi_respond as None
+                agent_respond = None
+            else:
+                if type(lingsi_respond.observation) ==str:
+                    agent_respond = json.loads(lingsi_respond.observation)['toolResponse']
+                else:
+                    agent_respond = lingsi_respond.observation['toolResponse']
+                agent_respond = agent_respond_extract_output(agent_respond)
+        except Exception as e:
+            logging.info(f'lingsi_respond is {lingsi_respond}' )
+            raise ValueError(f'从lingsi_respond中提取 agent_respond报错, 报错信息:{e}')
+
+
+
+
+        #step1 determine whether this is the node's first run
+        first_run_flag = self.judge_running_state( sessionId,nodeId)
+        if first_run_flag == True:
+            # called when the react state is 'end' or empty: initialise, or chapter + 1
+            self.memory_handler.init_react_count(sessionId, nodeId)
+
+
+        #step2 fetch node name + description
+        oneNode = self.geabase_handler.get_current_node(attributes={"id": nodeId,},
+                                              node_type=nodeType)
+        oneNodeName = oneNode.attributes['name']
+        oneNodeDescription = oneNode.attributes['description']
+
+
+        oneNodeDescription = self.fill_replacements(oneNodeDescription, sessionId)
+
+        # variable substitution done above by fill_replacements
+
+
+        #step3.1 assemble the memory visible to the Dungeon Master (similar to tool memory) as LLM input
+        assembled_memory = self.get_memory_for_dm(sessionId, nodeId)
+        assembled_memory = json.dumps(assembled_memory, ensure_ascii=False)
+        logging.info(f'assembled_memory is {assembled_memory}')
+
+        #step3.2 fetch this node's run history; on first run store the react node's name into memory
+        if first_run_flag == True:
+            current_node_history = ''
+            current_node_history_json = []
+            # first run: store only the title for react nodes — the full description is too verbose to repeat every loop and can exceed the context
+            self.memory_handler.react_nodedescription_save(sessionId, nodeId, oneNodeName)
+        else:
+            # not the first run, so a history must have been stored
+            logging.info(f'#不是第一次运行。那么肯定存储了plan')
+            current_node_history = self.memory_handler.react_current_history_get(sessionId, nodeId)
+            current_node_history_json = json.loads(current_node_history)
+
+
+
+        #step4 on the first call run the LLM to get the action plan; otherwise skip the LLM and record the finished agent_name
+        if first_run_flag == True:
+
+            llm_input = self.naive_agent_input_prompt(assembled_memory, oneNodeName, oneNodeDescription ,PLANNING_RUNNING_PROMPT
+                 , PLANNING_RUNNING_PROMPT_SUFFIX_1 , PLANNING_RUNNING_PROMPT_SUFFIX_2)
+
+            logging.info('=============llm_input==================')
+            logging.info(llm_input)
+
+            llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
+
+            logging.info('=============llm_result==================')
+            logging.info(llm_result)
+            llm_result_json = robust_json_loads(llm_result)
+
+            current_node_history_json.append(llm_result_json) # append this record to current_node_history_json
+
+            logging.info('planning_running 第一次大模型调用结束')
+
+
+
+            action_plan = llm_result_json["action_plan"]
+            action_plan =ActionPlan(**{'data':action_plan})
+
+
+
+        else:
+            #save the agentName that already ran
+            logging.info('不是第一次调用,无需执行大模型, 将已运行的agent_name 存下来')
+            self.memory_handler.processed_agentname_save(sessionId, nodeId, lingsi_respond.agentName )
+
+            #fetch the action plan from memory
+            action_plan = json.loads( self.memory_handler.current_plan_get(sessionId, nodeId ) )
+            logging.info(f'对于 planning 之前存储的action_plan is {action_plan} ,type(action_plan) is {type(action_plan) } ')
+            action_plan =ActionPlan(**action_plan)
+
+
+            #assemble the agent reply template agent_respond_template
+            agent_respond_template = copy.deepcopy(PLANNING_RUNNING_AGENT_REPLY_TEMPLATE)
+
+            agent_respond_template['action']['agent_name'] = lingsi_respond.agentName
+            #agent_respond_template['action']['player_name'] = self.get_player_name_from_action_plan(action_plan, lingsi_respond.agentName)
+            first_memory_tag = current_node_history_json[0]['Dungeon_Master'][0]['memory_tag'] # reuse the memory_tag the LLM chose on the first turn for this whole round
+            agent_respond_template['action']['player_name'] = action_plan.get_player_name_by_agent_name(lingsi_respond.agentName)
+            agent_respond_template['observation'] = [{ "memory_tag":first_memory_tag,"content": agent_respond}]# TODO
+            current_node_history_json.append(agent_respond_template)
+
+
+        #step4.5 if the DM (主持人) should act, play its steps inline and update action_plan and current_node_history_json
+        if first_run_flag == True:
+            # NOTE(review): accesses data[0] with no emptiness guard — a plan
+            # consisting solely of DM steps would exhaust the list and raise
+            # IndexError; confirm upstream guarantees a non-DM step.
+            while action_plan.data[0].agent_name == '主持人':
+
+                llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription + PLANNING_RUNNING_PROMPT_DM_SPEECH + '\n##已有步骤##\n' + \
+                    json.dumps(current_node_history_json,ensure_ascii=False) + PLANNING_RUNNING_PROMPT_SUFFIX_2
+
+                logging.info('=============llm_input 主持人==================')
+                logging.info(llm_input)
+
+                llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
+
+                logging.info('=============llm_result 主持人==================')
+                logging.info(llm_result)
+                dm_result_json = robust_json_loads(llm_result)
+
+
+                dm_respond_template = copy.deepcopy(PLANNING_RUNNING_AGENT_REPLY_TEMPLATE)
+                dm_respond_template['action']['agent_name'] = '主持人'
+                dm_respond_template['action']['player_name'] = '主持人'
+                first_memory_tag = current_node_history_json[0]['Dungeon_Master'][0]['memory_tag'] # reuse the memory_tag the LLM chose on the first turn
+                dm_respond_template['Dungeon_Master'] = [{ "memory_tag":first_memory_tag, "content": dm_result_json['content']}]# TODO
+                current_node_history_json.append(dm_respond_template) # append this record to current_node_history_json
+                action_plan.data.pop(0) # drop the first element from action_plan.data
+                logging.info(f'经过pop 后 action_plan.data is {action_plan.data}')
+        else:
+
+            processed_agentname = self.memory_handler.processed_agentname_get(sessionId, nodeId)#list
+            logging.info(f'已经处理的agent_name 为 {processed_agentname}')
+            if processed_agentname == []:
+                raise ValueError(f'此时不是第一次运行,在memory中应该已经有processed_agentname才对,但是processed_agentname is {processed_agentname} ')
+
+            remain_action_plan = []
+            for i in range(len(action_plan.data)):
+                if action_plan.data[i].agent_name not in processed_agentname:
+                    remain_action_plan.append(action_plan.data[i])
+
+            while len(remain_action_plan)>=1 and remain_action_plan[0].agent_name == '主持人':
+
+                llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription + PLANNING_RUNNING_PROMPT_DM_SPEECH + '\n##已有步骤##\n' + \
+                    json.dumps(current_node_history_json,ensure_ascii=False) + PLANNING_RUNNING_PROMPT_SUFFIX_2
+
+                logging.info('=============llm_input 主持人==================')
+                logging.info(llm_input)
+
+                llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
+
+                logging.info('=============llm_result 主持人==================')
+                logging.info(llm_result)
+                dm_result_json = robust_json_loads(llm_result)
+
+
+                dm_respond_template = copy.deepcopy(PLANNING_RUNNING_AGENT_REPLY_TEMPLATE)
+                dm_respond_template['action']['agent_name'] = '主持人'
+                dm_respond_template['action']['player_name'] = '主持人'
+                first_memory_tag = current_node_history_json[0]['Dungeon_Master'][0]['memory_tag'] # reuse the memory_tag the LLM chose on the first turn
+                dm_respond_template['Dungeon_Master'] = [{ "memory_tag":first_memory_tag, "content": dm_result_json['content']}]# TODO
+                current_node_history_json.append(dm_respond_template) # append this record to current_node_history_json
+                remain_action_plan.pop(0) # drop the first element of remain_action_plan
+                logging.info(f'经过pop 后 action_plan.data is {action_plan.data}')
+
+
+        #step5 analyse the execution result
+        if first_run_flag == True and len(action_plan.data) >0 :
+            react_flag = 'waiting_other_agent'
+
+            logging.info(f'当前为{react_flag}, 因为初次运行且plan不为空')
+            # llm_result_truncation_json = json.loads(current_node_history + llm_result_truncation + '":[]}]')
+            # llm_result_truncation_json = json.loads(current_node_history + llm_result_truncation + '":[]}]')
+
+            #extract the agent_name that should execute now
+            execute_agent_name = action_plan.data[0].agent_name
+            execute_player_name = action_plan.data[0].player_name
+            logging.info(f'execute_agent_name is {execute_agent_name}, execute_player_name is {execute_player_name}')
+
+            # mark this node's count as running
+            self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'running')
+
+
+        else:
+            # NOTE(review): when first_run_flag is True but the plan was fully
+            # consumed by DM steps, remain_action_plan is unbound here (NameError).
+
+            logging.info(f'剩余待处理的的agent_name 为 {remain_action_plan}')
+            if len(remain_action_plan) == 0:
+
+                react_flag = 'end'
+                logging.info(f'当前为{react_flag}, 将本次节点的count设置为end, 因为remain_action_plan 为空 ')
+                self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'end')
+            else:
+                react_flag = 'waiting_other_agent'
+                logging.info(f'当前为{react_flag}, 因为第一次大模型生成的plan还没有执行成完 ')
+
+                #extract the agent_name that should execute now
+                execute_agent_name = remain_action_plan[0].agent_name
+                execute_player_name = remain_action_plan[0].player_name
+                logging.info(f'execute_agent_name is {execute_agent_name}, execute_player_name is {execute_player_name}')
+
+                # mark this node's count as running
+                self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'running')
+
+
+        #step6 persist the plan on first run
+
+        if first_run_flag == True:
+            self.memory_handler.current_plan_save(sessionId, nodeId, json.dumps(action_plan.dict() ,ensure_ascii=False) )
+
+
+
+        #step7 store memory for other agents
+        logging.info(f'current_node_history_json if {current_node_history_json}')
+
+        self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False) )
+        self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
+
+        if react_flag == 'end':
+            update_flag = self.update_replacement(sessionId, nodeId)
+            if update_flag:
+                logging.info("变量更新成功!")
+            else:
+                logging.info("无变量更新或变量更新失败")
+        #step8 build and return question_plan
+        if react_flag == 'end':
+            question_plan = []
+        elif react_flag == 'waiting_other_agent':
+            question_plan = self.react_get_question_plan(sessionId, nodeId, execute_agent_name)
+        else:
+            question_plan = []
+        return react_flag, question_plan
+
+    def get_player_name_from_action_plan(self, action_plan, agent_name):
+        '''
+        Look up the player_name bound to *agent_name* in a list-of-dicts
+        action plan; raises ValueError when the agent is not present.
+        (Docstring fixed: the original said it returns the agent_name.)
+        '''
+        for i in range(len(action_plan)):
+            if action_plan[i]['agent_name'] == agent_name:
+                return action_plan[i]['player_name']
+        raise ValueError(f'action plan {action_plan }中未找到 对应的 agent name {agent_name}')
+
+ def fill_replacements(self, node_description: str, sessionId: str) -> str:
+ """
+ 构建替换后的 prompt 字符串。
+ :param sessionId: 一个函数,接收占位符名称并返回对应的值
+ :return prompt: 替换后的 prompt 字符串
+ :return placeholders 涉及到的变量
+ """
+ prompt = node_description
+ logging.info(f'prompt:{prompt}')
+ placeholders = re.findall(r"#\$\#(.*?)#\$\#", prompt)
+ logging.info("开始变量替换")
+ logging.info(f'需要替换的变量{placeholders}')
+ try:
+ for placeholder in placeholders:
+ logging.info(f'placeholder为{placeholder}')
+ value = self.memory_manager.get_msg_content_by_rule_name(sessionId, placeholder)
+ logging.info(f'value为{value}')
+ if value != None:
+ prompt = prompt.replace(f'#$#{placeholder}#$#', value)
+ logging.info(f'替换后的prompt{prompt}')
+ except Exception as e:
+ logging.info(f"变量替换出现错误!{e}")
+
+ return prompt
+
+
+ def update_replacement(self, sessionId: str, nodeId: str) -> bool:
+ """
+ 更新变量名,
+ :param sessionId: 对话id
+ :param nodeId: 节点id
+ """
+ cur_node_memory = self.get_cur_node_memory(sessionId, nodeId)
+ logging.info(f'当前节点游戏记录:{cur_node_memory}')
+ cur_node_memory = "当前节点游戏记录:" + ''.join(cur_node_memory)
+
+ try:
+ updaterule = self.gb_handler.get_tag(rootNodeId = nodeId, rootNodeType = "opsgptkg_task", key = 'updaterule')
+ cur_node_updaterule = json.loads(updaterule)
+ except Exception as e:
+ logging.info(f"出现错误: {e}")
+ logging.info(f"输入不是json格式或者为空,变量更新失败,略过.")
+ logging.info(f"node_envdescription: {updaterule}")
+ return False
+
+ update_flag = False
+ try:
+ for placeholder, update_role in cur_node_updaterule.items():
+ logging.info(f'更新的变量为:{placeholder}')
+ logging.info(f'当前节点游戏记录为:{cur_node_memory}')
+ update_flag = self.memory_manager.update_msg_content_by_rule(sessionId, placeholder, cur_node_memory, update_role)
+ logging.info(f'update_flag:{update_flag}')
+ except Exception as e:
+ logging.info(f"变量更新出现错误!{e}")
+ return update_flag
+
+
+ def react_running(self, sessionId : str, nodeId: str, nodeType:str, lingsi_respond: LingSiResponse):
+ '''
+ react 模块 运行
+ '''
+
+
+ logging.info(f'react_running 接受到的 lingsi_respond is {lingsi_respond}')
+ try:
+ if lingsi_respond == None:#第一次该该节点被调用是,输入式将lingsi_respond设置为None
+ agent_respond = None
+ else:
+ if type(lingsi_respond.observation) ==str:
+ agent_respond = json.loads(lingsi_respond.observation)['toolResponse']
+ else:
+ agent_respond = lingsi_respond.observation['toolResponse']
+ except Exception as e:
+ logging.info(f'lingsi_respond is {lingsi_respond}' )
+ raise ValueError(f'从lingsi_respond中提取 agent_respond报错, 报错信息:{e}')
+
+
+
+
+ if agent_respond == None:
+ agent_respond = ''
+ if type(agent_respond) == str:
+ agent_respond = agent_respond.replace('"', '').replace("'", "") #需要去除agent返回中的 " 和 '
+ agent_respond = agent_respond_extract_output(agent_respond) # 去除 agent_respond 中的 thought 和 output
+ #stpe1 判断当前状态
+ get_messages_res = self.memory_handler.nodecount_get( sessionId, nodeId)
+
+ if get_messages_res == [] :
+ logging.info('当前这个{sessionId} react节点 是第一次运行')
+ first_run_react_flag = True
+ else:
+ if json.loads(get_messages_res[0].role_content)['nodestage'] == 'end' :#在上一轮已经结束了,这一轮还未开始
+ logging.info('当前这个{sessionId} react节点在上一轮已经结束了,这一轮还未开始,在这一轮也算是第一次执行')
+ first_run_react_flag = True
+ else:
+ logging.info('当前这个{sessionId} react节点 不是第一次执行')
+ first_run_react_flag = False
+
+ if first_run_react_flag == True:
+ # 当react的状态是 end 或者为空的时候调用此函数,进行初始化 或者 chapter + 1
+ self.memory_handler.init_react_count(sessionId, nodeId)
+
+ #step2 获取节点名字 + 节点描述
+ oneNode = self.geabase_handler.get_current_node(attributes={"id": nodeId,},
+ node_type=nodeType)
+
+ oneNodeName = oneNode.attributes['name']
+ oneNodeDescription = oneNode.attributes['description']
+
+ #step3.1 获取memory, 构成给大模型的输入
+ #获取memory, 主持人能看到的memory, 和获取tool的memory类似
+
+ # tool_ancestor = self.get_tool_ancestor(sessionId, nodeId, nodeType)
+ # get_messages_res_sorted = self.get_memory_from_ancestor( tool_ancestor, sessionId, role_tags = None) #此时是主持人,所以需要看到所有的memory,无需加tag。 对于在我这一侧需要运行的llm,一定是看到所有信息,因为我就是主持人
+ # assembled_memory = self.assemble_memory(nodeId, nodeType, get_messages_res_sorted)
+ assembled_memory = self.get_memory_for_dm(sessionId, nodeId)
+ assembled_memory = json.dumps(assembled_memory, ensure_ascii=False)
+ logging.info(f'assembled_memory is {assembled_memory}')
+
+ #step3.2 获取当前节点的历史运行情况。如果是第一次运行,需要将react node 的name 存入到 memory中
+ if first_run_react_flag == True:
+ current_node_history = ''
+ #第一次运行,对于react模块,只将标题放置在memory里, 因为对于react模块,description太多了,循环的情况下,很罗嗦且超过上下文
+ self.memory_handler.react_nodedescription_save(sessionId, nodeId, oneNodeName)
+
+ else:
+ #不是第一次运行。那么肯定历史history进来
+ logging.info(f'#不是第一次运行。那么肯定历史history进来{sessionId}, {nodeId}')
+ current_node_history = self.memory_handler.react_current_history_get(sessionId, nodeId)
+ # llm_result_truncation + '": [{"content": ' + user_responds
+
+ '''
+ history 里存储 observation的截断,不包含observation,
+ llm_output 输出整个完整的流程(如果是gpt_4, 不能有停用词,因为每次都是从头开始录的),
+ self.stop_at_observation,需要接受 current_node_history ,先分析里面有几个observation(N个), 然后我可以往后扩展一个observation, 不包含observation
+
+ jsonstring 就转全量的, 但是录入到memory中的时候,注意只录入 N+1 个 observation的信息。
+
+
+
+ '''
+ #step4 执行 llm,
+ if '##输入##' not in oneNodeDescription:
+ oneNodeDescription += REACT_RUNNING_PROMPT
+ logging.info("react 模式 prompt 分离完成")
+ oneNodeDescription = self.fill_replacements(oneNodeDescription, sessionId)
+ logging.info("变量替换完成")
+ if first_run_react_flag == True:
+ llm_input = assembled_memory + '\n' + oneNodeName + '\n' + oneNodeDescription + '\n##已有步骤##\n无' + '\n##请输出下一个步骤,切记只输出一个步骤,它应该只是一个dict ##\n'
+ logging.info('=============llm_input==================')
+ logging.info(llm_input)
+ llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
+ current_node_history_json = []
+ llm_result_json = robust_json_loads(llm_result)
+ llm_result_json = remove_duplicate_keys(llm_result_json)
+ logging.info('=============llm_result==================')
+ logging.info(llm_result_json)
+ if type(llm_result_json)!=dict:
+ llm_result_json = llm_result_json[0]
+ logging.info('llm的输出应该是一个dict才对, 有时候出现[{one step}], 所以尝试选其中一个元素转换为dict')
+ # raise ValueError(f'llm的输出应该是一个dict才对 ')
+ current_node_history_json.append(llm_result_json)
+ else:
+ # current_node_history[-1]["observation"]['content'] = agent_respond
+ # llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription+ '\n' + current_node_history + '": [{"content":" ' + agent_respond + '"'
+ # llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription+ '\n' + current_node_history + '": [{"content":" ' + agent_respond + '", "memory_tag:' \
+ # + '\n '
+
+ current_node_history_json = json.loads(current_node_history) #历史记录里可能包含虚假信息
+ logging.info(f'current_node_history_json is {current_node_history_json}')
+ if current_node_history_json[-1]['action']['agent_name'] != '主持人':
+ current_node_history_json[-1]["observation"][0]['content'] = agent_respond #将历史中最后一次主持人幻觉的输出,转换为用户补充的输入
+ try:
+ current_node_history_json[-1]["thought"] = '' #在非主持人环节时,应该将thought 设置为''
+ except:
+ pass
+ llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription+ '\n##已有步骤##\n' + json.dumps(current_node_history_json,ensure_ascii=False) + \
+ '\n##请输出下一个步骤,切记只输出一个步骤,它应该只是一个dict ##\n'
+ logging.info('=============llm_input==================')
+ logging.info(llm_input)
+ llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
+ llm_result_json = robust_json_loads(llm_result)
+ llm_result_json = remove_duplicate_keys(llm_result_json)
+ logging.info('=============llm_result==================')
+ logging.info(llm_result_json)
+ if type(llm_result_json)!=dict:
+ llm_result_json = llm_result_json[0]
+ logging.info('llm的输出应该是一个dict才对, 有时候出现[{one step}], 所以尝试选其中一个元素转换为dict')
+ # raise ValueError(f'llm的输出应该是一个dict才对 ')
+ current_node_history_json.append(llm_result_json)
+
+ retry_llm = 0
+ while(( retry_llm <= 20) and ("taskend" not in llm_result) and (llm_result_json['action']['agent_name'] == '主持人' )):
+ logging.info('由于是主持人发言,情况继续')
+
+ endcheck_res = self.endcheck( nodeId, nodeType, oneNodeName, oneNodeDescription, current_node_history_json)
+ if endcheck_res== False:
+ logging.info('endchek没有通过,主持人发言终止, 强行将 llm_result == {"action": "taskend"}')
+ llm_result = json.dumps({"action": "taskend"})
+ llm_result_json = robust_json_loads(llm_result)
+ current_node_history_json.append(llm_result_json)
+ break
+
+
+
+ llm_input = assembled_memory + '\n' + oneNodeName + '\n' +oneNodeDescription+ '\n##已有步骤##\n' + json.dumps(current_node_history_json,ensure_ascii=False) + \
+ '\n##请输出下一个步骤,切记只输出一个步骤,它应该只是一个dict ##\n'
+ logging.info('=============llm_input==================')
+ logging.info(llm_input)
+ llm_result = self.robust_call_llm_with_llmname(llm_input, nodeId)
+ llm_result_json = robust_json_loads(llm_result)
+ llm_result_json = remove_duplicate_keys(llm_result_json)
+ logging.info('=============llm_result==================')
+ logging.info(llm_result_json)
+ current_node_history_json.append(llm_result_json)
+ if type(llm_result_json)!=dict:
+ llm_result_json = llm_result_json[0]
+ logging.info('llm的输出应该是一个dict才对, 有时候出现[{one step}], 所以尝试选其中一个元素转换为dict')
+ raise ValueError(f'llm的输出应该是一个dict才对 ')
+ retry_llm = retry_llm + 1
+
+ logging.info('大模型调用结束')
+
+
+
+ #step5 分析 llm_result 执行结果
+ #llm_result 为最后一次llm的输出
+ if 'taskend' in llm_result:
+ react_flag = 'end'
+ logging.info(f'当前为{react_flag}, 将本次节点的count设置为end ')
+ self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'end')
+ elif 'observation' in llm_result:
+ react_flag = 'waiting_other_agent'
+
+ logging.info(f'当前为{react_flag}, 尝试补充字符使得llm_result_truncation能转为json格式 ')
+ # llm_result_truncation_json = json.loads(current_node_history + llm_result_truncation + '":[]}]')
+ # llm_result_truncation_json = json.loads(current_node_history + llm_result_truncation + '":[]}]')
+
+ #提取此时应该执行的agent_name
+ execute_agent_name = current_node_history_json[-1]['action']['agent_name']
+ execute_player_name = current_node_history_json[-1]['action']['player_name']
+
+ #将该节点的count 设置为 runninng
+ self.memory_handler.nodecount_set_key(sessionId, nodeId, 'nodestage', 'running')
+
+
+
+
+ #step6 存储 history # for DM
+ logging.info(f'存储 history # for DM')
+ if react_flag == 'waiting_other_agent' and first_run_react_flag == True:
+ #step6.1 存储 llm_result_truncation
+
+ self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False) )
+
+ elif react_flag == 'waiting_other_agent' and first_run_react_flag == False:
+ self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False))
+
+ elif react_flag == 'end' and first_run_react_flag == True: #第一次运行就运行到结尾了
+
+ self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False))
+
+ elif react_flag == 'end' and first_run_react_flag == False: #第N次运行 运行到结尾了
+ self.memory_handler.react_current_history_save(sessionId, nodeId, json.dumps(current_node_history_json ,ensure_ascii=False))
+
+
+ #step7 存储 memory # for other agent
+ logging.info(f'存储 memory # for other agent')
+ if react_flag == 'waiting_other_agent' and first_run_react_flag == True:
+ logging.info('#第一次运行 等待agent返回')
+ self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
+ if react_flag == 'waiting_other_agent' and first_run_react_flag == False:
+ logging.info('#第N次运行 等待agent返回')
+ self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
+
+ elif react_flag == 'end' and first_run_react_flag == True: #第一次运行就运行到结尾了:
+ logging.info('#第一次运行就运行到结尾了:')
+ self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
+
+ elif react_flag == 'end' and first_run_react_flag == False: #第N次运行 运行到结尾了
+ logging.info('#第N次运行 运行到结尾了')
+ self.memory_handler.react_memory_save(sessionId, nodeId, current_node_history_json)
+
+ if react_flag == 'end':
+ update_flag = self.update_replacement(sessionId, nodeId)
+ if update_flag:
+ logging.info("变量更新成功!")
+ else:
+ logging.info("无变量更新或变量更新失败")
+ #step8 返回 question_plan
+ if react_flag == 'end':
+ question_plan = []
+ elif react_flag == 'waiting_other_agent':
+ question_plan = self.react_get_question_plan(sessionId, nodeId, execute_agent_name)
+ else:
+ question_plan = []
+ return react_flag, question_plan
+
+
+
+ def get_intentionNodes_from_subtree(self, sessionId)->list:
+ '''
+ #抽取 nodeid_in_subtree 中的所有 intention_nodes
+ 返回值以这种list [{'nodeId': 'xxx=','nodeType': 'opsgptkg_intent'}]
+ '''
+ intention_nodes = []
+ try:
+ nodeid_in_subtree_memory = self.memory_manager.get_memory_pool_by_all(
+ { "chat_index": sessionId, "role_type": "nodeid_in_subtree"})
+ nodeid_in_subtree = json.loads( nodeid_in_subtree_memory.get_messages()[0].role_content )
+ intention_nodes = []
+ for i in range(len(nodeid_in_subtree)):
+ onenode = nodeid_in_subtree[i]
+ if onenode['nodeType'] == 'opsgptkg_intent':
+ intention_nodes.append( {'nodeId': onenode['nodeId'], 'nodeType': onenode['nodeType']})
+ except Exception as e:
+ logging.info(f"发生了一个错误:{e}")
+ return intention_nodes
+
+ def get_memory_for_tool(self,sessionId, nodeId):
+ '''
+ react 节点中 对于一个 主持人, 构建memory的函数。
+ 只需要将祖先节点弄好即可,不需要加自己,因为自己有 history进行维护
+
+ 默认将这个sessionId里所有的意图识别节点,弄到 tool_ancestor里
+ '''
+
+ intention_nodes = self.get_intentionNodes_from_subtree(sessionId)
+ nodeType = 'opsgptkg_task' #假设一定是task节点
+ tool_ancestor = self.get_tool_ancestor(sessionId, nodeId, nodeType)
+ '''[{'nodeId': 'xxx=','nodeType': 'opsgptkg_intent'}]'''
+ tool_ancestor = tool_ancestor + intention_nodes
+ get_messages_res_sorted = self.get_memory_from_ancestor( tool_ancestor, sessionId, role_tags = None) #对于tool,假设都是主持人的工具,所以需要看到所有的memory,无需加tag。
+ assembled_memory = self.assemble_memory_for_tool(nodeId, nodeType, get_messages_res_sorted) # tool 的memory需要兼顾以前的格式
+ return assembled_memory
+
+
+ def get_memory_for_dm(self,sessionId, nodeId):
+ '''
+ react 节点中 对于一个 主持人, 构建memory的函数。
+ 只需要将祖先节点弄好即可,不需要加自己,因为自己有 history进行维护
+ '''
+ intention_nodes = self.get_intentionNodes_from_subtree(sessionId)
+ nodeType = 'opsgptkg_task' #假设一定是task节点
+ tool_ancestor = self.get_tool_ancestor(sessionId, nodeId, nodeType)
+ tool_ancestor = tool_ancestor + intention_nodes
+ get_messages_res_sorted = self.get_memory_from_ancestor( tool_ancestor, sessionId, role_tags = None) #此时是主持人,所以需要看到所有的memory,无需加tag。 对于在我这一侧需要运行的llm,一定是看到所有信息,因为我就是主持人
+ assembled_memory = self.assemble_memory_for_reactagent(nodeId, nodeType, get_messages_res_sorted)
+ return assembled_memory
+
+ def get_memory_for_computer_agent(self,sessionId, nodeId, execute_agent_name):
+ '''
+ react 节点中 对于一个 agent_x (电脑agent), 构建memory的函数
+ '''
+ intention_nodes = self.get_intentionNodes_from_subtree(sessionId)
+ nodeType = 'opsgptkg_task' #假设一定是task节点
+ tool_ancestor = self.get_tool_ancestor(sessionId, nodeId, nodeType)
+ tool_ancestor = tool_ancestor + intention_nodes
+
+ if nodeId not in [nodeinancestor['nodeId'] for nodeinancestor in tool_ancestor]:
+ tool_ancestor = tool_ancestor + [{'nodeId': nodeId, 'nodeType':nodeType}]
+
+ #需要将自己也加上,方便在下一步memory检索的时候把本节点的历史也得到,由于在生成str的时候,第一时间就save本届点的memory,所以这样做是可以的
+ #需要注意的是,给agent和给主持人看到的输入是不一样的。 主持人看到的是 memory + node_text + currentnodehistory, currentnodehistory 是文本,因为主持人需要维持一个 结构化的输出。
+ #agent看到的是 memory,agent只需要说出一句话即可
+ get_messages_res_sorted = self.get_memory_from_ancestor(tool_ancestor, sessionId, execute_agent_name) #此时是调用外部agent,所以需要加tag
+ assembled_memory = self.assemble_memory_for_reactagent(nodeId, nodeType, get_messages_res_sorted)
+ return assembled_memory
+ def get_cur_node_memory(self, sessionId, nodeId):
+ """
+ 获取当前节点的游戏记录,不包含祖先节点。
+ """
+ nodeType = 'opsgptkg_task' #假设一定是task节点
+ tool_ancestor = [{'nodeId':nodeId, 'nodeType':nodeType}]
+ logging.info(f'tool_ancestor:{tool_ancestor}')
+ get_messages_res_sorted = self.get_memory_from_ancestor(tool_ancestor, sessionId, role_tags = None) #此时是主持人,所以需要看到所有的memory,无需加tag。 对于在我这一侧需要运行的llm,一定是看到所有信息,因为我就是主持人
+ logging.info(f'get_messages_res_sorted:{get_messages_res_sorted}')
+ assembled_memory = self.assemble_memory_for_reactagent(nodeId, nodeType, get_messages_res_sorted)
+ return assembled_memory
+
+ def react_get_question_plan(self, sessionId:str, nodeId:str, execute_agent_names : Union[str, List[str]]):
+ '''
+ 如果react模块 react_flag==waiting_other_agent, 则需要返回 question_plan
+ 可能需要区分人来回答还是大模型来回答
+ '''
+ logging.info(f"nodeId is {nodeId} , execute_agent_names is {execute_agent_names}")
+ toolPlan = []
+ if type(execute_agent_names) == str:
+ #如果只输入一个agent_name
+ execute_agent_names = [execute_agent_names]
+
+
+ for execute_agent_name in execute_agent_names:
+ if '人类' in execute_agent_name: #需要提交给人
+ '''
+ example: {'toolDescription': '请用户回答',
+ 'currentNodeId': 'INT_3',
+ 'memory': None,
+ 'type': 'userProblem',
+ 'questionDescription': {'questionType': 'essayQuestion',
+ 'questionContent': {'question': '请输入',
+ 'candidate': None }}}
+
+ 一定是一个问答题, 无需提问,这里question变成一个固定值了。 最重要的是把memory 也是空, 因为历史信息在对话里已经显示了。
+ '''
+ tool_one_step= ToolPlanOneStep(
+ **{'toolDescription': '请用户回答',
+ 'currentNodeId': nodeId + '%%@@#' + execute_agent_name,
+ 'currentNodeInfo':execute_agent_name,
+ 'memory': None,
+ 'type': 'userProblem',
+ 'questionDescription': {'questionType': 'essayQuestion',
+ 'questionContent': {'question': '请玩家根据当前情况发言',
+ 'candidate': None }}}
+ )
+
+ toolPlan.append( tool_one_step.dict())
+
+ else: #需要执行agent
+ '''
+ example :[{
+ "toolDescription": "toolDescriptionA",
+ "currentNodeId": "INT_1",
+ "memory": JsonStr,
+ "type":"onlyTool",
+ }]
+ '''
+
+
+ assembled_memory = self.get_memory_for_computer_agent(sessionId, nodeId, execute_agent_name)
+ react_memory = assembled_memory
+ if type(react_memory)!= str:
+ react_memory = json.dumps(react_memory, ensure_ascii=False)
+
+ # logging.info(f'react_memory is {react_memory}')
+ tool_one_step= ToolPlanOneStep(**{
+ "toolDescription": execute_agent_name,
+ 'currentNodeInfo':execute_agent_name,
+ "currentNodeId": nodeId + '%%@@#' + execute_agent_name,
+ "memory": react_memory,
+ "type":"reactExecution",
+ })
+
+
+ # toolPlan = [{
+ # "toolDescription": execute_agent_name,
+ # "currentNodeId": nodeId,
+ # "memory": react_memory,
+ # "type":"reactExecution",
+ # }]
+
+ toolPlan.append(tool_one_step.dict())
+
+
+ return toolPlan
+
+
+
    def get_tool_ancestor(self, sessionId,
                            start_nodeid = '为什么余额宝没收到收益_complaint',
                            start_nodetype = 'opsgptkg_task'):
        # Walk the graph BACKWARDS (reverse edges) from the start node and
        # collect ancestor task / intention / phenomenon nodes whose memory is
        # relevant for the current node. Returns a de-duplicated list of
        # {'nodeId', 'nodeType'} dicts, oldest-first (entries are inserted at
        # position 0 while walking up).
        #1 for each nodeId: to build its memory, first traverse all ancestor
        #  task nodes and record the relevant ones
        tool_ancestor = []
        nodeid_in_search = [{'nodeId':start_nodeid, 'nodeType':start_nodetype}]
        nodeid_in_search_all = [{'nodeId':start_nodeid, 'nodeType':start_nodetype}]

        while len(nodeid_in_search)!= 0:
            nodedict_now = nodeid_in_search.pop()
            nodeid_now = nodedict_now['nodeId']
            nodetype_now = nodedict_now['nodeType']


            # query ancestor nodes: reverse=True walks incoming edges
            neighborNodes = self.geabase_handler.get_neighbor_nodes(attributes={"id": nodeid_now,}, node_type=nodetype_now, reverse=True)
            print(nodeid_now, nodetype_now, neighborNodes, '=========')


            for i in range(len(neighborNodes) ):
                # if res['resultSet']['rows'][i]['columns'][0] == {}:
                #     continue

                # else:
                nodeid_new = neighborNodes[i].id
                nodetype_new = neighborNodes[i].type
                # NOTE(review): this checks the FRONTIER (nodeid_in_search),
                # not the visited set (nodeid_in_search_all); nodes popped
                # earlier can be re-processed. Duplicate inserts are removed
                # by the final dedup pass, but confirm whether
                # nodeid_in_search_all was intended here.
                if nodeid_new in [kk['nodeId'] for kk in nodeid_in_search]: # already explored, skip
                    continue

                elif nodetype_new == 'opsgptkg_task': # task node: record it and keep walking up

                    # check whether this task node ever received a response by
                    # querying its count — the number itself does not matter
                    message_res = self.memory_handler.nodecount_get( sessionId, nodeid_new) # read this node's count

                    if len(message_res) == 0: # no memory / no response: stop extending here to save geabase queries
                        print(f'#这个task节点{nodeid_new}没有memory 或者没有收到response,则不再往前延展,减少geabase查询个数')
                        continue
                    else:
                        print('#如果是task节点,则加入到tool_plan中,同时继续往前延展。 get_tool_ancestor')
                        tool_ancestor.insert(0, {'nodeId':nodeid_new, 'nodeType':nodetype_new}) # prepend: ancestors end up oldest-first
                        if {'nodeId':nodeid_new, 'nodeType':nodetype_new} not in nodeid_in_search_all :
                            nodeid_in_search_all.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
                            nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})




                elif nodetype_now != 'opsgptkg_intent' and nodetype_new == 'opsgptkg_intent':
                    # first intention node encountered on this path: record it
                    # and keep exploring from it
                    tool_ancestor.insert(0, {'nodeId':nodeid_new, 'nodeType':nodetype_new}) # prepend, oldest-first
                    if {'nodeId':nodeid_new, 'nodeType':nodetype_new} not in nodeid_in_search_all :
                        nodeid_in_search_all.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
                        nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
                    # nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
                elif nodetype_now == 'opsgptkg_intent' and nodetype_new == 'opsgptkg_intent':
                    # intention node reached from another intention node: stop
                    pass
                elif nodetype_new == 'opsgptkg_phenomenon':
                    # phenomenon (fact) node: record it and continue
                    tool_ancestor.insert(0, {'nodeId':nodeid_new, 'nodeType':nodetype_new}) # prepend, oldest-first
                    if {'nodeId':nodeid_new, 'nodeType':nodetype_new} not in nodeid_in_search_all :
                        nodeid_in_search_all.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
                        nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
                    # nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})

                else: # neither task nor intention: not recorded, but keep walking through it
                    if {'nodeId':nodeid_new, 'nodeType':nodetype_new} not in nodeid_in_search_all :
                        nodeid_in_search_all.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
                        nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
                    # nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})

        # drop duplicates while preserving order (re-processed nodes above can
        # insert the same entry twice)
        tool_ancestor_new = []
        for i in range(len(tool_ancestor)):
            item_i = tool_ancestor[i]
            if item_i not in tool_ancestor_new:
                tool_ancestor_new.append(item_i)
        logging.info(f'geabase_getmemory tool_ancestor_new 的个数为{len(tool_ancestor_new)}')
        logging.info(f'geabase_getmemory tool_ancestor 的个数为{len(tool_ancestor)}')
        #print('tool_ancestor_new', tool_ancestor_new)
        tool_ancestor = tool_ancestor_new
        return tool_ancestor
+
+ def get_memory_from_ancestor(self, tool_ancestor, sessionId, role_tags = None):
+ '''
+ 给定了一堆祖先节点 + 当前节点,从中提取出memory
+ 祖先节点
+ 对于祖先tool 而言,提取出 nodedescription、tool responds
+ 对于祖先react 而言,提取出 nodedescription(name), 每一条message
+ 将这个list 按照时间顺序排好
+ 当前节点
+ 对于tool 而言,没有当前节点的memory
+ 对于react节点而言, 有运行到中间状态的memory
+ 将这个list 按照时间顺序排好
+ 按照固定格式输出 memory_list_output
+
+ role_tags 是一系列list, 如果指定为空,则没有约束
+
+ '''
+ if role_tags == None:
+ role_tags = None
+ else:
+ role_tags = ['all'] + [role_tags]
+ # print(role_tags)
+ message_res_list = []
+ for i in range(len(tool_ancestor)):
+ logging.info(f'geabase_getmemory 查询第{i}个祖先节点')
+ # logging.info(tool_ancestor[i])
+ nodeId = tool_ancestor[i]['nodeId']
+ nodeType = tool_ancestor[i]['nodeType']
+ logging.info(f'【查询】memory message_index {nodeId}; sessionId {sessionId} ')
+ # if self.gb_handler.geabase_is_react_node(nodeId, nodeType) == False:
+ #当前节点为tool or react 节点,一次性获得该节点所有的 chapter的memory数据
+ if nodeType == 'opsgptkg_task':
+ memory_res = self.memory_manager.get_memory_pool_by_all({
+ # "message_index": hash_id(nodeId, sessionId), #nodeId.replace(":", "_").replace("-", "_"),
+ 'user_name': hash_id(nodeId),
+ "chat_index": sessionId,
+ "role_tags": role_tags,
+ }
+ )
+ logging.info(f'memory_res为:{memory_res}')
+ message_res = memory_res.get_messages()
+ message_res_list = message_res_list + message_res
+
+
+
+ elif nodeType == 'opsgptkg_intent':
+ #如果祖先节点是意图节点, 意图节点的memory 暂时不分 tag
+ memory_res = self.memory_manager.get_memory_pool_by_all({
+ "message_index": hash_id(nodeId, sessionId), #nodeId.replace(":", "_").replace("-", "_"),
+ "chat_index": sessionId,
+ "role_type": "user"})
+
+ message_res = memory_res.get_messages()
+ message_res_list = message_res_list + message_res
+
+ #根据时间排序
+ # message_res_list = message_res_list[::-1] #倒转message, 因为发现tbase存数据是类似堆栈的格式存的。 后来者在上; 似乎不对
+ get_messages_res_sorted = self.sort_messages_by_time(message_res_list)
+ return get_messages_res_sorted
+
+
+ def assemble_memory_for_reactagent(self, nodeId, nodeType, get_messages_res_sorted):
+ '''
+ 假设 祖先节点已经选择好了,而且 节点中相关的message也选择好了, 也经过时间排序了
+ react 节点中 对于一个 agent_x (电脑agent), 组装memory的函数
+ '''
+
+ if self.gb_handler.geabase_is_react_node(nodeId, nodeType) == False:
+ raise ValueError(f'当前应该不是 single 节点才对 ')
+ else: #react 节点
+ memory_list = []
+ for i in range(len( get_messages_res_sorted ) ):
+ if get_messages_res_sorted[i].role_name in ['firstUserInput', 'function_caller', 'user' ]:
+ # # 第一次输入, tool返回, tool描述,
+ # memory_list.append({
+ # 'role_type': get_messages_res_sorted[i].role_type,
+ # 'role_name': get_messages_res_sorted[i].role_name,
+ # 'role_content': get_messages_res_sorted[i].role_content}
+ # )#此处存疑,需要实验后才知道效果如何,注释后,相当于主持人和agent只能看到tool的标题和执行结果,且以list的形式呈现
+ memory_list.append(get_messages_res_sorted[i].role_content)
+ elif get_messages_res_sorted[i].role_type in ['react_memory_save']:
+ # react 模块各个角色说的话,
+ memory_list.append(get_messages_res_sorted[i].role_content)
+ return memory_list
+
+ def assemble_memory_for_tool(self, nodeId, nodeType, get_messages_res_sorted):
+ if self.gb_handler.geabase_is_react_node(nodeId, nodeType) == False:
+ '''
+ '''
+ memory_list = []
+ for i in range(len( get_messages_res_sorted ) ):
+ if get_messages_res_sorted[i].role_name == 'firstUserInput':
+ memory_list.append({
+ 'role_type': 'user',
+ 'role_name': 'firstUserInput',
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ if get_messages_res_sorted[i].role_name == 'user':
+ memory_list.append({
+ 'role_type': 'user',
+ 'role_name': 'None',
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ if get_messages_res_sorted[i].role_name == 'function_caller':
+ memory_list.append({
+ 'role_type': 'observation',
+ 'role_name': 'function_caller',
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ if get_messages_res_sorted[i].role_type in ['react_memory_save']:
+ # react 模块各个角色说的话,
+ memory_list.append({
+ 'role_type': get_messages_res_sorted[i].role_type,
+ 'role_name': get_messages_res_sorted[i].role_name,
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ else: #react 节点
+ raise ValueError(f'当前应该是 tool task 节点才对 ')
+
+ return memory_list
+
+ def assemble_memory(self, nodeId, nodeType, get_messages_res_sorted):
+ '''
+ 组装memory
+ get_messages_res_sorted 已经根据时间排序好了。 但是对于tool 和 react模块的memory拼装做法有所不同
+ 已经弃用
+ '''
+ if self.gb_handler.geabase_is_react_node(nodeId, nodeType) == False:
+ '''
+
+ '''
+ memory_list = []
+ for i in range(len( get_messages_res_sorted ) ):
+ if get_messages_res_sorted[i].role_name == 'firstUserInput':
+ memory_list.append({
+ 'role_type': 'user',
+ 'role_name': 'firstUserInput',
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ if get_messages_res_sorted[i].role_name == 'user':
+ memory_list.append({
+ 'role_type': 'user',
+ 'role_name': 'None',
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ if get_messages_res_sorted[i].role_name == 'function_caller':
+ memory_list.append({
+ 'role_type': 'observation',
+ 'role_name': 'function_caller',
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ if get_messages_res_sorted[i].role_type in ['react_memory_save']:
+ # react 模块各个角色说的话,
+ memory_list.append({
+ 'role_type': get_messages_res_sorted[i].role_type,
+ 'role_name': get_messages_res_sorted[i].role_name,
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ else: #react 节点
+ memory_list = []
+ for i in range(len( get_messages_res_sorted ) ):
+ if get_messages_res_sorted[i].role_name in ['firstUserInput', 'function_caller', 'user' ]:
+ # 第一次输入, tool返回, tool描述,
+ memory_list.append({
+ 'role_type': get_messages_res_sorted[i].role_type,
+ 'role_name': get_messages_res_sorted[i].role_name,
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ elif get_messages_res_sorted[i].role_type in ['react_memory_save']:
+ # react 模块各个角色说的话,
+ memory_list.append({
+ 'role_type': get_messages_res_sorted[i].role_type,
+ 'role_name': get_messages_res_sorted[i].role_name,
+ 'role_content': get_messages_res_sorted[i].role_content}
+ )
+ return memory_list
+
+
+
if __name__ == "__main__":
    # Import-only module: no standalone entry-point behaviour.

    pass

    #
\ No newline at end of file
diff --git a/muagent/service/ekg_reasoning/src/graph_search/task_node_prompt.py b/muagent/service/ekg_reasoning/src/graph_search/task_node_prompt.py
new file mode 100644
index 0000000..3f60932
--- /dev/null
+++ b/muagent/service/ekg_reasoning/src/graph_search/task_node_prompt.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+
# Prompt suffix for the "parallel" running mode: asks the LLM, acting as the
# Dungeon Master, to emit the whole action plan for all participants as one
# JSON dict. Appended to the node description at runtime; the Chinese text is
# the runtime payload and must stay as-is.
PARALLEL_RUNNING_PROMPT= \
'''
##输出##
请以dict的形式,给出参与者的所有行动计划。行动计划表示为JSON,格式为
 {"thought": str, "action_plan": [{"player_name":str, "agent_name":str}, {"player_name":str, "agent_name":str}], "Dungeon_Master": [{"memory_tag":str,"content":str}] }


关键词含义如下:
_ thought (str): 主持人的一些思考,包括分析玩家的存活状态,对历史对话信息的理解,对当前任务情况的判断等。
_ player_name (str): 玩家的 player_name
_ agent_name (str): 玩家的 agent_name;
_ content (str): 为主持人的告知信息,
_ memory_tag (List[str]): memory_tag 固定为本条信息的可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]

##example##
{"thought": "str", "action_plan": [{"player_name":str, "agent_name":str}, {"player_name":str, "agent_name":str}, ... ], "Dungeon_Master": [{ "memory_tag":["agent_name_a","agent_name_b"], "content": "str",}]}

##注意事项##
1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。
2. 整个dict是一个jsonstr,请输出jsonstr,不用输出markdown格式
3. 结合已有的步骤,只输出一个 dict,不要包含其他信息
'''
+
+
+
# Prompt suffix for the "planning" running mode: like PARALLEL_RUNNING_PROMPT
# but each plan step additionally carries a task_description (what the actor
# is supposed to do — planning only, nothing has happened yet).
PLANNING_RUNNING_PROMPT= \
'''
##输出##
请以dict的形式,给出参与者的所有行动计划。行动计划表示为JSON,格式为
 {"thought": str, "action_plan": [{"player_name":str, "agent_name":str, "task_description":str}, {"player_name":str, "agent_name":str,"task_description":str}], "Dungeon_Master": [{"memory_tag":str}] }


关键词含义如下:
_ thought (str): 主持人的一些思考,包括分析玩家的存活状态,对历史对话信息的理解,对当前任务情况的判断等。
_ player_name (str): 行动方的 player_name, 若行动方为玩家,否则为玩家的 player_name;如果行动方是主持人,为 "主持人",
_ agent_name (str): 行动方的 agent_name, 若行动方为玩家,否则为玩家的 agent_name;如果行动方是主持人,为 "主持人",
_ task_description (str): 为行动方本轮的任务. 只要写要做什么事儿即可,因为现在还处于任务规划阶段,事情还没有开始做。比如行动方本轮的目的是讨论,写出讨论的话题、目的等即可,不要把讨论的内容也写出来,因为现在还没有开始讨论,只是在规划行动计划。
_ memory_tag (List[str]): memory_tag 固定为本条信息的可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]

##example##
{"thought": "str", "action_plan": [{"player_name":str, "agent_name":str, "task_description":str}, {"player_name":str, "agent_name":str, "task_description":str}, ... ], "Dungeon_Master": [{ "memory_tag":["agent_name_a","agent_name_b"], "content": "str",}]}

##注意事项##
1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。
2. 整个dict是一个jsonstr,请输出jsonstr,不用输出markdown格式
3. 结合已有的步骤,只输出一个 dict,不要包含其他信息
'''
+
+
# Prompt appended when, per the action plan, it is the Dungeon Master's turn
# to speak. fix: the format-template line was malformed JSON
# ('{"thought": str, "content":str}]}' — stray ']}'), which invites malformed
# LLM output; it is now a plain dict template, matching the instruction that
# the whole output is one dict.
PLANNING_RUNNING_PROMPT_DM_SPEECH= \
'''
##输出##
本阶段行动计划已经给出了,根据行动计划,此时轮到主持人发言。主持人的发言表示为JSON,格式为
{"thought": str, "content": str}

主持人请尽量不要重复上一轮的发言

关键词含义如下:
_ thought (str): 主持人的一些思考,包括分析玩家的存活状态,对历史对话信息的理解,对当前任务情况的判断等。
_ content (str): 为主持人的具体发言信息,




##注意事项##
1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。
2. 整个dict是一个jsonstr,请输出jsonstr,不用输出markdown格式
3. 结合已有的步骤,只输出一个 dict,不要包含其他信息
'''
+
+
# Template of a single agent-reply step, in the shape the react history
# stores: thought + acting agent + tag-scoped observation content.
PLANNING_RUNNING_AGENT_REPLY_TEMPLATE= \
{"thought": "None", "action": {"agent_name": "agent_name_c", "player_name":"player_name_d"},
 "observation": [{ "memory_tag":["agent_name_a"],"content": "str"}]}

# Suffix marking an empty "existing steps" section on the first LLM call.
PLANNING_RUNNING_PROMPT_SUFFIX_1 = \
'\n##已有步骤##\n无'



# Suffix asking the LLM for exactly one next step (a single dict).
PLANNING_RUNNING_PROMPT_SUFFIX_2 = \
'\n##请输出下一个步骤,切记只输出一个步骤,它应该只是一个dict ##\n'
+
# Prompt suffix for the step-by-step react mode: the LLM outputs ONE next step
# per call (a single dict), alternating between Dungeon_Master turns and
# player observations, and finishes with {"action": "taskend"}.
REACT_RUNNING_PROMPT = \
"""
##输出##
请以列表的形式,给出参与者的所有行动。每个行动表示为JSON,格式为
[{"thought": str, "action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}]}, ...]

关键词含义如下:
_ thought (str): 主持人执行行动的一些思考,包括分析玩家的存活状态,对历史对话信息的理解,对当前任务情况的判断。
_ action (dict): 行动发起方的信息。
_ player_name (str): 行动发起方的 player_name,若行动发起方为主持人,为空,否则为玩家的 player_name;
_ agent_name (str): 行动发起方的 agent_name,若为主持人,则 agent_name 为 "主持人",否则为玩家的 agent_name。
_ content (str): 行动发起方的具体行为,若为主持人,content 为告知信息;否则,content 为玩家的具体行动。
_ memory_tag (List[str]): 无论行动发起方是主持人还是玩家,memory_tag 固定为所有信息可见对象的agent_name, 如果信息可见对象为所有玩家,固定为 ["all"]


##example##
如果是玩家发言,则用 {"thought": "str", "action": {"agent_name": "agent_name_c", "player_name":"player_name_d"}, "observation": [{ "memory_tag":["agent_name_a","agent_name_b"],"content": "str"}]} 格式表示。content是玩家发出的信息;memory_tag是这条信息可见的对象,需要填写agent名。不要填写 agent_description
如果agent_name是主持人,则无需输入player_name, 且observation变为 Dungeon_Master。即{"thought": "str", "action": {"agent_name": "主持人", "player_name":""}, "Dungeon_Master": [{ "memory_tag":["agent_name_a","agent_name_b"], "content": "str",}]}

##注意事项##
1. 所有玩家的座位、身份、agent_name、存活状态等信息在开头部分已给出。
2. "observation" or "Dungeon_Master"如何选择?若 agent_name 为"主持人",则为"Dungeon_Master",否则为 "observation"。
3. 输出列表的最后一个元素一定是{"action": "taskend"}。
4. 整个list是一个jsonstr,请输出jsonstr,不用输出markdown格式
5. 结合已有的步骤,每次只输出下一个步骤,即一个 {"thought": str, "action": {"player_name":str, "agent_name":str}, "observation" or "Dungeon_Master": [{"memory_tag":str,"content":str}]}
6. 如果是人类玩家发言, 一定要选择类似 人类agent 这样的agent_name
"""
+
+
diff --git a/muagent/service/ekg_reasoning/src/intention_recognition/intention_recognition_tool.py b/muagent/service/ekg_reasoning/src/intention_recognition/intention_recognition_tool.py
index 2b81fe4..1d06723 100644
--- a/muagent/service/ekg_reasoning/src/intention_recognition/intention_recognition_tool.py
+++ b/muagent/service/ekg_reasoning/src/intention_recognition/intention_recognition_tool.py
@@ -7,7 +7,7 @@
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
-
+RETRY_MAX_NUM = 3
def intention_recognition_ekgfunc( root_node_id, rule, query, memory, start_from_root = True,
url= os.environ['intention_url'] ):
'''
@@ -45,15 +45,38 @@ def intention_recognition_ekgfunc( root_node_id, rule, query, memory, start_from
}
logging.info( body )
- r = requests.post(url, json=body, headers=headers)
-
- logging.info( str((r.json() )) ) #12:00:37
- output_1 = (r.json())
- # logging.info('============意图识别的结果是================')
- # logging.info(f'意图识别结果是 {output_1}')
- # logging.info('============================================')
- res = json.loads(output_1['resultMap']['algorithmResult'])
- return res
+ retry_num = 0
+ while retry_num <= RETRY_MAX_NUM:
+ retry_num = retry_num + 1
+ try:
+ r = requests.post(url, json=body, headers=headers)
+
+
+ logging.info( str((r.json() )) ) #12:00:37
+ output_1 = (r.json())
+ # logging.info('============意图识别的结果是================')
+ # logging.info(f'意图识别结果是 {output_1}')
+ # logging.info('============================================')
+ res = json.loads(output_1['resultMap']['algorithmResult'])
+ return res
+
+ except Exception as e:
+
+ logging.info(f'意图识别报错:{e}')
+ sleep(1)
+ raise ValueError(f'意图识别报错 超过了最大重试次数RETRY_MAX_NUM:{RETRY_MAX_NUM}')
+ return None
+
+
+ # r = requests.post(url, json=body, headers=headers)
+
+ # logging.info( str((r.json() )) ) #12:00:37
+ # output_1 = (r.json())
+ # # logging.info('============意图识别的结果是================')
+ # # logging.info(f'意图识别结果是 {output_1}')
+ # # logging.info('============================================')
+ # res = json.loads(output_1['resultMap']['algorithmResult'])
+ # return res
@@ -85,17 +108,31 @@ def intention_recognition_querypatternfunc( query,
}
logging.info( body )
- r = requests.post(url, json=body, headers=headers)
- logging.info( str((r.json() )) ) #12:00:37
- output_1 = (r.json())
- # logging.info('============意图识别的结果是================')
- # logging.info(f'意图识别结果是 {output_1}')
- # logging.info('============================================')
- res = json.loads(output_1['resultMap']['algorithmResult'])
- if type(res) == 'str':
- res = json.loads(res)
- return res['output']
+ retry_num = 0
+ while retry_num <= RETRY_MAX_NUM:
+ retry_num = retry_num + 1
+ try:
+ r = requests.post(url, json=body, headers=headers)
+
+
+ logging.info( str((r.json() )) ) #12:00:37
+ output_1 = (r.json())
+ # logging.info('============意图识别的结果是================')
+ # logging.info(f'意图识别结果是 {output_1}')
+ # logging.info('============================================')
+ res = json.loads(output_1['resultMap']['algorithmResult'])
+            if isinstance(res, str):
+ res = json.loads(res)
+ return res['output']
+ except Exception as e:
+
+ logging.info(f'意图识别报错:{e}')
+ sleep(1)
+ raise ValueError(f'意图识别报错 超过了最大重试次数RETRY_MAX_NUM:{RETRY_MAX_NUM}')
+ return None
+
+
@@ -133,17 +170,30 @@ def intention_recognition_querytypefunc( query,
}
logging.info( body )
- r = requests.post(url, json=body, headers=headers)
-
- logging.info( str((r.json() )) ) #12:00:37
- output_1 = (r.json())
- # logging.info('============意图识别的结果是================')
- # logging.info(f'意图识别结果是 {output_1}')
- # logging.info('============================================')
- res = json.loads(output_1['resultMap']['algorithmResult'])
- if type(res) == 'str':
- res = json.loads(res)
- return res['output']
+
+ retry_num = 0
+ while retry_num <= RETRY_MAX_NUM:
+ retry_num = retry_num + 1
+ try:
+ r = requests.post(url, json=body, headers=headers)
+
+
+ logging.info( str((r.json() )) ) #12:00:37
+ output_1 = (r.json())
+ # logging.info('============意图识别的结果是================')
+ # logging.info(f'意图识别结果是 {output_1}')
+ # logging.info('============================================')
+ res = json.loads(output_1['resultMap']['algorithmResult'])
+            if isinstance(res, str):
+ res = json.loads(res)
+ return res['output']
+ except Exception as e:
+
+ logging.info(f'意图识别报错:{e}')
+ sleep(1)
+ raise ValueError(f'意图识别报错 超过了最大重试次数RETRY_MAX_NUM:{RETRY_MAX_NUM}')
+ return None
+
diff --git a/muagent/service/ekg_reasoning/src/memory_handler/ekg_memory_handler.py b/muagent/service/ekg_reasoning/src/memory_handler/ekg_memory_handler.py
index aa6ae05..8f5eb51 100644
--- a/muagent/service/ekg_reasoning/src/memory_handler/ekg_memory_handler.py
+++ b/muagent/service/ekg_reasoning/src/memory_handler/ekg_memory_handler.py
@@ -5,6 +5,8 @@
#路径增加
import sys
import os
+from typing import Union, Optional
+from typing import List, Dict, Optional, Union
src_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
@@ -213,7 +215,7 @@ def nodeid_in_subtree_get(self, currentNodeId, sessionId):
hashpostfix = '-nodeid_in_subtree'
memory_manager_res= self.memory_manager.get_memory_pool_by_all({
"chat_index": sessionId,
- # "message_index" : hash_id( currentNodeId, sessionId, hashpostfix),
+ "message_index" : hash_id( currentNodeId, sessionId, hashpostfix),
# "role_type": "nodeid_in_subtree",
})
get_messages_res = memory_manager_res.get_messages()
@@ -345,6 +347,15 @@ def tool_nodecount_add_chapter(self, sessionId, currentNodeId):
logging.info(f'节点{sessionId} 的 count_info现在是{count_info}')
self.nodecount_set( sessionId, currentNodeId,count_info) #重写count数据,
+ ####### 泛化推理相关 ####
+ def gr_save_prompt_main(self, sessionId:str, currentNodeId:str, prompt_main:Dict, )->Dict:
+ '''
+ 存储泛化推理的当前 prompt_main 到 memory中
+ gr_count: 当前交互到第几步了
+ '''
+        raise NotImplementedError("gr_save_prompt_main is not implemented yet")
+
+
def tool_nodedescription_save(self, sessionId, currentNodeId, role_content, user_input_memory_tag=None):
'''
将 tool 的 description 存入到memory中, 分成 chapter
@@ -432,14 +443,20 @@ def react_nodedescription_save(self, sessionId, currentNodeId, role_content):
self.memory_manager.append(message)
+
+ #
+
+
+
def react_memory_save(self, sessionId, currentNodeId, llm_res):
'''
将一个node中 llm的返回值 解析成json,存储在memory里。 每次覆盖式存储。 存主持人说的话、其他agent说的话; 同时修改count中的计数
+ 格式为 [player_name, agent_name, content, memory_tag, section]
llm_str = llm_result_truncation + '":[]}]'
llm_result_truncation = '[\n {"action": {"agent_name": "主持人"}, "Dungeon_Master": [{"content": "狼人时刻开始,以下玩家是狼人:player_1(狼人-1),李四(人类玩家)(狼人-2)。", "memory_tag":["agent_1", "人类agent"]}]},\n {"action": {"agent_name": "agent_1", "player_name":"player_1"}, "observation'
'''
-
+
#1. 解析 llm的输出
if type(llm_res) == str:
llm_res_json = json.loads(llm_res)
@@ -450,35 +467,67 @@ def react_memory_save(self, sessionId, currentNodeId, llm_res):
section = 0
memory_save_info_list = []
for i in range(len(llm_res_json)):
- if llm_res_json[i]['action'] == 'taskend':
- #任务执行完毕
- break
- agent_name = llm_res_json[i]['action']['agent_name']
- if agent_name == '主持人':
- player_name = '主持人'
- for j in range(len(llm_res_json[i]['Dungeon_Master'])):
- content = llm_res_json[i]['Dungeon_Master'][j]['content']
- memory_tag = llm_res_json[i]['Dungeon_Master'][j]['memory_tag']
- section += 1
- memory_save_info_list.append([player_name, agent_name, content, memory_tag, section])
- else:
- logging.info(f' llm_res_json[i]是{llm_res_json[i]}')
- if llm_res_json[i]['observation'] == []:
+ if 'action' in llm_res_json[i].keys():
+ #react 类型 或者 是 parallel中电脑玩家的返回值
+
+ if llm_res_json[i]['action'] == 'taskend':
+ #任务执行完毕
+ break
+ agent_name = llm_res_json[i]['action']['agent_name']
+ if agent_name == '主持人':
+ player_name = '主持人'
+ for j in range(len(llm_res_json[i]['Dungeon_Master'])):
+ content = llm_res_json[i]['Dungeon_Master'][j]['content']
+ memory_tag = llm_res_json[i]['Dungeon_Master'][j]['memory_tag']
+ section += 1
+ memory_save_info_list.append([player_name, agent_name, content, memory_tag, section])
+ else:
+ #logging.info(f' llm_res_json[i]是{llm_res_json[i]}')
+ if llm_res_json[i]['observation'] == []:
+ continue
+ player_name = llm_res_json[i]['action']['player_name']
+ for j in range(len(llm_res_json[i]['observation'])):
+ content = llm_res_json[i]['observation'][j]['content']
+ memory_tag = llm_res_json[i]['observation'][j]['memory_tag']
+ section += 1
+ memory_save_info_list.append([player_name, agent_name, content, memory_tag, section])
+
+ elif 'action_plan' in llm_res_json[i].keys():
+ #此时为 PlanningRunningAgentReply 格式, 可能为 plan 或者 parallel的第一次返回值
+
+ if self.gb_handler.get_tag(rootNodeId = currentNodeId, rootNodeType = 'opsgptkg_task', key = 'action') == 'plan':
+ #plan模式会略过 制定plan时说的话//现在制定plan时也不再说话了
+ #parallel模式则不会略过制定action_plan是说的话
continue
- player_name = llm_res_json[i]['action']['player_name']
- for j in range(len(llm_res_json[i]['observation'])):
- content = llm_res_json[i]['observation'][j]['content']
- memory_tag = llm_res_json[i]['observation'][j]['memory_tag']
- section += 1
- memory_save_info_list.append([player_name, agent_name, content, memory_tag, section])
+ else:
+ if llm_res_json[i]['action_plan'] == 'taskend':
+ #任务执行完毕
+ break
+ agent_name = '主持人' #只有主持人能生成plan
+ player_name = '主持人'
+
+
+ for j in range(len(llm_res_json[i]['Dungeon_Master'])):
+ content = llm_res_json[i]['Dungeon_Master'][j]['content']
+ memory_tag = llm_res_json[i]['Dungeon_Master'][j]['memory_tag']
+ section += 1
+ memory_save_info_list.append([player_name, agent_name, content, memory_tag, section])
+
+
- if 'taskend' not in json.dumps(llm_res, ensure_ascii=False) and memory_save_info_list[-1][1] != '主持人':
- #现在还没有run到最后。那么 #最后一个observation,这个是幻觉,主持人替其他玩家说的话, 不能存入到memory里
- #如果run到最后了, 那么最后一个observation是本次填充的
- memory_save_info_list = memory_save_info_list[0:-1]
+ logging.info(f'memory_save_info_list 初步 is {memory_save_info_list}')
+
+
+ if 'taskend' not in json.dumps(llm_res, ensure_ascii=False) and memory_save_info_list!= [] and memory_save_info_list[-1][1] != '主持人' and \
+ self.gb_handler.get_tag(rootNodeId = currentNodeId, rootNodeType = 'opsgptkg_task', key = 'action') == 'react':
+ #现在还没有run到最后。那么 #最后一个observation,这个是幻觉,主持人替其他玩家说的话, 不能存入到memory里
+ #如果run到最后了, 那么最后一个observation是本次填充的
+ #这个只在react模式下生效
+ memory_save_info_list = memory_save_info_list[0:-1]
+
- logging.info(f'memory_save_info_list is {memory_save_info_list}')
+ logging.info(f'memory_save_info_list 最终经过删减(假如是react,如果不是则不删减) is {memory_save_info_list}')
#2. 将llm的输出存入到memory中,更新,已经写入的就不更新了
hashpostfix = '_reactmemory'
@@ -495,8 +544,9 @@ def react_memory_save(self, sessionId, currentNodeId, llm_res):
for i in range(len(memory_save_info_list)):
player_name, agent_name, content, memory_tag, section = memory_save_info_list[i]
- hashpostfix_all = f'_chapter{chapter}_section{section}' + hashpostfix
-
+ hashpostfix_all = f'_chapter{chapter}_section{section}_agentName{agent_name}' + hashpostfix
+ # 原来react_memory_save 的问题在于, 根据 _chapter{chapter}_section{section} 进行编码的情况会覆盖其他 并行的agent的结果
+ # 另外,取结果时,由于三个agent都是同时发生的,所以原来根据时间取结果的方式也会出现问题,造成三个agent的返回值都是一个
memory_manager_res= self.memory_manager.get_memory_pool_by_all({
# "chat_index": sessionId,
@@ -505,7 +555,7 @@ def react_memory_save(self, sessionId, currentNodeId, llm_res):
})
get_messages_res = memory_manager_res.get_messages()
if get_messages_res != []:
- #这一条数据已经有了,不用重复写入,只更新没有写入的
+ #这一条数据已经有了,不用重复写入,只更新没有写入的, 这里根据hashpostfix_all 中的 section和chapter 来进行判断
continue
user_input_memory_tag = self.gb_handler.user_input_memory_tag(currentNodeId, 'opsgptkg_task')
@@ -532,6 +582,223 @@ def react_memory_save(self, sessionId, currentNodeId, llm_res):
role_content_nodeinfo['section'] = len(memory_save_info_list)
self.nodecount_set( sessionId, currentNodeId, role_content_nodeinfo)
+ def message_get(self, sessionId:str, nodeId:str, hashpostfix:str,
+ role_name:str,role_type:str )-> Union[str, list]:
+ '''
+ 得到信息, 作为一个通用函数。如果没有检索到信息,则返回空的'', 如果检索到了信息,则返回str,只返回第0个message的str
+ 注意对hashpostfix_chapter进行了特殊处理
+ '''
+ node_count_res = self.nodecount_get( sessionId, nodeId)
+ if node_count_res == []:
+ chapter = 1
+ else:
+ chapter = json.loads(node_count_res[0].role_content)['chapter']
+
+ #hashpostfix = '_plan'
+ hashpostfix_chapter = f'_chapter{chapter}'
+ hashpostfix_all = hashpostfix_chapter + hashpostfix
+ #print(nodeId, sessionId, hashpostfix_all)
+ memory_manager_res= self.memory_manager.get_memory_pool_by_all({
+ # "chat_index": sessionId,
+ "message_index" : hash_id(nodeId, sessionId, hashpostfix_all),
+ "role_name" : role_name,#'plan',
+ "role_type" : role_type#"DM"
+ })
+ get_messages_res = memory_manager_res.get_messages()
+ if len(get_messages_res) == 0:
+ #raise ValueError("执行memory查询没有找到")
+ logging.info( "执行memory查询没有找到" )
+ return ''
+
+ return get_messages_res[0].role_content
+
+ def message_save(self, sessionId:str, nodeId:str, role_content:str,
+ hashpostfix:str, user_name:str, role_name:str, role_type:str)->None:
+ '''
+ 将message 存下来,作为一个通用函数
+ 注意对hashpostfix_chapter进行了特殊处理
+
+ for example
+ #hashpostfix = '_plan'
+ #user_name = currentNodeId
+ #role_name = 'plan'
+ #role_type = "DM"
+
+ '''
+
+
+
+ node_count_res = self.nodecount_get( sessionId, nodeId)
+ if node_count_res == []:
+ chapter = 1
+ else:
+ chapter = json.loads(node_count_res[0].role_content)['chapter']
+
+ hashpostfix_chapter = f'_chapter{chapter}'
+ hashpostfix_all = hashpostfix_chapter + hashpostfix
+ message = Message(
+ chat_index= sessionId,
+ message_index= hash_id(nodeId, sessionId, hashpostfix_all),
+ user_name = hash_id(user_name + str(chapter)),
+ role_name = role_name,
+ role_type = role_type,
+ role_content = role_content,
+
+ )
+
+ self.memory_manager.append(message)
+
+
+
+ def processed_agentname_get(self, sessionId:str, nodeId:str)-> str:
+ '''
+ 得到已经处理的agentname
+ '''
+
+
+ hashpostfix = '_processedAgentName'
+ role_name = 'PAN' #processedAgentName
+ role_type = "DM"
+
+ node_count_res = self.nodecount_get( sessionId, nodeId)
+ if node_count_res == []:
+ chapter = 1
+ else:
+ chapter = json.loads(node_count_res[0].role_content)['chapter']
+
+ #hashpostfix = '_plan'
+ #hashpostfix_chapter = f'_chapter{chapter}'
+ #hashpostfix_all = hashpostfix_chapter + hashpostfix
+ #print(nodeId, sessionId, hashpostfix_all)
+ memory_manager_res= self.memory_manager.get_memory_pool_by_all({
+ "chat_index": sessionId,
+ "user_name" : hash_id(nodeId + str(chapter)),
+ "role_name" : role_name,#'plan',
+ "role_type" : role_type#"DM"
+ })
+ get_messages_res = memory_manager_res.get_messages()
+ if len(get_messages_res) == 0:
+ #raise ValueError("执行memory查询没有找到")
+ logging.info( "执行memory查询没有找到" )
+ return ''
+
+ processed_agentname_list_all = []
+ for i in range(len(get_messages_res)):
+ processed_agentname_list_all.append( get_messages_res[i].role_content)
+
+
+ return json.dumps(processed_agentname_list_all, ensure_ascii=False)
+
+
+
+ def processed_agentname_save(self, sessionId:str, nodeId:str, agentName:str)->None:
+ '''
+ 将这次执行完的(lingsi返回的)agentname,存下来,
+ 注意,同名的agentname 会被覆盖
+ '''
+ # processed_agentname_get_str = self.processed_agentname_get(sessionId, nodeId)
+ # logging.info(f'已有的processed_agentname_get_str is {processed_agentname_get_str}, 当前要存入的agentName is {agentName}')
+ # if processed_agentname_get_str == '': #第一次有工具返回时,processed_agentname_get_str 值为’‘
+ # processed_agentname_list = []
+ # else:
+ # processed_agentname_list = json.loads( processed_agentname_get_str )
+
+ # processed_agentname_list.append(agentName)
+ # processed_agentname_list_str = json.dumps(processed_agentname_list, ensure_ascii=False)
+
+ processed_agentname_list = agentName
+ hashpostfix = f'_processedAgentName_{agentName}'
+ user_name = nodeId
+ role_name = 'PAN'
+ role_type = "DM"
+
+ return self.message_save( sessionId, nodeId, processed_agentname_list,
+ hashpostfix, user_name, role_name, role_type)
+
+
+
+ def current_plan_save(self, sessionId:str, currentNodeId:str, role_content:str)->None:
+ '''
+ 将current_plan 存下来
+
+ Parameters
+ ----------
+ sessionId : str
+ DESCRIPTION.
+ currentNodeId : str
+ DESCRIPTION.
+ role_content : str
+ DESCRIPTION.
+
+ Returns
+ -------
+ None
+ DESCRIPTION.
+
+ '''
+
+ hashpostfix = '_plan'
+ user_name = currentNodeId
+ role_name = 'plan'
+ role_type = "DM"
+
+ node_count_res = self.nodecount_get( sessionId, currentNodeId)
+ if node_count_res == []:
+ chapter = 1
+ else:
+ chapter = json.loads(node_count_res[0].role_content)['chapter']
+
+ hashpostfix_all = f'_chapter{chapter}' + hashpostfix
+ message = Message(
+ chat_index= sessionId,
+ message_index= hash_id(currentNodeId, sessionId, hashpostfix_all),
+ user_name = hash_id(user_name),
+ role_name = role_name,
+ role_type = role_type,
+ role_content = role_content,
+
+ )
+
+ self.memory_manager.append(message)
+
+ def current_plan_get(self, sessionId:str, currentNodeId:str)-> str:
+ '''
+
+ 得到当前的current_plan
+ Parameters
+ ----------
+ sessionId : str
+ DESCRIPTION.
+ currentNodeId : str
+ DESCRIPTION.
+
+ Returns
+ -------
+ str
+ DESCRIPTION.
+
+ '''
+ node_count_res = self.nodecount_get( sessionId, currentNodeId)
+ if node_count_res == []:
+ chapter = 1
+ else:
+ chapter = json.loads(node_count_res[0].role_content)['chapter']
+
+ hashpostfix = '_plan'
+ hashpostfix_all = f'_chapter{chapter}' + hashpostfix
+ print(currentNodeId, sessionId, hashpostfix_all)
+ memory_manager_res= self.memory_manager.get_memory_pool_by_all({
+ # "chat_index": sessionId,
+ "message_index" : hash_id(currentNodeId, sessionId, hashpostfix_all),
+ "role_name" : 'plan',
+ "role_type" : "DM"
+ })
+ get_messages_res = memory_manager_res.get_messages()
+ return get_messages_res[0].role_content
+
+
+
+
def react_current_history_save(self, sessionId, currentNodeId, role_content):
hashpostfix = '_his'
user_name = currentNodeId
@@ -557,7 +824,7 @@ def react_current_history_save(self, sessionId, currentNodeId, role_content):
self.memory_manager.append(message)
- def react_current_history_get(self, sessionId, currentNodeId):
+ def react_current_history_get(self, sessionId:str, currentNodeId:str)->str:
'''
得到当前的current_history
'''
@@ -569,7 +836,7 @@ def react_current_history_get(self, sessionId, currentNodeId):
hashpostfix = '_his'
hashpostfix_all = f'_chapter{chapter}' + hashpostfix
- print(currentNodeId, sessionId, hashpostfix_all)
+ #print(currentNodeId, sessionId, hashpostfix_all)
memory_manager_res= self.memory_manager.get_memory_pool_by_all({
# "chat_index": sessionId,
"message_index" : hash_id(currentNodeId, sessionId, hashpostfix_all),
@@ -634,19 +901,63 @@ def tool_observation_save(self, sessionId, currentNodeId, tool_information, user
#logging.info(f'sessionId {sessionId} start_nodeid {start_nodeid} 的 memory 是 {memory}')
- def get_output(self,sessionId, start_datetime, end_datetime):
+ def get_output(self,sessionId, start_datetime, end_datetime, agent_name):
+
+ #首先提取主持人可能的返回
+ get_messages_res_all = []
memory_manager_res= self.memory_manager.get_memory_pool_by_all({
+ "role_name" : '主持人',
"chat_index": sessionId,
'role_tags': ['all', '人类'],
"start_datetime":[start_datetime, end_datetime],
})
get_messages_res = memory_manager_res.get_messages()
- if get_messages_res == []:
+ get_messages_res_all = get_messages_res_all + get_messages_res
+
+ #再提取 标题 user可能的返回
+ get_messages_res = []
+ memory_manager_res= self.memory_manager.get_memory_pool_by_all({
+ "role_name" : 'user',
+ "chat_index": sessionId,
+ 'role_tags': ['all', '人类'],
+ "start_datetime":[start_datetime, end_datetime],
+ })
+ get_messages_res = memory_manager_res.get_messages()
+ get_messages_res_all = get_messages_res_all + get_messages_res
+
+
+ #再提取 agent 可能的返回
+        if isinstance(agent_name, str):
+ get_messages_res = []
+ memory_manager_res= self.memory_manager.get_memory_pool_by_all({
+ "role_name" : agent_name,
+ "chat_index": sessionId,
+ 'role_tags': ['all', '人类'],
+ "start_datetime":[start_datetime, end_datetime],
+ })
+ get_messages_res = memory_manager_res.get_messages()
+ get_messages_res_all = get_messages_res_all + get_messages_res
+
+
+ #再提取 tool 可能的返回
+ get_messages_res = []
+ memory_manager_res= self.memory_manager.get_memory_pool_by_all({
+ "role_name" : 'function_caller',
+ "chat_index": sessionId,
+ 'role_tags': ['all', '人类'],
+ "start_datetime":[start_datetime, end_datetime],
+ })
+ get_messages_res = memory_manager_res.get_messages()
+ get_messages_res_all = get_messages_res_all + get_messages_res
+
+ get_messages_res_all = sorted(get_messages_res_all, key=lambda msg: msg.start_datetime)#按照时间进行排序
+
+ if get_messages_res_all == []:
logging.info('本阶段没有能给用户看的信息')
return None
outputinfo = []
- for i in range(len(get_messages_res)):
- outputinfo.append( get_messages_res[i].role_content )
+ for i in range(len(get_messages_res_all)):
+ outputinfo.append( get_messages_res_all[i].role_content )
logging.info(f'outputinfo is {outputinfo}')
# outputinfo_str = json.dumps(outputinfo, ensure_ascii=False)
diff --git a/muagent/service/ekg_reasoning/src/question_answer/qa_function.py b/muagent/service/ekg_reasoning/src/question_answer/qa_function.py
index 78d0d7c..8ebcf32 100644
--- a/muagent/service/ekg_reasoning/src/question_answer/qa_function.py
+++ b/muagent/service/ekg_reasoning/src/question_answer/qa_function.py
@@ -65,10 +65,9 @@ def full_link_summary(self):
resstr = self.geabase_nodediffusion_qa()
print(resstr)
print(f'full_link_summary prompt的长度为 {len(resstr)}')
- resstr_llm_summary = call_llm(input_content = resstr, llm_model = 'Qwen2_72B_Instruct_OpsGPT',llm_config=self.llm_config)
-
- visualization_url = self.get_visualization_url()
+ resstr_llm_summary = call_llm(input_content = resstr, llm_model = None,llm_config=self.llm_config)
+ #visualization_url = self.get_visualization_url()
return resstr_llm_summary
@@ -96,7 +95,7 @@ def next_step_summary(self):
print(prompt)
print(f'prompt的长度为 {len(prompt)}')
- res = call_llm(input_content = prompt, llm_model = 'Qwen2_72B_Instruct_OpsGPT',llm_config=self.llm_config)
+ res = call_llm(input_content = prompt, llm_model = None, llm_config=self.llm_config)
return res
@@ -122,7 +121,7 @@ def get_referred_node(self):
'''
print(prompt)
print(f'prompt的长度为 {len(prompt)}')
- res = call_llm(input_content = prompt, llm_model = 'Qwen2_72B_Instruct_OpsGPT',llm_config=self.llm_config)
+ res = call_llm(input_content = prompt, llm_model = None,llm_config=self.llm_config)
# res = robust_call_llm(prompt)
return res
@@ -141,137 +140,153 @@ def geabase_nodediffusion_qa(self):
'''
- #resstr = '你是一个计划总结大师,可以用比较通俗易懂的语言总结一个计划。 以下计划由一个知识图谱表示。请将其总结为自然语言的方式 \n'
-
+ resstr = '你是一个计划总结大师,可以用比较通俗易懂的语言总结一个计划。 以下计划由一个知识图谱表示。请将其总结为自然语言的方式 \n'
- # #1.假设当前节点已经运行完,得到后面的tool. 如果为事实节点,则需要采用大模型进行判断
- # tool_plan = []
- # nodeid_in_search = [{'nodeId':self.start_nodeid, 'nodeType':self.start_nodetype}]
- # while len(nodeid_in_search)!= 0:
- # nodedict_now = nodeid_in_search.pop()
- # nodeid_now = nodedict_now['nodeId']
- # nodetype_now = nodedict_now['nodeType']
+ #1.假设当前节点已经运行完,得到后面的tool. 如果为事实节点,则需要采用大模型进行判断
+ tool_plan = []
+ nodeid_in_search = [{'nodeId':self.start_nodeid, 'nodeType':self.start_nodetype}]
+ nodeid_in_search_all = []
- # if nodetype_now == 'opsgptkg_analysis':
- # if self.gb_handler.geabaseGetOnlyOneNodeInfoWithKey( rootNodeId = nodeid_now, rootNodeType = nodetype_now, key = 'accesscriteria') == 'or':
- # #当链接到node_result_1节点的node_phenomena都满足时,才激活node_result_1
- # resstr += f'当链接到{nodeid_now}节点的node_phenomena只要满足一个时,就能激活{nodeid_now}'
- # else:
- # resstr += f'当链接到{nodeid_now}节点的node_phenomena都满足时,才能激活{nodeid_now}'
+ while len(nodeid_in_search)!= 0:
+ nodedict_now = nodeid_in_search.pop()
+ nodeid_now = nodedict_now['nodeId']
+ nodetype_now = nodedict_now['nodeType']
- # #nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
- # if nodetype_now == 'opsgptkg_phenomenon':
- # '''
- # node_phenomena_1 与 结论 node_result_1 相连,node_result_1的内容为 热点账户问题,无需应急。
- # node_phenomena_1 与 结论 node_result_4 相连,node_result_4的内容为 请排查其他原因
- # '''
- # neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
- # next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
- # for i in range(len(neighbor_node_id_list)):
- # resstr += f'{nodeid_now}节点 与 {neighbor_node_id_list[i]} 相连,节点{neighbor_node_id_list[i]}的内容为: {next_node_description_list[i]}'
- # # print('==================')
- # # print(f'nodeid_in_search is {nodeid_in_search}')
- # # print('=================')
+ if nodetype_now == 'opsgptkg_analysis':
+ if self.gb_handler.geabaseGetOnlyOneNodeInfoWithKey( rootNodeId = nodeid_now, rootNodeType = nodetype_now, key = 'accesscriteria') == 'or':
+ #当链接到node_result_1节点的node_phenomena都满足时,才激活node_result_1
+ # nodeid_in_search_all.append(nodeid_now)
+ resstr += f'当链接到{nodeid_now}节点的node_phenomena只要满足一个时,就能激活{nodeid_now}'
+
+ else:
+ # nodeid_in_search_all.append(nodeid_now)
+ resstr += f'当链接到{nodeid_now}节点的node_phenomena都满足时,才能激活{nodeid_now}'
+
+ #nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
+ if nodetype_now == 'opsgptkg_phenomenon':
+ '''
+ node_phenomena_1 与 结论 node_result_1 相连,node_result_1的内容为 热点账户问题,无需应急。
+ node_phenomena_1 与 结论 node_result_4 相连,node_result_4的内容为 请排查其他原因
+ '''
+ neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
+ next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
+ for i in range(len(neighbor_node_id_list)):
+ resstr += f'{nodeid_now}节点 与 {neighbor_node_id_list[i]} 相连,节点{neighbor_node_id_list[i]}的内容为: {next_node_description_list[i]}'
+ # print('==================')
+ # print(f'nodeid_in_search is {nodeid_in_search}')
+ # print('=================')
- # if self.gb_handler.all_nodetype_check(nodeid_now, nodetype_now, 'opsgptkg_task') == True:
- # # 后续节点都是task节点,
- # neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
- # next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
-
- # if nodetype_now == 'opsgptkg_schedule': #操作计划节点后第一次出现任务节点
- # if len(neighbor_node_id_list) >= 2:
- # resstr += f'并行执行{len(neighbor_node_id_list)}个任务:\n'
- # else:
- # resstr += f'执行{len(neighbor_node_id_list)}个任务:\n '
- # for i in range(len(neighbor_node_id_list)):
- # resstr += f' {neighbor_node_id_list[i]} 的内容是 : {next_node_description_list[i]} \n'
- # else:
- # if len(neighbor_node_id_list) >= 2:
- # resstr += f'{nodeid_now}后面是并行执行{len(neighbor_node_id_list)}个任务:\n'
- # else:
- # resstr += f'{nodeid_now}后面是执行{len(neighbor_node_id_list)}个任务:\n '
+ if self.gb_handler.all_nodetype_check(nodeid_now, nodetype_now, 'opsgptkg_task') == True:
+ # 后续节点都是task节点,
+ neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
+ next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
+
+ if nodetype_now == 'opsgptkg_schedule': #操作计划节点后第一次出现任务节点
+ if len(neighbor_node_id_list) >= 2:
+ resstr += f'并行执行{len(neighbor_node_id_list)}个任务:\n'
+ else:
+ resstr += f'执行{len(neighbor_node_id_list)}个任务:\n '
+ for i in range(len(neighbor_node_id_list)):
+ resstr += f' {neighbor_node_id_list[i]} 的内容是 : {next_node_description_list[i]} \n'
+ else:
+ if len(neighbor_node_id_list) >= 2:
+ resstr += f'{nodeid_now}后面是并行执行{len(neighbor_node_id_list)}个任务:\n'
+ else:
+ resstr += f'{nodeid_now}后面是执行{len(neighbor_node_id_list)}个任务:\n '
- # for i in range(len(neighbor_node_id_list)):
- # resstr += f'{nodeid_now}后面是{neighbor_node_id_list[i]}, {neighbor_node_id_list[i]}的内容是:{next_node_description_list[i]} \n'
+ for i in range(len(neighbor_node_id_list)):
+ resstr += f'{nodeid_now}后面是{neighbor_node_id_list[i]}, {neighbor_node_id_list[i]}的内容是:{next_node_description_list[i]} \n'
- # for i in range(len(neighbor_node_id_list)): #往后扩展
- # nodeid_in_search.append({'nodeId':neighbor_node_id_list[i], 'nodeType':'opsgptkg_task'})
+ for i in range(len(neighbor_node_id_list)): #往后扩展
+ if neighbor_node_id_list[i] not in nodeid_in_search_all:
+ nodeid_in_search.append({'nodeId':neighbor_node_id_list[i], 'nodeType':'opsgptkg_task'})
+ nodeid_in_search_all.append(neighbor_node_id_list[i])
- # elif self.gb_handler.all_nodetype_check(nodeid_now, nodetype_now, 'opsgptkg_phenomenon') == True:
- # #后续所有节点都是判断节点, 则进行大模型判断,选中其中一个 phenomenon 进入后续,同时在phenomenons节点上记录memory
- # neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
- # next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
+ elif self.gb_handler.all_nodetype_check(nodeid_now, nodetype_now, 'opsgptkg_phenomenon') == True:
+ #后续所有节点都是判断节点, 则进行大模型判断,选中其中一个 phenomenon 进入后续,同时在phenomenons节点上记录memory
+ neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
+ next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
- # resstr += f'{nodeid_now}后面有{len(neighbor_node_id_list)}种可能 '
- # for i in range(len(neighbor_node_id_list)): #往后扩展
- # resstr += f'{neighbor_node_id_list[i]}:{next_node_description_list[i]}; '
- # resstr += '\n'
+ resstr += f'{nodeid_now}后面有{len(neighbor_node_id_list)}种可能 '
+ for i in range(len(neighbor_node_id_list)): #往后扩展
+ resstr += f'{neighbor_node_id_list[i]}:{next_node_description_list[i]}; '
+ resstr += '\n'
- # for i in range(len(neighbor_node_id_list)): #往后扩展
- # nodeid_in_search.append({'nodeId':neighbor_node_id_list[i], 'nodeType':'opsgptkg_phenomenon'})
+ for i in range(len(neighbor_node_id_list)): #往后扩展
+ if neighbor_node_id_list[i] not in nodeid_in_search_all:
+ nodeid_in_search.append({'nodeId':neighbor_node_id_list[i], 'nodeType':'opsgptkg_phenomenon'})
+ nodeid_in_search_all.append(neighbor_node_id_list[i])
- # elif self.gb_handler.all_nodetype_check(nodeid_now, nodetype_now, 'opsgptkg_schedule') == True:
- # #后面都是操作计划节点,假设只有一个操作计划节点
- # neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
- # next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
- # # neighbor_node_id_list = get_nodeid_from_res(res)
- # # next_node_description_list = get_nodedescription_from_res(res)
- # for i in range(len(neighbor_node_id_list)):
- # resstr = resstr + '操作计划名:' + self.gb_handler.geabase_getDescription(neighbor_node_id_list[i], 'opsgptkg_schedule') + '\n' + '最开始,'
- # nodeid_in_search.append({'nodeId':neighbor_node_id_list[i], 'nodeType':'opsgptkg_schedule'})
+ elif self.gb_handler.all_nodetype_check(nodeid_now, nodetype_now, 'opsgptkg_schedule') == True:
+ #后面都是操作计划节点,假设只有一个操作计划节点
+ neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
+ next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
+ # neighbor_node_id_list = get_nodeid_from_res(res)
+ # next_node_description_list = get_nodedescription_from_res(res)
+ for i in range(len(neighbor_node_id_list)):
+ resstr = resstr + '操作计划名:' + self.gb_handler.geabase_getDescription(neighbor_node_id_list[i], 'opsgptkg_schedule') + '\n' + '最开始,'
+ if neighbor_node_id_list[i] not in nodeid_in_search_all:
+ nodeid_in_search.append({'nodeId':neighbor_node_id_list[i], 'nodeType':'opsgptkg_schedule'})
+ nodeid_in_search_all.append(neighbor_node_id_list[i])
- # elif self.gb_handler.all_nodetype_check(nodeid_now, nodetype_now, 'opstpgkg_analysis') == True:
- # neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
- # next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
+ elif self.gb_handler.all_nodetype_check(nodeid_now, nodetype_now, 'opstpgkg_analysis') == True:
+ neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
+ next_node_description_list = self.gb_handler.get_children_description( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
- # # neighbor_node_id_list = get_nodeid_from_res(res)
- # # next_node_description_list = get_nodedescription_from_res(res)
- # for i in range(len(neighbor_node_id_list)):
+ # neighbor_node_id_list = get_nodeid_from_res(res)
+ # next_node_description_list = get_nodedescription_from_res(res)
+ for i in range(len(neighbor_node_id_list)):
- # resstr += f'{nodeid_now} 与 {neighbor_node_id_list[i]} 相连,{neighbor_node_id_list[i]}的内容为 {next_node_description_list[i]} '
- # nodeid_in_search.append({'nodeId':neighbor_node_id_list[i], 'nodeType':'opsgptkg_schedule'})
+ resstr += f'{nodeid_now} 与 {neighbor_node_id_list[i]} 相连,{neighbor_node_id_list[i]}的内容为 {next_node_description_list[i]} '
+ if neighbor_node_id_list[i] not in nodeid_in_search_all:
+ nodeid_in_search.append({'nodeId':neighbor_node_id_list[i], 'nodeType':'opsgptkg_schedule'})
+ nodeid_in_search_all.append(neighbor_node_id_list[i])
- # else:
- # neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
- # neighbor_node_type_list = self.gb_handler.get_children_type( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
+ else:
+ neighbor_node_id_list = self.gb_handler.get_children_id( nodeid_now, nodetype_now) #get_nodeid_from_res(res)
+ neighbor_node_type_list = self.gb_handler.get_children_type( nodeid_now, nodetype_now) #get_nodedescription_from_res(res)
- # for i in range(len(neighbor_node_id_list) ):
+ for i in range(len(neighbor_node_id_list) ):
- # nodeid_new = neighbor_node_id_list[i]
- # nodetype_new = neighbor_node_type_list[i]
+ nodeid_new = neighbor_node_id_list[i]
+ nodetype_new = neighbor_node_type_list[i]
- # #只要后续有节点就继续扩展
- # nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
+ #只要后续有节点就继续扩展
+ if nodeid_new not in nodeid_in_search_all:
+ nodeid_in_search.append({'nodeId':nodeid_new, 'nodeType':nodetype_new})
+ nodeid_in_search_all.append( nodeid_new )
- full_graph_info = self.geabase_handler.get_hop_infos(attributes={"id": self.start_nodeid,}, node_type="opsgptkg_intent", hop = 15 )
- full_graph_info_str = str(full_graph_info.nodes) + '\n' + str(full_graph_info.edges) + '\n'
- resstr = \
- f'''
- 你是一个计划总结大师,可以用比较通俗易懂的语言总结一个计划。
- 计划由知识图谱的形式表示,即full_graph_info。
+ # full_graph_info = self.geabase_handler.get_hop_infos(attributes={"id": self.start_nodeid,}, node_type="opsgptkg_intent", hop = 15 )
+ # full_graph_info_str = str(full_graph_info.nodes) + '\n' + str(full_graph_info.edges) + '\n'
+ # resstr = \
+ # f'''
+ # 你是一个计划总结大师,可以用比较通俗易懂的语言总结一个计划。
+ # 计划由知识图谱的形式表示,即full_graph_info。
- ##注意##
- full_graph_info 是一个具体的流程信息,包含了节点和边的信息
+ # ##注意##
+ # full_graph_info 是一个具体的流程信息,包含了节点和边的信息
- full_graph_info:{full_graph_info_str}
+ # full_graph_info:{full_graph_info_str}
- '''
+ # '''
+
+ # resstr += ' \n \n 请总结上述排查计划,注意有些情况下可以概况总结处理.注意,请直接输出总结结果,不要输出其他的话:'
+
- resstr += ' \n \n 请总结上述排查计划,注意有些情况下可以概况总结处理.注意,请直接输出总结结果,不要输出其他的话:'
return resstr
diff --git a/muagent/service/ekg_reasoning/src/utils/__init__.py b/muagent/service/ekg_reasoning/src/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/muagent/service/ekg_reasoning/src/utils/call_llm.py b/muagent/service/ekg_reasoning/src/utils/call_llm.py
index c32ac0e..3b8c4cf 100644
--- a/muagent/service/ekg_reasoning/src/utils/call_llm.py
+++ b/muagent/service/ekg_reasoning/src/utils/call_llm.py
@@ -21,11 +21,9 @@
from colorama import Fore
from Crypto.Cipher import AES
from loguru import logger
+import logging
+
-try:
- from call_antgroup_llm import call_antgroup_llm
-except:
- logger.warning("it's ok")
MOST_RETRY_TIMES = 5
@@ -37,6 +35,11 @@
sys.path.append(src_dir)
sys.path.append(src_dir + '/examples/')
print(src_dir)
+try:
+ from muagent.service.ekg_reasoning.src.utils.call_antgroup_llm import call_antgroup_llm
+except:
+ logger.warning("加载 call_antgroup_llm 失败")
+
from muagent.llm_models.llm_config import LLMConfig
from muagent.llm_models import getChatModelFromConfig
@@ -44,8 +47,8 @@
def call_llm(
input_content = '中国的首都是哪儿',
- llm_model = 'qwen_chat_14b',
- stop = None,
+ llm_model = None,
+ stop = [],
temperature = 0.1,
presence_penalty=0,
llm_config=None
@@ -53,7 +56,7 @@ def call_llm(
if os.environ['operation_mode'] == 'open_source': # 'open_source' or 'antcode'
- #开源环境,call_llm 依靠用户的配置
+ logging.info('开源环境,call_llm 依靠用户的配置')
try:
src_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -85,14 +88,35 @@ def call_llm(
#logger.error(f"{e}")
- print(f'os.environ["model_name"] is {os.environ["model_name"]}, llm_model is {llm_model}')
+ logger.info(f'os.environ["model_name"] is {os.environ["model_name"]}, llm_model is {llm_model}, llm_config is {llm_config}')
- if llm_config is None or llm_model == 'gpt-4' or llm_model == 'gpt_4':
+ if ( llm_model == 'gpt-4' or llm_model == 'gpt_4'):
logger.info("强制调用gpt-4 的配置")
llm_config = LLMConfig(
- model_name=model_name, model_engine=model_engine, api_key=api_key, api_base_url=api_base_url,
+ model_name=model_name, model_engine=model_engine,
+ api_key=api_key, api_base_url=api_base_url,
temperature=llm_temperature)
+ elif llm_model == None:
+ logger.info("llm_config 未输入, 强制调用默认大模型配置")
+ llm_config = LLMConfig(
+ model_name =os.environ["model_name"],
+ model_engine =os.environ["model_engine"],
+ api_key =os.environ["OPENAI_API_KEY"],
+ api_base_url =os.environ["API_BASE_URL"],
+ temperature =os.environ["llm_temperature"] )
+ # elif ( llm_model == 'qwen-72B' or llm_model == 'Qwen2_72B_Instruct_OpsGPT'):
+ # logger.info("强制调用 Qwen2_72B_Instruct_OpsGPT 的配置")
+ # llm_config = LLMConfig(
+ # model_name =os.environ["qwen-model_name"],
+ # model_engine =os.environ["qwen-model_engine"],
+ # api_key =os.environ["qwen-OPENAI_API_KEY"],
+ # api_base_url =os.environ["qwen-API_BASE_URL"],
+ # temperature =os.environ["qwen-llm_temperature"] )
+ else:
+ logger.info("使用默认 llm_config 的配置")
+
+ logger.info(f'llm_config is {llm_config}')
llm_model = getChatModelFromConfig(llm_config) if llm_config else None
@@ -139,9 +163,9 @@ def extract_final_result(input_string, special_str = "最终结果为:" ):
#return jiequ_str
-def robust_call_llm(prompt_temp, llm_model = 'useless', stop = None, temperature = 0, presence_penalty = 0):
+def robust_call_llm(prompt_temp, llm_model = None, stop = None, temperature = 0, presence_penalty = 0):
if os.environ['operation_mode'] != 'antcode':
- res = call_llm(input_content = prompt_temp, llm_model = 'gpt_4', stop = stop,temperature=temperature, presence_penalty=presence_penalty)
+ res = call_llm(input_content = prompt_temp, llm_model = llm_model , stop = stop,temperature=temperature, presence_penalty=presence_penalty)
return res
else:
try:
@@ -155,5 +179,5 @@ def robust_call_llm(prompt_temp, llm_model = 'useless', stop = None, temperature
if __name__ == '__main__':
import test_config
- res = call_llm("你的名字是什么?")
+ res = call_llm("你的名字是什么?", llm_model = 'Qwen2_72B_Instruct_OpsGPT')
print(res)
diff --git a/requirements.txt b/requirements.txt
index 84f0432..115cf6a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -29,4 +29,5 @@ urllib3==1.26.6
sseclient
ollama
colorama
-pycryptodome
\ No newline at end of file
+pycryptodome
+dashscope
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/config/application-default.properties b/runtime/bootstrap/src/main/resources/config/application-default.properties
index 77805b9..cf8ef39 100644
--- a/runtime/bootstrap/src/main/resources/config/application-default.properties
+++ b/runtime/bootstrap/src/main/resources/config/application-default.properties
@@ -14,4 +14,7 @@ ekg.chat.url=http://ekgservice:3737/ekg/graph/ekg_migration_reasoning
ekg.candidate.tools=undercover.dispatch_keyword,undercover.dispatch_position,\
undercover.judge,undercover.lijing,undercover.summary,undercover.wangpeng,\
- undercover.zhangwei,undercover.show_key_information
\ No newline at end of file
+ undercover.zhangwei,undercover.show_key_information,\
+ werewolf.dispatch_keyword,werewolf.dispatch_position,werewolf.hangang,\
+ werewolf.hezixuan,werewolf.judge,werewolf.liangjun,werewolf.shenqiang,\
+ werewolf.zhoujie,werewolf.zhouxinyi,werewolf.zhuli,werewolf.summary
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/log4j2-spring.xml b/runtime/bootstrap/src/main/resources/log4j2-spring.xml
index 3e56422..66b92d9 100644
--- a/runtime/bootstrap/src/main/resources/log4j2-spring.xml
+++ b/runtime/bootstrap/src/main/resources/log4j2-spring.xml
@@ -2,18 +2,18 @@
-
+
-
+
@@ -24,7 +24,7 @@
filePattern="${spring:logging.path}/${spring:spring.application.name}/app-default.log.%d{yyyy-MM-dd}"
append="true">
diff --git a/runtime/bootstrap/src/main/resources/static/avatar/hangang.png b/runtime/bootstrap/src/main/resources/static/avatar/hangang.png
new file mode 100644
index 0000000..43b7273
Binary files /dev/null and b/runtime/bootstrap/src/main/resources/static/avatar/hangang.png differ
diff --git a/runtime/bootstrap/src/main/resources/static/avatar/hezixuan.png b/runtime/bootstrap/src/main/resources/static/avatar/hezixuan.png
new file mode 100644
index 0000000..2bbadc2
Binary files /dev/null and b/runtime/bootstrap/src/main/resources/static/avatar/hezixuan.png differ
diff --git a/runtime/bootstrap/src/main/resources/static/avatar/liangjun.png b/runtime/bootstrap/src/main/resources/static/avatar/liangjun.png
new file mode 100644
index 0000000..9d074f7
Binary files /dev/null and b/runtime/bootstrap/src/main/resources/static/avatar/liangjun.png differ
diff --git a/runtime/bootstrap/src/main/resources/static/avatar/shenqiang.png b/runtime/bootstrap/src/main/resources/static/avatar/shenqiang.png
new file mode 100644
index 0000000..297eb4e
Binary files /dev/null and b/runtime/bootstrap/src/main/resources/static/avatar/shenqiang.png differ
diff --git a/runtime/bootstrap/src/main/resources/static/avatar/zhoujie.png b/runtime/bootstrap/src/main/resources/static/avatar/zhoujie.png
new file mode 100644
index 0000000..befff2b
Binary files /dev/null and b/runtime/bootstrap/src/main/resources/static/avatar/zhoujie.png differ
diff --git a/runtime/bootstrap/src/main/resources/static/avatar/zhouxinyi.png b/runtime/bootstrap/src/main/resources/static/avatar/zhouxinyi.png
new file mode 100644
index 0000000..8612dd2
Binary files /dev/null and b/runtime/bootstrap/src/main/resources/static/avatar/zhouxinyi.png differ
diff --git a/runtime/bootstrap/src/main/resources/static/avatar/zhuli.png b/runtime/bootstrap/src/main/resources/static/avatar/zhuli.png
new file mode 100644
index 0000000..e9f7b79
Binary files /dev/null and b/runtime/bootstrap/src/main/resources/static/avatar/zhuli.png differ
diff --git a/runtime/bootstrap/src/main/resources/tools/system.llm_query.json b/runtime/bootstrap/src/main/resources/tools/system.llm_query.json
new file mode 100644
index 0000000..1b02a1d
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/system.llm_query.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "system.llm_query",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "llm_query",
+ "toolName": "llm_query",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n\n Content content = new Content()\n content.text = request\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/system.select_tool.json b/runtime/bootstrap/src/main/resources/tools/system.select_tool.json
index 07eddc4..7e7e5a2 100644
--- a/runtime/bootstrap/src/main/resources/tools/system.select_tool.json
+++ b/runtime/bootstrap/src/main/resources/tools/system.select_tool.json
@@ -25,7 +25,7 @@
"operatorCreate": "169704",
"operatorModified": "169704",
"version": "3",
- "owner": "你现在是一个插件选择助手,需要理解问题描述,然后从以下给出的插件中选择一个可以解决问题描述的插件\n\n##插件列表:\n###插件名称:system.select_tool\n插件描述:select_tool\n###插件名称:undercover.zhangwei\n插件描述:agent_张伟\n###插件名称:ekg-query\n插件描述:ekg-query\n\n##约束条件:\n-你必须按照以下JSON格式返回结果,并且不要给出问题分析的过程,{\"toolKey\":\"插件名称\"}\n-请尽可能从插件列表中选择合适的插件\n-如果插件列表中没有与问题描述匹配的插件请返回\"null\"\n-如果是模型不能回答的问题请返回\"null\"\n\n##问题描述:\n选择某个tool回答问题,今天是几号",
+ "owner": "",
"deleted": null,
"type": "OPEN",
"status": "PROD_PUBLISHED",
diff --git a/runtime/bootstrap/src/main/resources/tools/undercover.dispatch_keyword.json b/runtime/bootstrap/src/main/resources/tools/undercover.dispatch_keyword.json
index 33ad59f..52c1b8c 100644
--- a/runtime/bootstrap/src/main/resources/tools/undercover.dispatch_keyword.json
+++ b/runtime/bootstrap/src/main/resources/tools/undercover.dispatch_keyword.json
@@ -5,8 +5,8 @@
"toolKey": "undercover.dispatch_keyword",
"toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
"toolId": null,
- "description": "角色分配和单词分配",
- "toolName": "角色分配和单词分配",
+ "description": "谁是卧底角色分配和单词分配",
+ "toolName": "谁是卧底角色分配和单词分配",
"pauseStatus": "EXECUTING",
"transparentInfo": null,
"intention": null,
diff --git a/runtime/bootstrap/src/main/resources/tools/undercover.dispatch_position.json b/runtime/bootstrap/src/main/resources/tools/undercover.dispatch_position.json
index 4ee8cd0..5a41884 100644
--- a/runtime/bootstrap/src/main/resources/tools/undercover.dispatch_position.json
+++ b/runtime/bootstrap/src/main/resources/tools/undercover.dispatch_position.json
@@ -5,8 +5,8 @@
"toolKey": "undercover.dispatch_position",
"toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
"toolId": null,
- "description": "分配座位",
- "toolName": "分配座位",
+ "description": "谁是卧底分配座位",
+ "toolName": "谁是卧底分配座位",
"pauseStatus": "EXECUTING",
"transparentInfo": null,
"intention": null,
diff --git a/runtime/bootstrap/src/main/resources/tools/undercover.judge.json b/runtime/bootstrap/src/main/resources/tools/undercover.judge.json
index 156c13f..51ed0ff 100644
--- a/runtime/bootstrap/src/main/resources/tools/undercover.judge.json
+++ b/runtime/bootstrap/src/main/resources/tools/undercover.judge.json
@@ -5,8 +5,8 @@
"toolKey": "undercover.judge",
"toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
"toolId": null,
- "description": "判断游戏是否结束",
- "toolName": "判断游戏是否结束",
+ "description": "谁是卧底判断游戏是否结束",
+ "toolName": "谁是卧底判断游戏是否结束",
"pauseStatus": "EXECUTING",
"transparentInfo": null,
"intention": null,
diff --git a/runtime/bootstrap/src/main/resources/tools/undercover.summary.json b/runtime/bootstrap/src/main/resources/tools/undercover.summary.json
index e7c5491..c71a562 100644
--- a/runtime/bootstrap/src/main/resources/tools/undercover.summary.json
+++ b/runtime/bootstrap/src/main/resources/tools/undercover.summary.json
@@ -5,8 +5,8 @@
"toolKey": "undercover.summary",
"toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
"toolId": null,
- "description": "给出每个人的单词以及最终胜利者",
- "toolName": "给出每个人的单词以及最终胜利者",
+ "description": "谁是卧底给出每个人的单词以及最终胜利者",
+ "toolName": "谁是卧底给出每个人的单词以及最终胜利者",
"pauseStatus": "EXECUTING",
"transparentInfo": null,
"intention": null,
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.dispatch_keyword.json b/runtime/bootstrap/src/main/resources/tools/werewolf.dispatch_keyword.json
new file mode 100644
index 0000000..ef3d76f
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.dispatch_keyword.json
@@ -0,0 +1,50 @@
+
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.dispatch_keyword",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀角色分配和单词分配",
+ "toolName": "狼人杀角色分配和单词分配",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef randRole() {\n def player = [[\"朱丽\", \"agent_朱丽\"], [ \"周杰\", \"agent_周杰\"], [\"沈强\", \"agent_沈强\"], [ \"韩刚\", \"agent_韩刚\"], [\"梁军\", \"agent_梁军\"], [ \"周欣怡\", \"agent_周欣怡\"], [\"贺子轩 \", \"agent_贺子轩\"], [\"人类玩家\", \"agent_人类玩家\"]]\n player.shuffle()\n def role = Arrays.asList(\"平民_1\", \"平民_2\", \"平民_3\", \"狼人_1\", \"狼人_2\", \"狼人_3\", \"女巫\", \"预言家\")\n role.shuffle()\n return (0..7).collect { i ->\n {\n def r = new HashMap()\n r.put(\"player_name\", player.get(i).get(0))\n r.put(\"agent_name\", player.get(i).get(1))\n r.put(\"agent_description\", role.get(i))\n return r\n }\n }\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n def roles = randRole()\n return gson.toJson(roles)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n \n int successCode;\n \n String errorMessage;\n \n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n \n if (lr.getSuccessCode() == 0) {\n return lr.getAnswer()\n }\n \n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "GROOVY",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"https://opsgptcore-pre.alipay.com/api/tool/sample/{pathVariable}"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.dispatch_position.json b/runtime/bootstrap/src/main/resources/tools/werewolf.dispatch_position.json
new file mode 100644
index 0000000..e70dd88
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.dispatch_position.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.dispatch_position",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀分配座位",
+ "toolName": "狼人杀分配座位",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "requestGroovy": "def randPos(Integer n) {\n def player = [[\"朱丽\", \"agent_朱丽\"], [ \"周杰\", \"agent_周杰\"], [\"沈强\", \"agent_沈强\"], [ \"韩刚\", \"agent_韩刚\"], [\"梁军\", \"agent_梁军\"], [ \"周欣怡\", \"agent_周欣怡\"], [\"贺子轩 \", \"agent_贺子轩\"], [\"人类玩家\", \"agent_人类玩家\"]]\n player.shuffle()\n\n return \"\\n\\n| 座位 | 玩家 |\\n|---|---|\\n\" + (1..n).toList().collect { i ->\n {\n return \"| \" + i + \" | **\" + player.get(i-1).get(0) + \"** |\"\n }\n }.join(\"\\n\")\n}\n\ndef convertRequest(String request) {\n return randPos(8)\n}\n\nconvertRequest(request)",
+ "message": null,
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n \n int successCode;\n \n String errorMessage;\n \n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n \n if (lr.getSuccessCode() == 0) {\n return lr.getAnswer()\n }\n \n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "GROOVY",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"https://opsgptcore-pre.alipay.com/api/tool/sample/{pathVariable}"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.hangang.json b/runtime/bootstrap/src/main/resources/tools/werewolf.hangang.json
new file mode 100644
index 0000000..1054e5a
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.hangang.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.hangang",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀agent_韩刚",
+ "toolName": "agent_韩刚",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef getMemory() {\n return binding.hasVariable(\"stepMemory\") ? (String) binding.getVariable(\"stepMemory\") : \"[null]\"\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n String template = \"##狼人杀游戏说明##\\n这个游戏基于文字交流, 以下是游戏规则:\\n角色:\\n主持人也是游戏的组织者,你需要正确回答他的指示。游戏中有五种角色:狼人、平民、预言家、女巫,三个狼人,一个预言家,一个女巫,三个平民。\\n好人阵营: 平民、预言家和女巫。\\n游戏阶段:游戏分为两个交替的阶段:白天和黑夜。\\n黑夜:\\n在黑夜阶段,你与主持人的交流内容是保密的,你无需担心其他玩家和主持人知道你说了什么或做了什么。\\n- 如果你是狼人,你需要和队友一起选择袭击杀死一个玩家,在投票阶段你需要明确指出击杀对象,比如【我决定今晚击杀X号玩家XX】,不可以击杀狼人队友。\\n- 如果你是女巫,你有一瓶解药,可以拯救被狼人袭击的玩家,以及一瓶毒药,可以在黑夜后毒死一个玩家。解药和毒药只能使用一次。\\n- 如果你是预言家,你可以在每个晚上检查一个玩家是否是狼人,你应该明确告诉主持人你想要查验的玩家,比如【我想要查验X号玩家XX的身份】。\\n- 如果你是村民,你在夜晚无法做任何事情。\\n白天:\\n你与存活所有玩家(包括敌人)讨论。讨论结束后,玩家投票来淘汰一个自己怀疑是狼人的玩家。玩家必须明确投票,比如【我认为X号玩家XX是狼人,我投票给X号玩家XX】,获得最多票数的玩家将被淘汰,主持人将宣布XX玩家死亡。\\n游戏目标:\\n狼人的目标是杀死所有的好人阵营中的玩家,并且不被好人阵营的玩家识别出狼人身份;\\n好人阵营的玩家,需要找出并杀死所有的狼人玩家。\\n\\n##注意##\\n你正在参与狼人杀这个游戏,你应该感知自己的名字、座位和角色。\\n1. 若你的角色为狼人,白天的发言应该尽可能隐藏身份,夜晚则要明确击杀对象,不可击杀狼人队友。\\n2. 若你的角色属于好人阵营,白天的发言应该根据游戏进展尽可能分析出谁是狼人。\\n\\n##以下为目前游戏进展##\\n{memory}\\n\\n##发言格式##\\n你的回答中需要包含你的想法并给出简洁的理由,注意请有理有据,白天的发言尽量不要与别人的发言内容重复。发言的格式应该为Python可直接解析的jsonstr,格式如下:\\n{\\\"thought\\\": 以“我是【座位号】号玩家【名字】【角色】”开头,根据主持人的通知感知自己的【名字】、【座位号】、【角色】,根据游戏进展和自己游戏角色的当前任务分析如何发言,字数不超过150字, \\\"output\\\": 您的发言应该符合目前游戏进展和自己角色的逻辑,白天投票环节不能投票给自己。}\\n##开始发言##\\n\"\n\n Content content = new Content()\n content.text = template.replace(\"{memory}\", getMemory())\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.hezixuan.json b/runtime/bootstrap/src/main/resources/tools/werewolf.hezixuan.json
new file mode 100644
index 0000000..ded4942
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.hezixuan.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.hezixuan",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀agent_贺子轩",
+ "toolName": "agent_贺子轩",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef getMemory() {\n return binding.hasVariable(\"stepMemory\") ? (String) binding.getVariable(\"stepMemory\") : \"[null]\"\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n String template = \"##狼人杀游戏说明##\\n这个游戏基于文字交流, 以下是游戏规则:\\n角色:\\n主持人也是游戏的组织者,你需要正确回答他的指示。游戏中有五种角色:狼人、平民、预言家、女巫,三个狼人,一个预言家,一个女巫,三个平民。\\n好人阵营: 平民、预言家和女巫。\\n游戏阶段:游戏分为两个交替的阶段:白天和黑夜。\\n黑夜:\\n在黑夜阶段,你与主持人的交流内容是保密的,你无需担心其他玩家和主持人知道你说了什么或做了什么。\\n- 如果你是狼人,你需要和队友一起选择袭击杀死一个玩家,在投票阶段你需要明确指出击杀对象,比如【我决定今晚击杀X号玩家XX】,不可以击杀狼人队友。\\n- 如果你是女巫,你有一瓶解药,可以拯救被狼人袭击的玩家,以及一瓶毒药,可以在黑夜后毒死一个玩家。解药和毒药只能使用一次。\\n- 如果你是预言家,你可以在每个晚上检查一个玩家是否是狼人,你应该明确告诉主持人你想要查验的玩家,比如【我想要查验X号玩家XX的身份】。\\n- 如果你是村民,你在夜晚无法做任何事情。\\n白天:\\n你与存活所有玩家(包括敌人)讨论。讨论结束后,玩家投票来淘汰一个自己怀疑是狼人的玩家。玩家必须明确投票,比如【我认为X号玩家XX是狼人,我投票给X号玩家XX】,获得最多票数的玩家将被淘汰,主持人将宣布XX玩家死亡。\\n游戏目标:\\n狼人的目标是杀死所有的好人阵营中的玩家,并且不被好人阵营的玩家识别出狼人身份;\\n好人阵营的玩家,需要找出并杀死所有的狼人玩家。\\n\\n##注意##\\n你正在参与狼人杀这个游戏,你应该感知自己的名字、座位和角色。\\n1. 若你的角色为狼人,白天的发言应该尽可能隐藏身份,夜晚则要明确击杀对象,不可击杀狼人队友。\\n2. 若你的角色属于好人阵营,白天的发言应该根据游戏进展尽可能分析出谁是狼人。\\n\\n##以下为目前游戏进展##\\n{memory}\\n\\n##发言格式##\\n你的回答中需要包含你的想法并给出简洁的理由,注意请有理有据,白天的发言尽量不要与别人的发言内容重复。发言的格式应该为Python可直接解析的jsonstr,格式如下:\\n{\\\"thought\\\": 以“我是【座位号】号玩家【名字】【角色】”开头,根据主持人的通知感知自己的【名字】、【座位号】、【角色】,根据游戏进展和自己游戏角色的当前任务分析如何发言,字数不超过150字, \\\"output\\\": 您的发言应该符合目前游戏进展和自己角色的逻辑,白天投票环节不能投票给自己。}\\n##开始发言##\\n\"\n\n Content content = new Content()\n content.text = template.replace(\"{memory}\", getMemory())\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.judge.json b/runtime/bootstrap/src/main/resources/tools/werewolf.judge.json
new file mode 100644
index 0000000..6f95558
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.judge.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.judge",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀判断游戏是否结束",
+ "toolName": "狼人杀判断游戏是否结束",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef getMemory() {\n return binding.hasVariable(\"stepMemory\") ? (String) binding.getVariable(\"stepMemory\") : \"[null]\"\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n String template = \"##本局游戏历史记录##\\n{memory}\\n\\n##背景##\\n你是一个逻辑判断大师,你正在参与“狼人杀”这个游戏,你的角色是[主持人]。你熟悉“狼人杀”游戏的完整流程,现在需要判断当前游戏是否结束。\\n\\n##任务##\\n你的任务是判断当前游戏是否结束,规则如下:\\n根据【狼人杀角色分配和单词分配】感知每一个玩家属于“好人”或者属于“狼人”,根据【本局游戏历史记录】和【重要通知】感知每一轮被投票死亡、被狼人杀死、被女巫毒死的玩家。 统计目前存活的[好人]玩家数量、[狼人]玩家数量。格式{\\\"存活的好人\\\":[player_name], \\\"存活的狼人\\\":[player_name]},判断以下条件中的一个是否满足:\\n1. 存活的“狼人”玩家数量为0。\\n2. “狼人”数量大于了“好人”数量。\\n3. “狼人”数量等于“好人”数量,“女巫”已死亡或者她的毒药已经使用。\\n若某个条件满足,游戏结束;否则游戏没有结束。\\n\\n##输出##\\n返回JSON格式,格式为:{\\\"thought\\\": str, \\\"存活的玩家信息\\\": {\\\"存活的好人\\\":[player_name], \\\"存活的狼人\\\":[player_name]}, \\\"isEnd\\\": \\\"是\\\" or \\\"否\\\"}\\n-thought **根据本局游戏历史记录** 分析 游戏最开始有哪些玩家, 他们的身份是什么, 投票导致死亡的玩家有哪些? 被狼人杀死的玩家有哪些? 被女巫毒死的玩家是谁? 析当前存活的玩家有哪些? 是否触发了游戏结束条件? 
等等。\\n\\n##example##\\n{\\\"thought\\\": \\\"**游戏开始时** 有 小杭、小北、小赵、小钱、小孙、小李、小夏、小张 八位玩家, 其中 小杭、小北、小赵是[狼人], 小钱、小孙,小张是[平民], 小李是[预言家],小夏是[女巫],小张在第一轮被狼人杀死了,[狼人]数量大于0,且[好人]数量大于[狼人]数量, 因此游戏未结束。\\\", \\\"存活的玩家信息\\\": {\\\"存活的狼人\\\":[\\\"小杭\\\", \\\"小北\\\", \\\"小赵\\\"], \\\"存活的好人\\\":[\\\"小钱\\\", \\\"小孙\\\", \\\"小李\\\", \\\"小夏\\\"]}, \\\"isEnd\\\": \\\"否\\\" }\\n\\n##example##\\n{\\\"thought\\\": \\\"**游戏开始时** 有 小杭、小北、小赵、小钱、小孙、小李、小夏、小张 八位玩家, 其中 小杭、小北、小赵是[狼人], 小钱、小孙,小张是[平民], 小李是[预言家],小夏是[女巫],小张在第一轮被狼人杀死了, 小夏被第一轮投票投死了,[狼人]数量等于[好人]数量额等于[狼人]数量,女巫已死亡, 因此游戏结束。\\\", \\\"存活的玩家信息\\\": {\\\"存活的狼人\\\":[\\\"小杭\\\", \\\"小北\\\", \\\"小赵\\\"], \\\"存活的好人\\\":[\\\"小钱\\\", \\\"小孙\\\", \\\"小李\\\"]}, \\\"isEnd\\\": \\\"是\\\" }\\n\\n{\\\"thought\\\": \\\"**游戏开始时** 有 小杭、小北、小赵、小钱、小孙、小李、小夏、小张 八位玩家, 其中 小杭、小北、小赵是[狼人], 小钱、小孙,小张是[平民], 小李是[预言家],小夏是[女巫],小张在第一轮被狼人杀死了, 女巫毒死了小孙,小钱被投票死亡了。[狼人]数量等于3,[好人]数量等于2,[狼人]数量大于[好人]数量,因此游戏结束。\\\", \\\"存活的玩家信息\\\": {\\\"存活的狼人\\\":[\\\"小杭\\\", \\\"小北\\\", \\\"小赵\\\"], \\\"存活的好人\\\":[\\\"小李\\\", \\\"小夏\\\"]}, \\\"isEnd\\\": \\\"是\\\" }\\n\\n##注意事项##\\n1. 所有玩家的座位、身份、agent_name、存活状态、游戏进展等信息在开头部分已给出。\\n2. \\\"是\\\" or \\\"否\\\" 如何选择?若游戏结束,则为\\\"是\\\",否则为\\\"否\\\"。\\n3. 请直接输出jsonstr,不用输出markdown格式。\\n4. 游戏可能进行了不只一轮,可能有1个或者2个玩家已经死亡,请注意感知。\\n\\n##结果##\\n\"\n\n Content content = new Content()\n content.text = template.replace(\"{memory}\", getMemory())\n\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "summaryGroovy": null,
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.liangjun.json b/runtime/bootstrap/src/main/resources/tools/werewolf.liangjun.json
new file mode 100644
index 0000000..bc21249
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.liangjun.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.liangjun",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀agent_梁军",
+ "toolName": "agent_梁军",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef getMemory() {\n return binding.hasVariable(\"stepMemory\") ? (String) binding.getVariable(\"stepMemory\") : \"[null]\"\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n String template = \"##狼人杀游戏说明##\\n这个游戏基于文字交流, 以下是游戏规则:\\n角色:\\n主持人也是游戏的组织者,你需要正确回答他的指示。游戏中有五种角色:狼人、平民、预言家、女巫,三个狼人,一个预言家,一个女巫,三个平民。\\n好人阵营: 平民、预言家和女巫。\\n游戏阶段:游戏分为两个交替的阶段:白天和黑夜。\\n黑夜:\\n在黑夜阶段,你与主持人的交流内容是保密的,你无需担心其他玩家和主持人知道你说了什么或做了什么。\\n- 如果你是狼人,你需要和队友一起选择袭击杀死一个玩家,在投票阶段你需要明确指出击杀对象,比如【我决定今晚击杀X号玩家XX】,不可以击杀狼人队友。\\n- 如果你是女巫,你有一瓶解药,可以拯救被狼人袭击的玩家,以及一瓶毒药,可以在黑夜后毒死一个玩家。解药和毒药只能使用一次。\\n- 如果你是预言家,你可以在每个晚上检查一个玩家是否是狼人,你应该明确告诉主持人你想要查验的玩家,比如【我想要查验X号玩家XX的身份】。\\n- 如果你是村民,你在夜晚无法做任何事情。\\n白天:\\n你与存活所有玩家(包括敌人)讨论。讨论结束后,玩家投票来淘汰一个自己怀疑是狼人的玩家。玩家必须明确投票,比如【我认为X号玩家XX是狼人,我投票给X号玩家XX】,获得最多票数的玩家将被淘汰,主持人将宣布XX玩家死亡。\\n游戏目标:\\n狼人的目标是杀死所有的好人阵营中的玩家,并且不被好人阵营的玩家识别出狼人身份;\\n好人阵营的玩家,需要找出并杀死所有的狼人玩家。\\n\\n##注意##\\n你正在参与狼人杀这个游戏,你应该感知自己的名字、座位和角色。\\n1. 若你的角色为狼人,白天的发言应该尽可能隐藏身份,夜晚则要明确击杀对象,不可击杀狼人队友。\\n2. 若你的角色属于好人阵营,白天的发言应该根据游戏进展尽可能分析出谁是狼人。\\n\\n##以下为目前游戏进展##\\n{memory}\\n\\n##发言格式##\\n你的回答中需要包含你的想法并给出简洁的理由,注意请有理有据,白天的发言尽量不要与别人的发言内容重复。发言的格式应该为Python可直接解析的jsonstr,格式如下:\\n{\\\"thought\\\": 以“我是【座位号】号玩家【名字】【角色】”开头,根据主持人的通知感知自己的【名字】、【座位号】、【角色】,根据游戏进展和自己游戏角色的当前任务分析如何发言,字数不超过150字, \\\"output\\\": 您的发言应该符合目前游戏进展和自己角色的逻辑,白天投票环节不能投票给自己。}\\n##开始发言##\\n\"\n\n Content content = new Content()\n content.text = template.replace(\"{memory}\", getMemory())\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.shenqiang.json b/runtime/bootstrap/src/main/resources/tools/werewolf.shenqiang.json
new file mode 100644
index 0000000..f7a0e87
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.shenqiang.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.shenqiang",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀agent_沈强",
+ "toolName": "agent_沈强",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef getMemory() {\n return binding.hasVariable(\"stepMemory\") ? (String) binding.getVariable(\"stepMemory\") : \"[null]\"\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n String template = \"##狼人杀游戏说明##\\n这个游戏基于文字交流, 以下是游戏规则:\\n角色:\\n主持人也是游戏的组织者,你需要正确回答他的指示。游戏中有五种角色:狼人、平民、预言家、女巫,三个狼人,一个预言家,一个女巫,三个平民。\\n好人阵营: 平民、预言家和女巫。\\n游戏阶段:游戏分为两个交替的阶段:白天和黑夜。\\n黑夜:\\n在黑夜阶段,你与主持人的交流内容是保密的,你无需担心其他玩家和主持人知道你说了什么或做了什么。\\n- 如果你是狼人,你需要和队友一起选择袭击杀死一个玩家,在投票阶段你需要明确指出击杀对象,比如【我决定今晚击杀X号玩家XX】,不可以击杀狼人队友。\\n- 如果你是女巫,你有一瓶解药,可以拯救被狼人袭击的玩家,以及一瓶毒药,可以在黑夜后毒死一个玩家。解药和毒药只能使用一次。\\n- 如果你是预言家,你可以在每个晚上检查一个玩家是否是狼人,你应该明确告诉主持人你想要查验的玩家,比如【我想要查验X号玩家XX的身份】。\\n- 如果你是村民,你在夜晚无法做任何事情。\\n白天:\\n你与存活所有玩家(包括敌人)讨论。讨论结束后,玩家投票来淘汰一个自己怀疑是狼人的玩家。玩家必须明确投票,比如【我认为X号玩家XX是狼人,我投票给X号玩家XX】,获得最多票数的玩家将被淘汰,主持人将宣布XX玩家死亡。\\n游戏目标:\\n狼人的目标是杀死所有的好人阵营中的玩家,并且不被好人阵营的玩家识别出狼人身份;\\n好人阵营的玩家,需要找出并杀死所有的狼人玩家。\\n\\n##注意##\\n你正在参与狼人杀这个游戏,你应该感知自己的名字、座位和角色。\\n1. 若你的角色为狼人,白天的发言应该尽可能隐藏身份,夜晚则要明确击杀对象,不可击杀狼人队友。\\n2. 若你的角色属于好人阵营,白天的发言应该根据游戏进展尽可能分析出谁是狼人。\\n\\n##以下为目前游戏进展##\\n{memory}\\n\\n##发言格式##\\n你的回答中需要包含你的想法并给出简洁的理由,注意请有理有据,白天的发言尽量不要与别人的发言内容重复。发言的格式应该为Python可直接解析的jsonstr,格式如下:\\n{\\\"thought\\\": 以“我是【座位号】号玩家【名字】【角色】”开头,根据主持人的通知感知自己的【名字】、【座位号】、【角色】,根据游戏进展和自己游戏角色的当前任务分析如何发言,字数不超过150字, \\\"output\\\": 您的发言应该符合目前游戏进展和自己角色的逻辑,白天投票环节不能投票给自己。}\\n##开始发言##\\n\"\n\n Content content = new Content()\n content.text = template.replace(\"{memory}\", getMemory())\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.summary.json b/runtime/bootstrap/src/main/resources/tools/werewolf.summary.json
new file mode 100644
index 0000000..c8d378f
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.summary.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.summary",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀给出每个人的单词以及最终胜利者",
+ "toolName": "狼人杀给出每个人的单词以及最终胜利者",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef getMemory() {\n return binding.hasVariable(\"stepMemory\") ? (String) binding.getVariable(\"stepMemory\") : \"[null]\"\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n String template = \"##本局游戏历史记录##\\n{memory}\\n\\n##背景##\\n您正在参与“狼人杀”这个游戏,角色是[主持人]。现在游戏已经结束,您需要判断胜利的一方是谁。\\n\\n##任务##\\n统计目前存活的[好人]玩家数量、[狼人]玩家数量。判断以下条件中的哪一个满足:\\n1. 存活的“狼人”玩家数量为0。\\n2. “狼人”数量超过了“好人”数量。\\n3. “狼人”数量等于“好人”数量,“女巫”已死亡或者她的毒药已经使用。\\n如果条件1满足,则[好人]胜利;如果条件2或者条件3满足,则[狼人]胜利。\\n\\n##输出##\\nPython可直接解析的jsonstr,格式如下:\\n{{\\\"原因是\\\": 获胜者为[好人]或[狼人]的原因, \\\"角色分配结果为\\\": 所有玩家的角色(根据本局游戏历史记录), \\\"获胜方为\\\": \\\"好人\\\" or \\\"狼人\\\"}}\\n以{{开头,任何其他内容都是不允许的!\\n\\n##输出示例##\\n{{\\\"原因是\\\": \\\"狼人数量为0\\\", \\\"角色分配结果为\\\": \\\"沈强:身份为狼人_1;周欣怡:身份为狼人_2;梁军:身份为狼人_3;贺子轩:身份为平民_1;人类玩家:身份为平民_2;朱丽:身份为预言家;韩刚:身份为女巫;周杰:身份为猎人。\\\", \\\"获胜方为\\\": \\\"好人\\\"}}\\n\\n##注意##\\n请输出所有玩家的角色分配结果,不要遗漏信息。\\n\\n##结果##\\n\"\n\n Content content = new Content()\n content.text = template.replace(\"{memory}\", getMemory())\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.zhoujie.json b/runtime/bootstrap/src/main/resources/tools/werewolf.zhoujie.json
new file mode 100644
index 0000000..23ee163
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.zhoujie.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.zhoujie",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀agent_周杰",
+ "toolName": "agent_周杰",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef getMemory() {\n return binding.hasVariable(\"stepMemory\") ? (String) binding.getVariable(\"stepMemory\") : \"[null]\"\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n String template = \"##狼人杀游戏说明##\\n这个游戏基于文字交流, 以下是游戏规则:\\n角色:\\n主持人也是游戏的组织者,你需要正确回答他的指示。游戏中有五种角色:狼人、平民、预言家、女巫,三个狼人,一个预言家,一个女巫,三个平民。\\n好人阵营: 平民、预言家和女巫。\\n游戏阶段:游戏分为两个交替的阶段:白天和黑夜。\\n黑夜:\\n在黑夜阶段,你与主持人的交流内容是保密的,你无需担心其他玩家和主持人知道你说了什么或做了什么。\\n- 如果你是狼人,你需要和队友一起选择袭击杀死一个玩家,在投票阶段你需要明确指出击杀对象,比如【我决定今晚击杀X号玩家XX】,不可以击杀狼人队友。\\n- 如果你是女巫,你有一瓶解药,可以拯救被狼人袭击的玩家,以及一瓶毒药,可以在黑夜后毒死一个玩家。解药和毒药只能使用一次。\\n- 如果你是预言家,你可以在每个晚上检查一个玩家是否是狼人,你应该明确告诉主持人你想要查验的玩家,比如【我想要查验X号玩家XX的身份】。\\n- 如果你是村民,你在夜晚无法做任何事情。\\n白天:\\n你与存活所有玩家(包括敌人)讨论。讨论结束后,玩家投票来淘汰一个自己怀疑是狼人的玩家。玩家必须明确投票,比如【我认为X号玩家XX是狼人,我投票给X号玩家XX】,获得最多票数的玩家将被淘汰,主持人将宣布XX玩家死亡。\\n游戏目标:\\n狼人的目标是杀死所有的好人阵营中的玩家,并且不被好人阵营的玩家识别出狼人身份;\\n好人阵营的玩家,需要找出并杀死所有的狼人玩家。\\n\\n##注意##\\n你正在参与狼人杀这个游戏,你应该感知自己的名字、座位和角色。\\n1. 若你的角色为狼人,白天的发言应该尽可能隐藏身份,夜晚则要明确击杀对象,不可击杀狼人队友。\\n2. 若你的角色属于好人阵营,白天的发言应该根据游戏进展尽可能分析出谁是狼人。\\n\\n##以下为目前游戏进展##\\n{memory}\\n\\n##发言格式##\\n你的回答中需要包含你的想法并给出简洁的理由,注意请有理有据,白天的发言尽量不要与别人的发言内容重复。发言的格式应该为Python可直接解析的jsonstr,格式如下:\\n{\\\"thought\\\": 以“我是【座位号】号玩家【名字】【角色】”开头,根据主持人的通知感知自己的【名字】、【座位号】、【角色】,根据游戏进展和自己游戏角色的当前任务分析如何发言,字数不超过150字, \\\"output\\\": 您的发言应该符合目前游戏进展和自己角色的逻辑,白天投票环节不能投票给自己。}\\n##开始发言##\\n\"\n\n Content content = new Content()\n content.text = template.replace(\"{memory}\", getMemory())\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.zhouxinyi.json b/runtime/bootstrap/src/main/resources/tools/werewolf.zhouxinyi.json
new file mode 100644
index 0000000..57a9d93
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.zhouxinyi.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.zhouxinyi",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀agent_周心怡",
+ "toolName": "agent_周心怡",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef getMemory() {\n return binding.hasVariable(\"stepMemory\") ? (String) binding.getVariable(\"stepMemory\") : \"[null]\"\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n String template = \"##狼人杀游戏说明##\\n这个游戏基于文字交流, 以下是游戏规则:\\n角色:\\n主持人也是游戏的组织者,你需要正确回答他的指示。游戏中有五种角色:狼人、平民、预言家、女巫,三个狼人,一个预言家,一个女巫,三个平民。\\n好人阵营: 平民、预言家和女巫。\\n游戏阶段:游戏分为两个交替的阶段:白天和黑夜。\\n黑夜:\\n在黑夜阶段,你与主持人的交流内容是保密的,你无需担心其他玩家和主持人知道你说了什么或做了什么。\\n- 如果你是狼人,你需要和队友一起选择袭击杀死一个玩家,在投票阶段你需要明确指出击杀对象,比如【我决定今晚击杀X号玩家XX】,不可以击杀狼人队友。\\n- 如果你是女巫,你有一瓶解药,可以拯救被狼人袭击的玩家,以及一瓶毒药,可以在黑夜后毒死一个玩家。解药和毒药只能使用一次。\\n- 如果你是预言家,你可以在每个晚上检查一个玩家是否是狼人,你应该明确告诉主持人你想要查验的玩家,比如【我想要查验X号玩家XX的身份】。\\n- 如果你是村民,你在夜晚无法做任何事情。\\n白天:\\n你与存活所有玩家(包括敌人)讨论。讨论结束后,玩家投票来淘汰一个自己怀疑是狼人的玩家。玩家必须明确投票,比如【我认为X号玩家XX是狼人,我投票给X号玩家XX】,获得最多票数的玩家将被淘汰,主持人将宣布XX玩家死亡。\\n游戏目标:\\n狼人的目标是杀死所有的好人阵营中的玩家,并且不被好人阵营的玩家识别出狼人身份;\\n好人阵营的玩家,需要找出并杀死所有的狼人玩家。\\n\\n##注意##\\n你正在参与狼人杀这个游戏,你应该感知自己的名字、座位和角色。\\n1. 若你的角色为狼人,白天的发言应该尽可能隐藏身份,夜晚则要明确击杀对象,不可击杀狼人队友。\\n2. 若你的角色属于好人阵营,白天的发言应该根据游戏进展尽可能分析出谁是狼人。\\n\\n##以下为目前游戏进展##\\n{memory}\\n\\n##发言格式##\\n你的回答中需要包含你的想法并给出简洁的理由,注意请有理有据,白天的发言尽量不要与别人的发言内容重复。发言的格式应该为Python可直接解析的jsonstr,格式如下:\\n{\\\"thought\\\": 以“我是【座位号】号玩家【名字】【角色】”开头,根据主持人的通知感知自己的【名字】、【座位号】、【角色】,根据游戏进展和自己游戏角色的当前任务分析如何发言,字数不超过150字, \\\"output\\\": 您的发言应该符合目前游戏进展和自己角色的逻辑,白天投票环节不能投票给自己。}\\n##开始发言##\\n\"\n\n Content content = new Content()\n content.text = template.replace(\"{memory}\", getMemory())\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/bootstrap/src/main/resources/tools/werewolf.zhuli.json b/runtime/bootstrap/src/main/resources/tools/werewolf.zhuli.json
new file mode 100644
index 0000000..50f1b8a
--- /dev/null
+++ b/runtime/bootstrap/src/main/resources/tools/werewolf.zhuli.json
@@ -0,0 +1,49 @@
+{
+ "id": 2,
+ "gmtCreate": "2024-07-17T07:20:35.000+00:00",
+ "gmtModified": "2024-07-17T07:20:35.000+00:00",
+ "toolKey": "werewolf.zhuli",
+ "toolDefinition": {"description":"演示tool的返回结果","name":"OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response","parameters":{"description":"演示tool的返回结果","properties":{"button":{"description":"非必填,按钮名称,默认值为「重试」","requestType":"body","type":"string"},"pathVariable":{"description":"路径参数","requestType":"path","type":"string"},"requestParam":{"description":"请求参数","requestType":"query","type":"string"}},"required":[],"type":"object"},"result":{"description":"返回信息","properties":{},"type":"object"}},
+ "toolId": null,
+ "description": "狼人杀agent_朱丽",
+ "toolName": "agent_朱丽",
+ "pauseStatus": "EXECUTING",
+ "transparentInfo": null,
+ "intention": null,
+ "input": null,
+ "output": null,
+ "exeNormal": true,
+ "message": null,
+ "requestGroovy": "import com.alipay.muagent.model.connector.http.HttpParameters\nimport com.google.gson.Gson\n\nclass Content {\n String text;\n}\n\ndef getMemory() {\n return binding.hasVariable(\"stepMemory\") ? (String) binding.getVariable(\"stepMemory\") : \"[null]\"\n}\n\ndef convertRequest(String request) {\n def gson = new Gson()\n String template = \"##狼人杀游戏说明##\\n这个游戏基于文字交流, 以下是游戏规则:\\n角色:\\n主持人也是游戏的组织者,你需要正确回答他的指示。游戏中有五种角色:狼人、平民、预言家、女巫,三个狼人,一个预言家,一个女巫,三个平民。\\n好人阵营: 平民、预言家和女巫。\\n游戏阶段:游戏分为两个交替的阶段:白天和黑夜。\\n黑夜:\\n在黑夜阶段,你与主持人的交流内容是保密的,你无需担心其他玩家和主持人知道你说了什么或做了什么。\\n- 如果你是狼人,你需要和队友一起选择袭击杀死一个玩家,在投票阶段你需要明确指出击杀对象,比如【我决定今晚击杀X号玩家XX】,不可以击杀狼人队友。\\n- 如果你是女巫,你有一瓶解药,可以拯救被狼人袭击的玩家,以及一瓶毒药,可以在黑夜后毒死一个玩家。解药和毒药只能使用一次。\\n- 如果你是预言家,你可以在每个晚上检查一个玩家是否是狼人,你应该明确告诉主持人你想要查验的玩家,比如【我想要查验X号玩家XX的身份】。\\n- 如果你是村民,你在夜晚无法做任何事情。\\n白天:\\n你与存活所有玩家(包括敌人)讨论。讨论结束后,玩家投票来淘汰一个自己怀疑是狼人的玩家。玩家必须明确投票,比如【我认为X号玩家XX是狼人,我投票给X号玩家XX】,获得最多票数的玩家将被淘汰,主持人将宣布XX玩家死亡。\\n游戏目标:\\n狼人的目标是杀死所有的好人阵营中的玩家,并且不被好人阵营的玩家识别出狼人身份;\\n好人阵营的玩家,需要找出并杀死所有的狼人玩家。\\n\\n##注意##\\n你正在参与狼人杀这个游戏,你应该感知自己的名字、座位和角色。\\n1. 若你的角色为狼人,白天的发言应该尽可能隐藏身份,夜晚则要明确击杀对象,不可击杀狼人队友。\\n2. 若你的角色属于好人阵营,白天的发言应该根据游戏进展尽可能分析出谁是狼人。\\n\\n##以下为目前游戏进展##\\n{memory}\\n\\n##发言格式##\\n你的回答中需要包含你的想法并给出简洁的理由,注意请有理有据,白天的发言尽量不要与别人的发言内容重复。发言的格式应该为Python可直接解析的jsonstr,格式如下:\\n{\\\"thought\\\": 以“我是【座位号】号玩家【名字】【角色】”开头,根据主持人的通知感知自己的【名字】、【座位号】、【角色】,根据游戏进展和自己游戏角色的当前任务分析如何发言,字数不超过150字, \\\"output\\\": 您的发言应该符合目前游戏进展和自己角色的逻辑,白天投票环节不能投票给自己。}\\n##开始发言##\\n\"\n\n Content content = new Content()\n content.text = template.replace(\"{memory}\", getMemory())\n\n HttpParameters parameters = HttpParameters.builder().build()\n parameters.setRequestBody(gson.toJson(content))\n return gson.toJson(parameters)\n}\n\nconvertRequest(request)",
+ "responseGroovy": "import com.google.gson.Gson\n\nclass LLMResponse {\n\n int successCode;\n\n String errorMessage;\n\n String answer;\n}\n\ndef convertResponse(String response) {\n def gson = new Gson()\n LLMResponse lr = gson.fromJson(response, LLMResponse.class)\n\n if (lr.getSuccessCode() == 1) {\n return lr.getAnswer()\n }\n\n throw new RuntimeException(lr.getErrorMessage())\n}\n\nconvertResponse(response)",
+ "summaryGroovy": null,
+ "manifestSchema": {"api":{"type":"http"},"auth":{"type":"none"},"description_for_human":"演示tool的返回结果","description_for_model":"演示tool的返回结果","headers":{},"name_for_human":"ToolSampleController.queryEditableResponse_response","schema_version":"v1"},
+ "toolApiPath": "OPSGPT./.POST",
+ "toolProtocol": "HTTP",
+ "serverUrl": null,
+ "apiSchema": {"info":{"description":"演示tool的返回结果","title":"ToolSampleController.queryEditableResponse_response","version":"0.0.1"},"openapi":"3.0.0","paths":{"http":{"method":"POST","parameters":[{"in":"body","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-0"}},{"in":"query","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-1"}},{"in":"path","schema":{"$ref":"#ToolSampleController-queryEditableResponse-request-2"}}],"path":"/api/tool/sample/{pathVariable}","responses":{"200":{"application/json":{"$ref":"#/definitions/BaseResult_ToolResponse_"}}}}},"servers":[{"url":"http://ekgservice:3737/llm/generate"}]},
+ "operatorCreate": "169704",
+ "operatorModified": "169704",
+ "version": "3",
+ "owner": "169704",
+ "deleted": null,
+ "type": "OPEN",
+ "status": "PROD_PUBLISHED",
+ "vdbPk": "447890596018669520",
+ "selectSamples": "[null]",
+ "selectVars": "[{}]",
+ "invokeType": "SYNC",
+ "tag": "SINGLE_TOOL:OPSGPT:MAIN_SITE:ToolSampleController.queryEditableResponse_response",
+ "toolExtraInfo": {
+ "errMessage": null,
+ "devVdbPk": "447890596018692596",
+ "prodVdbPk": null,
+ "summaryModel": null,
+ "stepConfigList": null,
+ "nexExtraInfo": null,
+ "ispInfo": null,
+ "rpcUniqueId": null
+ },
+ "pauseStatusList": null,
+ "configMap": null
+}
\ No newline at end of file
diff --git a/runtime/model/src/main/java/com/alipay/muagent/model/chat/content/RoleResponseContent.java b/runtime/model/src/main/java/com/alipay/muagent/model/chat/content/RoleResponseContent.java
index eb23f96..df7a87a 100644
--- a/runtime/model/src/main/java/com/alipay/muagent/model/chat/content/RoleResponseContent.java
+++ b/runtime/model/src/main/java/com/alipay/muagent/model/chat/content/RoleResponseContent.java
@@ -53,6 +53,62 @@ public static RoleResponseContent buildZhangwei(String rsp) {
return rrc;
}
+ public static RoleResponseContent buildZhuli(String rsp) {
+ RoleResponseContent rrc = getRoleResponseContent(rsp);
+ rrc.setName("朱丽");
+ rrc.setUrl("http://localhost:8080/avatar/zhuli.png");
+
+ return rrc;
+ }
+
+ public static RoleResponseContent buildZhoujie(String rsp) {
+ RoleResponseContent rrc = getRoleResponseContent(rsp);
+ rrc.setName("周杰");
+ rrc.setUrl("http://localhost:8080/avatar/zhoujie.png");
+
+ return rrc;
+ }
+
+ public static RoleResponseContent buildShenqiang(String rsp) {
+ RoleResponseContent rrc = getRoleResponseContent(rsp);
+ rrc.setName("沈强");
+ rrc.setUrl("http://localhost:8080/avatar/shenqiang.png");
+
+ return rrc;
+ }
+
+ public static RoleResponseContent buildHangang(String rsp) {
+ RoleResponseContent rrc = getRoleResponseContent(rsp);
+ rrc.setName("韩刚");
+ rrc.setUrl("http://localhost:8080/avatar/hangang.png");
+
+ return rrc;
+ }
+
+ public static RoleResponseContent buildLiangjun(String rsp) {
+ RoleResponseContent rrc = getRoleResponseContent(rsp);
+ rrc.setName("梁军");
+ rrc.setUrl("http://localhost:8080/avatar/liangjun.png");
+
+ return rrc;
+ }
+
+ public static RoleResponseContent buildZhouxinyi(String rsp) {
+ RoleResponseContent rrc = getRoleResponseContent(rsp);
+ rrc.setName("周心怡");
+ rrc.setUrl("http://localhost:8080/avatar/zhouxinyi.png");
+
+ return rrc;
+ }
+
+ public static RoleResponseContent buildHezixuan(String rsp) {
+ RoleResponseContent rrc = getRoleResponseContent(rsp);
+ rrc.setName("贺子轩");
+ rrc.setUrl("http://localhost:8080/avatar/hezixuan.png");
+
+ return rrc;
+ }
+
private static RoleResponseContent getRoleResponseContent(String response) {
RoleResponseContent rrc = new RoleResponseContent();
rrc.setResponse(ChatResponse.buildTextResponse(response));
diff --git a/runtime/model/src/main/java/com/alipay/muagent/model/exception/EkgToolNotFindException.java b/runtime/model/src/main/java/com/alipay/muagent/model/exception/EkgToolNotFindException.java
new file mode 100644
index 0000000..d74c97d
--- /dev/null
+++ b/runtime/model/src/main/java/com/alipay/muagent/model/exception/EkgToolNotFindException.java
@@ -0,0 +1,7 @@
+package com.alipay.muagent.model.exception;
+
+public class EkgToolNotFindException extends RuntimeException {
+ public EkgToolNotFindException(String message) {
+ super(message);
+ }
+}
diff --git a/runtime/model/src/main/java/com/alipay/muagent/model/scheduler/SubmitTaskRequest.java b/runtime/model/src/main/java/com/alipay/muagent/model/scheduler/SubmitTaskRequest.java
index 3371192..6f18b1f 100644
--- a/runtime/model/src/main/java/com/alipay/muagent/model/scheduler/SubmitTaskRequest.java
+++ b/runtime/model/src/main/java/com/alipay/muagent/model/scheduler/SubmitTaskRequest.java
@@ -25,6 +25,8 @@ public class SubmitTaskRequest {
private String memory;
+ private boolean ekgRequest;
+
/**
* the range of tools needed to execute
*/
diff --git a/runtime/service/src/main/java/com/alipay/muagent/service/chat/impl/EkgChatServiceImpl.java b/runtime/service/src/main/java/com/alipay/muagent/service/chat/impl/EkgChatServiceImpl.java
index a481e6f..91b2187 100644
--- a/runtime/service/src/main/java/com/alipay/muagent/service/chat/impl/EkgChatServiceImpl.java
+++ b/runtime/service/src/main/java/com/alipay/muagent/service/chat/impl/EkgChatServiceImpl.java
@@ -20,6 +20,7 @@
import com.alipay.muagent.model.ekg.configuration.Config;
import com.alipay.muagent.model.enums.ekg.ToolPlanTypeEnum;
import com.alipay.muagent.model.enums.scheduler.TaskSchedulerTypeEnum;
+import com.alipay.muagent.model.exception.EkgToolNotFindException;
import com.alipay.muagent.model.scheduler.SubmitTaskRequest;
import com.alipay.muagent.model.scheduler.TaskExeResponse;
import com.alipay.muagent.service.chat.ChatService;
@@ -34,22 +35,39 @@
import io.micrometer.common.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;
+import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.Optional;
import java.util.UUID;
-
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildHangang;
+import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildHezixuan;
+import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildLiangjun;
import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildLijing;
import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildReferee;
+import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildShenqiang;
import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildWangpeng;
import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildZhangwei;
+import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildZhoujie;
+import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildZhouxinyi;
+import static com.alipay.muagent.model.chat.content.RoleResponseContent.buildZhuli;
import static com.alipay.muagent.model.enums.chat.ChatExtendedKeyEnum.CHAT_UNIQUE_ID;
import static com.alipay.muagent.model.enums.chat.ChatExtendedKeyEnum.EKG_NODE;
@@ -67,7 +85,6 @@ public class EkgChatServiceImpl implements ChatService {
@Override
public void chat(SseEmitter emitter, ChatRequest request) {
- boolean wait = true;
try {
TextContent content = GsonUtils.fromString(TextContent.class, GsonUtils.toString(request.getContent()));
@@ -102,68 +119,138 @@ public void chat(SseEmitter emitter, ChatRequest request) {
ekgRequest.setIntentionRule(Lists.newArrayList("nlp"));
ekgRequest.setScene("NEXA");
- while (wait) {
-
- EkgAlgorithmResult response = sendRequestToEkgPlanner(ekgRequest);
+ ekgHandler(emitter, ekgRequest, "1", null);
+ } catch (Exception e) {
+ LoggerUtil.error(LOGGER, e, "[EkgServiceImpl call error][{}]", request.getChatUniqueId());
+ }
+ }
- if (Objects.isNull(response)) {
- emitter.send(SseEmitter.event().data(GsonUtils.toString(ChatResponse.buildTextResponses("图谱执行异常"))));
- LoggerUtil.warn(LOGGER, "图谱执行异常");
- break;
- }
+ private static final ExecutorService EKG_NODE_EXE = new ThreadPoolExecutor(
+ 100,
+ 150,
+ 60L,
+ TimeUnit.SECONDS,
+ new ArrayBlockingQueue<>(1),
+ new ThreadPoolExecutor.AbortPolicy());
- if (StringUtils.isNotBlank(response.getSummary())) {
- emitter.send(SseEmitter.event().data(GsonUtils.toString(ChatResponse.buildTextResponses(response.getSummary()))));
- LoggerUtil.info(LOGGER, "游戏结束");
- break;
- }
+ private boolean ekgHandler(SseEmitter emitter, EkgQueryRequest ekgRequest, String stepNum, HashMap exContext) {
+ try {
+ MDC.put("stepNodeId", stepNum);
+ EkgAlgorithmResult response = sendRequestToEkgPlanner(ekgRequest);
- // 需要给用户反馈信息
- if (StringUtils.isNotBlank(response.getUserInteraction())) {
- RoleResponseContent rrc = buildRoleResponseContent(response.getUserInteraction());
- String userMsg = GsonUtils.toString(ChatResponse.buildRoleResponses(rrc));
- LoggerUtil.info(LOGGER, "notifyUser:{}", userMsg);
- emitter.send(SseEmitter.event().data(userMsg));
- }
+ if (Objects.isNull(response)) {
+ emitter.send(SseEmitter.event().data(GsonUtils.toString(ChatResponse.buildTextResponses("图谱执行异常"))));
+ LoggerUtil.warn(LOGGER, "图谱执行异常");
+ return false;
+ }
- EkgNode node = response.getToolPlan().get(0);
+ if (StringUtils.isNotBlank(response.getSummary())) {
+ emitter.send(SseEmitter.event().data(GsonUtils.toString(ChatResponse.buildTextResponses(response.getSummary()))));
+ LoggerUtil.info(LOGGER, "游戏结束");
+ return false;
+ }
- // 需要等待用户回答,通知用户后结束轮询
- if (ToolPlanTypeEnum.USER_PROBLEM.getCode().equals(node.getType())) {
- EkgQuestionDescription ques = node.getQuestionDescription();
- RoleResponseContent rrc = buildRoleResponseContent(" *请回答:* \n\n >" + ques.getQuestionContent().getQuestion());
+ if (CollectionUtils.isEmpty(response.getToolPlan())) {
+ response.setToolPlan(Lists.newArrayList());
+ }
- List crs = ChatResponse.buildRoleResponses(rrc);
+ // 需要给用户反馈信息
+ if (StringUtils.isNotBlank(response.getUserInteraction())) {
+ RoleResponseContent rrc = buildRoleResponseContent(response.getUserInteraction());
+ List crs = ChatResponse.buildRoleResponses(rrc);
+ crs.get(0).setExtendContext(exContext);
+ String userMsg = GsonUtils.toString(crs);
+ LoggerUtil.info(LOGGER, "notifyUser:{}", userMsg);
+ emitter.send(SseEmitter.event().data(userMsg));
+ }
- // 将 node 和 对话id 放入上下文
- HashMap exContext = Maps.newHashMap();
- exContext.put(EKG_NODE.name(), GsonUtils.toString(node));
- exContext.put(CHAT_UNIQUE_ID.name(), ekgRequest.getSessionId());
- crs.get(0).setExtendContext(exContext);
+ Optional userNode = response.getToolPlan().stream().filter(
+ node -> ToolPlanTypeEnum.USER_PROBLEM.getCode().equals(node.getType())).findAny();
- String userMsg = GsonUtils.toString(crs);
- LoggerUtil.info(LOGGER, "notifyUser:{}", userMsg);
- emitter.send(SseEmitter.event().data(userMsg));
- break;
- }
+ HashMap nodeContext = Maps.newHashMap();
+ if (userNode.isPresent()) {
+ // 将 node 和 对话id 放入上下文
+ nodeContext.put(EKG_NODE.name(), GsonUtils.toString(userNode.get()));
+ nodeContext.put(CHAT_UNIQUE_ID.name(), ekgRequest.getSessionId());
+ }
- // 执行当前 node
- ExeNodeResponse enr = executeNode(node);
- EkgToolResponse toolResponse = EkgToolResponse.builder()
- .toolKey(enr.getToolKey())
- .toolResponse(enr.getOutput())
- .toolParam(node.getToolDescription())
- .build();
- ekgRequest.setCurrentNodeId(node.getCurrentNodeId());
- ekgRequest.setObservation(GsonUtils.toString(toolResponse));
- ekgRequest.setType(node.getType());
- ekgRequest.setIntentionData(null);
- ekgRequest.setIntentionRule(null);
- }
- } catch (Exception e) {
- LoggerUtil.error(LOGGER, e, "[EkgServiceImpl call error][{}][{}][{}][{}]", request.getChatUniqueId());
+ AtomicInteger step = new AtomicInteger(1);
+ List> futures = response.getToolPlan().stream().map(node ->
+ EKG_NODE_EXE.submit(() -> {
+ try {
+ String nodeStepNum = String.format("%s.%s", stepNum, step.getAndAdd(1));
+ MDC.put("stepNodeId", nodeStepNum);
+ LoggerUtil.info(LOGGER, "exeNode:{}", GsonUtils.toString(node));
+
+ EkgQueryRequest copyRequest = GsonUtils.fromString(EkgQueryRequest.class, GsonUtils.toString(ekgRequest));
+
+ // 需要等待用户回答,通知用户后结束任务
+ if (ToolPlanTypeEnum.USER_PROBLEM.getCode().equals(node.getType())) {
+ EkgQuestionDescription ques = node.getQuestionDescription();
+ RoleResponseContent rrc = buildRoleResponseContent(
+ " *请回答:* \n\n >" + ques.getQuestionContent().getQuestion());
+
+ List crs = ChatResponse.buildRoleResponses(rrc);
+ crs.get(0).setExtendContext(nodeContext);
+
+ String userMsg = GsonUtils.toString(crs);
+ LoggerUtil.info(LOGGER, "notifyUser:{}", userMsg);
+ emitter.send(SseEmitter.event().data(userMsg));
+ return false;
+ }
+
+ // 执行当前 node
+ ExeNodeResponse enr = executeNode(node);
+
+ EkgToolResponse toolResponse = EkgToolResponse.builder()
+ .toolKey(enr.getToolKey())
+ .toolResponse(enr.getOutput())
+ .toolParam(node.getToolDescription())
+ .build();
+ copyRequest.setCurrentNodeId(node.getCurrentNodeId());
+ copyRequest.setObservation(GsonUtils.toString(toolResponse));
+ copyRequest.setType(node.getType());
+ copyRequest.setIntentionData(null);
+ copyRequest.setIntentionRule(null);
+
+ return ekgHandler(emitter, copyRequest, nodeStepNum, nodeContext);
+ } catch (EkgToolNotFindException eetnfe) {
+ try {
+ emitter.send(
+ SseEmitter.event().data(GsonUtils.toString(ChatResponse.buildTextResponses(eetnfe.getMessage()))));
+ } catch (Exception e) {
+ LoggerUtil.error(LOGGER, e, "[EkgServiceImpl send exception to emitter error][{}]",
+ ekgRequest.getSessionId());
+ }
+ return false;
+ } catch (Exception e) {
+ LoggerUtil.error(LOGGER, e, "exeNodeError:{}", GsonUtils.toString(node));
+ return false;
+ }
+ })
+ ).toList();
+
+ futures.parallelStream().forEach(future -> {
+ try {
+ future.get();
+ } catch (InterruptedException e) {
+ LoggerUtil.error(LOGGER, e, "exeNodeError:InterruptedException");
+ throw new RuntimeException(e);
+ } catch (ExecutionException e) {
+ LoggerUtil.error(LOGGER, e, "exeNodeError:ExecutionException");
+ throw new RuntimeException(e);
+ } catch (Exception e) {
+ LoggerUtil.error(LOGGER, e, "exeNodeError:Exception");
+ throw new RuntimeException(e);
+ }
+ });
+ } catch (IOException e) {
+ LoggerUtil.error(LOGGER, e, "[EkgServiceImpl call error]");
+ return false;
}
+
+ return true;
}
private RoleResponseContent buildRoleResponseContent(String userInteraction) {
@@ -174,6 +261,20 @@ private RoleResponseContent buildRoleResponseContent(String userInteraction) {
return buildLijing(userInteraction.replace("**李静:**
", ""));
} else if (userInteraction.contains("**张伟:**")) {
return buildZhangwei(userInteraction.replace("**张伟:**
", ""));
+ } else if (userInteraction.contains("**朱丽:**")) {
+ return buildZhuli(userInteraction.replace("**朱丽:**
", ""));
+ } else if (userInteraction.contains("**周杰:**")) {
+ return buildZhoujie(userInteraction.replace("**周杰:**
", ""));
+ } else if (userInteraction.contains("**沈强:**")) {
+ return buildShenqiang(userInteraction.replace("**沈强:**
", ""));
+ } else if (userInteraction.contains("**韩刚:**")) {
+ return buildHangang(userInteraction.replace("**韩刚:**
", ""));
+ } else if (userInteraction.contains("**梁军:**")) {
+ return buildLiangjun(userInteraction.replace("**梁军:**
", ""));
+ } else if (userInteraction.contains("**周心怡:**")) {
+ return buildZhouxinyi(userInteraction.replace("**周心怡:**
", ""));
+ } else if (userInteraction.contains("**贺子轩:**")) {
+ return buildHezixuan(userInteraction.replace("**贺子轩:**
", ""));
}
return buildReferee(userInteraction);
@@ -183,10 +284,13 @@ private RoleResponseContent buildRoleResponseContent(String userInteraction) {
private SchedulerManager schedulerManager;
private ExeNodeResponse executeNode(EkgNode node) {
+ Scheduler scheduler = schedulerManager.getScheduler(TaskSchedulerTypeEnum.COMMON);
+
SubmitTaskRequest request = SubmitTaskRequest.builder().intention(node.getToolDescription()).build();
request.setMemory(node.getMemory());
- Scheduler scheduler = schedulerManager.getScheduler(TaskSchedulerTypeEnum.COMMON);
request.setTools(config.getToolKeys());
+ request.setEkgRequest(true);
+
TaskExeResponse toolExeResponse = scheduler.submitTask(request);
ExeNodeResponse exeNodeResponse = ExeNodeResponse.builder()
diff --git a/runtime/service/src/main/java/com/alipay/muagent/service/sheduler/impl/BaseScheduler.java b/runtime/service/src/main/java/com/alipay/muagent/service/sheduler/impl/BaseScheduler.java
index 37eb32f..a81fded 100644
--- a/runtime/service/src/main/java/com/alipay/muagent/service/sheduler/impl/BaseScheduler.java
+++ b/runtime/service/src/main/java/com/alipay/muagent/service/sheduler/impl/BaseScheduler.java
@@ -5,6 +5,7 @@
package com.alipay.muagent.service.sheduler.impl;
import com.alipay.muagent.model.enums.scheduler.TaskSchedulerTypeEnum;
+import com.alipay.muagent.model.exception.EkgToolNotFindException;
import com.alipay.muagent.model.scheduler.SubmitTaskRequest;
import com.alipay.muagent.model.scheduler.TaskExeResponse;
import com.alipay.muagent.model.tool.TaskExeContext;
@@ -16,6 +17,7 @@
import com.alipay.muagent.service.tool.loader.ToolLoader;
import com.alipay.muagent.util.GsonUtils;
import com.alipay.muagent.util.LoggerUtil;
+import com.alipay.muagent.util.StringUtils;
import lombok.Data;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
@@ -72,13 +74,33 @@ protected Tool selectTool(TaskExeContext request) {
LoggerUtil.info(LOGGER, "selectedTool:{}", GsonUtils.toString(selectResponse));
String resultStr = selectResponse.getResult();
try {
+ if (StringUtils.equalNull(resultStr)) {
+ return getTool(request);
+ }
SelectToolResult result = GsonUtils.fromString(SelectToolResult.class, resultStr);
+ if (StringUtils.isEmpty(result.getToolKey()) || StringUtils.equalNull(resultStr)) {
+ return getTool(request);
+ }
return toolLoader.queryToolByKey(result.getToolKey());
+ } catch (EkgToolNotFindException e) {
+ throw e;
} catch (Exception e) {
throw new RuntimeException("selectToolResponseInvalid:" + resultStr, e);
}
}
+ private Tool getTool(TaskExeContext request) {
+ if (request.getTaskRequest().isEkgRequest()) {
+ // ekg 不能兜底,直接报错提示 tool 找不到
+ //throw new EkgToolNotFindException("can not select a tool with intention " + request.getTaskRequest().getIntention());
+ throw new EkgToolNotFindException("对不起,根据您的意图,无法找到对应的工具来帮助您,请添加相关工具后再使用图谱。");
+ } else {
+ // 闲聊大模型兜底
+ Tool tool = toolLoader.queryToolByKey("system.llm_query");
+ return tool;
+ }
+ }
+
private String generateSelectPrompt(List tools, String intention) {
StringBuilder sb = new StringBuilder("你现在是一个插件选择助手,需要理解问题描述,然后从以下给出的插件中选择一个可以解决问题描述的插件").append("\n\n");
sb.append("##插件列表:").append("\n");
@@ -112,4 +134,4 @@ class SelectToolResult {
private String toolKey;
}
-}
\ No newline at end of file
+}
diff --git a/runtime/util/src/main/java/com/alipay/muagent/util/StringUtils.java b/runtime/util/src/main/java/com/alipay/muagent/util/StringUtils.java
index 9e9c174..4c4d4ee 100644
--- a/runtime/util/src/main/java/com/alipay/muagent/util/StringUtils.java
+++ b/runtime/util/src/main/java/com/alipay/muagent/util/StringUtils.java
@@ -41,6 +41,10 @@ public static String truncate(String string, int maxLength) {
return string.length() > maxLength ? string.substring(0, maxLength) : string;
}
+ public static boolean equalNull(String str) {
+ return "null".equalsIgnoreCase(str);
+ }
+
public static String truncate(String string, int maxLength, String truncationIndicator) {
if (truncationIndicator.length() >= maxLength) {
throw new IllegalArgumentException("maxLength must be greater than length of truncationIndicator");
diff --git a/runtime/web/src/main/java/com/alipay/muagent/web/interceptor/WebConfigure.java b/runtime/web/src/main/java/com/alipay/muagent/web/interceptor/WebConfigure.java
index da68819..419792a 100644
--- a/runtime/web/src/main/java/com/alipay/muagent/web/interceptor/WebConfigure.java
+++ b/runtime/web/src/main/java/com/alipay/muagent/web/interceptor/WebConfigure.java
@@ -19,7 +19,7 @@ public class WebConfigure implements WebMvcConfigurer {
public void addCorsMappings(CorsRegistry registry) {
registry.addMapping("/**")
- .allowedOrigins("http://30.183.177.8","http://localhost:8000")
+ .allowedOrigins("http://localhost:8000")
.allowedMethods("GET","HEAD","PUT","POST","PATCH","DELETE","OPTIONS")
.allowedHeaders("*")
.allowCredentials(true);
diff --git a/tests/service/test_main_sswd_long.py b/tests/service/test_main_sswd_long.py
index 8c5d53e..72e211c 100644
--- a/tests/service/test_main_sswd_long.py
+++ b/tests/service/test_main_sswd_long.py
@@ -32,9 +32,9 @@
from loguru import logger
import uuid
-#geabase 依赖包
-from gdbc2.geabase_client import GeaBaseClient#, Node, Edge, MutateBatchOperation, GeaBaseUtil
-from gdbc2.geabase_env import GeaBaseEnv
+# #geabase 依赖包
+# from gdbc2.geabase_client import GeaBaseClient#, Node, Edge, MutateBatchOperation, GeaBaseUtil
+# from gdbc2.geabase_env import GeaBaseEnv
#muagent 依赖包
from muagent.connector.schema import Message
@@ -72,11 +72,16 @@
if __name__ == '__main__':
#1、 LLM 和 Embedding Model 配置
llm_config = LLMConfig(
- model_name=os.environ["model_name"], api_key=os.environ["OPENAI_API_KEY"],
- api_base_url=os.environ["API_BASE_URL"], temperature=0.3
- )
-
-
+ model_name =os.environ["model_name"],
+ model_engine =os.environ["model_engine"],
+ api_key =os.environ["OPENAI_API_KEY"],
+ api_base_url =os.environ["API_BASE_URL"],
+ temperature =float(os.environ["llm_temperature"]),
+ )
+
+ #llm = CustomizedModel()
+ #llm_config_modelops = LLMConfig(llm=llm)
+
# 2、自定义向量模型配置接口
embeddings = None
#embeddings = CustomizedEmbeddings()
@@ -84,34 +89,56 @@
embed_model="default",
langchain_embeddings=embeddings
)
-
-
- # 3、tbase接口配置
- tb_config = TBConfig(
- tb_type = os.environ["tb_type"],
- index_name = os.environ["tb_index_name"],
- host = os.environ['tb_host'],
- port = int(os.environ['tb_port']),
- username = os.environ['tb_username'],
- password = os.environ['tb_password'],
- extra_kwargs={
- 'host': os.environ['tb_host'],
- 'port': int(os.environ['tb_port']),
- 'username': os.environ['tb_username'],
- 'password': os.environ['tb_password'],
- 'definition_value': os.environ['tb_definition_value'],
- 'expire_time': int(os.environ['tb_expire_time']) ,
- }
- )
-
+
+ # 3、tbase接口配置
+ if os.environ['operation_mode'] == 'antcode': # 'open_source' or 'antcode'
+ tb_config = TBConfig(
+ tb_type = os.environ["tb_type"],
+ index_name = os.environ["tb_index_name"],
+ host = os.environ['tb_host'],
+ port = int(os.environ['tb_port']),
+ username = os.environ['tb_username'],
+ password = os.environ['tb_password'],
+ extra_kwargs={
+ 'host': os.environ['tb_host'],
+ 'port': int(os.environ['tb_port']),
+ 'username': os.environ['tb_username'],
+ 'password': os.environ['tb_password'],
+ 'definition_value': os.environ['tb_definition_value'],
+ 'expire_time': int(os.environ['tb_expire_time']) ,
+ }
+ )
+ else:
+ # 初始化 TbaseHandler 实例
+ tb_config = TBConfig(
+ tb_type="TbaseHandler",
+ index_name="shanshi_node",
+ host=os.environ['tb_host'],
+ port=os.environ['tb_port'],
+ username=os.environ['tb_username'],
+ password=os.environ['tb_password'],
+ extra_kwargs={
+ 'host': os.environ['tb_host'],
+ 'port': os.environ['tb_port'],
+ 'username': os.environ['tb_username'] ,
+ 'password': os.environ['tb_password'],
+ 'definition_value': os.environ['tb_definition_value']
+ }
+ )
# 指定index_name
index_name = os.environ["tb_index_name"]
+ # th = TbaseHandler(TBASE_ARGS, index_name, definition_value="message")
+ # th = TbaseHandler(tb_config, index_name, definition_value="message")
+ # th = TbaseHandler(tb_config, index_name, definition_value="message_test_new")
th = TbaseHandler(tb_config, index_name, definition_value=os.environ['tb_definition_value'])
-
-
-
-
+
+ # # drop index
+ # th.drop_index(index_name)
+
+
+
+
# 5、memory 接口配置
# create tbase memory manager
memory_manager = TbaseMemoryManager(
@@ -121,27 +148,44 @@
tbase_handler=th,
use_vector=False
)
-
-
-
+
+
+
#6 geabase 接口配置
- gb_config = GBConfig(
- gb_type="GeaBaseHandler",
- extra_kwargs={
- 'metaserver_address': os.environ['metaserver_address'],
- 'project': os.environ['project'],
- 'city': os.environ['city'],
- # 'lib_path': '%s/geabase/geabase-client-0.0.1/libs'%(tar_path),
- 'lib_path': os.environ['lib_path']
- }
- )
-
+ if os.environ['operation_mode'] == 'antcode': # 'open_source' or 'antcode'
+ gb_config = GBConfig(
+ gb_type="GeaBaseHandler",
+ extra_kwargs={
+ 'metaserver_address': os.environ['metaserver_address'],
+ 'project': os.environ['project'],
+ 'city': os.environ['city'],
+ # 'lib_path': '%s/geabase/geabase-client-0.0.1/libs'%(tar_path),
+ 'lib_path': os.environ['lib_path']
+ }
+ )
+ else:
+ # 初始化 NebulaHandler 实例
+ gb_config = GBConfig(
+ gb_type="NebulaHandler",
+ extra_kwargs={
+ 'host': os.environ['nb_host'],
+ 'port': os.environ['nb_port'],
+ 'username': os.environ['nb_username'] ,
+ 'password': os.environ['nb_password'],
+ "space": os.environ['nb_space'],
+ #'definition_value': os.environ['nb_definition_value']
+
+ }
+ )
+
#7 构建ekg_construct_service
+
ekg_construct_service = EKGConstructService(
embed_config = embed_config,
llm_config = llm_config,
tb_config = tb_config,
gb_config = gb_config,
+ initialize_space = False
)
intention_router = IntentionRouter(
@@ -150,15 +194,21 @@
ekg_construct_service.tb,
embed_config
)
-
+
#8 获取main需要的内容
memory_manager = memory_manager
- geabase_handler = GeaBaseHandler(gb_config)
+ #geabase_handler = GeaBaseHandler(gb_config)
+ geabase_handler = ekg_construct_service.gb
intention_router = intention_router
+
+
+
- goc_test_sessionId = "TS_GOC_103346456601_0709001_sswd_05"
+
+ goc_test_sessionId = "TS_GOC_103346456601_1127001_sswd_099"
+
#debugmode 调试,谁是卧底初次输入
@@ -238,7 +288,7 @@
time.sleep(1)
'''
- {'intentionRecognitionSituation': 'None', 'sessionId': 'TS_GOC_103346456601_0709001_lrs_105', 'type': 'onlyTool', 'summary': None, 'toolPlan': [{'toolDescription': 'agent_2', 'currentNodeId': '剧本杀/谁是卧底/智能交互/开始新一轮的讨论', 'memory': '[{"role_type": "user", "role_name": "firstUserInput", "role_content": "{\\"content\\": \\"一起来玩谁是卧底\\"}"}, {"role_type": "observation", "role_name": "function_caller", "role_content": "{\\"座位分配结果\\": [{\\"player_name\\": \\"player_1\\", \\"seat_number\\": 1}, {\\"player_name\\": \\"李四(人类玩家)\\", \\"seat_number\\": 2}, {\\"player_name\\": \\"player_2\\", \\"seat_number\\": 3}, {\\"player_name\\": \\"player_3\\", \\"seat_number\\": 4}]}"}, {"role_type": "userinput", "role_name": "user", "role_content": "分配座位"}, {"role_type": "userinput", "role_name": "user", "role_content": "通知身份"}, "主持人 : 你是player_1, 你的位置是1号, 你分配的单词是汽车", {"role_type": "userinput", "role_name": "user", "role_content": "开始新一轮的讨论"}, "主持人 : 当前存活的玩家有4位,他们是player_1, 李四(人类玩家), player_2, player_3", "主持人 : 现在我们开始发言环节,按照座位顺序由小到大进行发言,首先是1号位的player_1"]', 'type': 'reactExecution'}], 'userInteraction': '["通知身份", "主持人 : 你是李四(人类玩家), 你的位置是2号, 你分配的单词是汽车", "开始新一轮的讨论", "主持人 : 现在我们开始发言环节,按照座位顺序由小到大进行发言,首先是1号位的player_1", "主持人 : 当前存活的玩家有4位,他们是player_1, 李四(人类玩家), player_2, player_3"]'}
+ {'intentionRecognitionSituation': 'None', 'sessionId': 'goc_test_sessionId', 'type': 'onlyTool', 'summary': None, 'toolPlan': [{'toolDescription': 'agent_2', 'currentNodeId': '剧本杀/谁是卧底/智能交互/开始新一轮的讨论', 'memory': '[{"role_type": "user", "role_name": "firstUserInput", "role_content": "{\\"content\\": \\"一起来玩谁是卧底\\"}"}, {"role_type": "observation", "role_name": "function_caller", "role_content": "{\\"座位分配结果\\": [{\\"player_name\\": \\"player_1\\", \\"seat_number\\": 1}, {\\"player_name\\": \\"李四(人类玩家)\\", \\"seat_number\\": 2}, {\\"player_name\\": \\"player_2\\", \\"seat_number\\": 3}, {\\"player_name\\": \\"player_3\\", \\"seat_number\\": 4}]}"}, {"role_type": "userinput", "role_name": "user", "role_content": "分配座位"}, {"role_type": "userinput", "role_name": "user", "role_content": "通知身份"}, "主持人 : 你是player_1, 你的位置是1号, 你分配的单词是汽车", {"role_type": "userinput", "role_name": "user", "role_content": "开始新一轮的讨论"}, "主持人 : 当前存活的玩家有4位,他们是player_1, 李四(人类玩家), player_2, player_3", "主持人 : 现在我们开始发言环节,按照座位顺序由小到大进行发言,首先是1号位的player_1"]', 'type': 'reactExecution'}], 'userInteraction': '["通知身份", "主持人 : 你是李四(人类玩家), 你的位置是2号, 你分配的单词是汽车", "开始新一轮的讨论", "主持人 : 现在我们开始发言环节,按照座位顺序由小到大进行发言,首先是1号位的player_1", "主持人 : 当前存活的玩家有4位,他们是player_1, 李四(人类玩家), player_2, player_3"]'}
'''
#step 4 剧本杀/谁是卧底/智能交互/关键信息_1
params_string =\
@@ -353,6 +403,9 @@
print('UNDERCOVER', res_to_lingsi)
+
+
+
# #========================
# ## step 7-1 第二轮讨论输入
# params_string =\