From 0e89d372a2356f3dfba07e06e35d1f1c1491ebb1 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 10 Apr 2025 16:54:52 +0900 Subject: [PATCH 001/230] Create agent_server.py --- src/agents/agent_server.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 src/agents/agent_server.py diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py new file mode 100644 index 00000000..1ae28956 --- /dev/null +++ b/src/agents/agent_server.py @@ -0,0 +1,27 @@ +from fastapi import FastAPI, Request +from fastapi.middleware.cors import CORSMiddleware +from agents import Agent, Runner + +app = FastAPI() + +# Optional: allow all origins (so you can test with Postman, Make, etc.) +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_methods=["*"], + allow_headers=["*"], +) + +# Define your agent here +agent = Agent( + name="StrategyAgent", + instructions="You are a strategic marketing assistant for small brands. Give clear, actionable advice in bullet points.", +) + +# Expose /agent POST endpoint +@app.post("/agent") +async def run_agent(request: Request): + data = await request.json() + user_input = data.get("input", "") + result = await Runner.run(agent, input=user_input) + return {"output": result.final_output} From 2ce83e9152ef98034e9f01fb7dd3e0d625bff53a Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 10 Apr 2025 16:58:59 +0900 Subject: [PATCH 002/230] Create agent_server.py --- agent_server.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 agent_server.py diff --git a/agent_server.py b/agent_server.py new file mode 100644 index 00000000..1ae28956 --- /dev/null +++ b/agent_server.py @@ -0,0 +1,27 @@ +from fastapi import FastAPI, Request +from fastapi.middleware.cors import CORSMiddleware +from agents import Agent, Runner + +app = FastAPI() + +# Optional: allow all origins (so you can test with Postman, Make, etc.) +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_methods=["*"], + allow_headers=["*"], +) + +# Define your agent here +agent = Agent( + name="StrategyAgent", + instructions="You are a strategic marketing assistant for small brands. Give clear, actionable advice in bullet points.", +) + +# Expose /agent POST endpoint +@app.post("/agent") +async def run_agent(request: Request): + data = await request.json() + user_input = data.get("input", "") + result = await Runner.run(agent, input=user_input) + return {"output": result.final_output} From 1b94141197e79d0e1e6bc1b9fe6b4d2aa1fb7ef4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 10 Apr 2025 16:59:44 +0900 Subject: [PATCH 003/230] Update agent_server.py --- src/agents/agent_server.py | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 1ae28956..8b137891 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,27 +1 @@ -from fastapi import FastAPI, Request -from fastapi.middleware.cors import CORSMiddleware -from agents import Agent, Runner -app = FastAPI() - -# Optional: allow all origins (so you can test with Postman, Make, etc.) 
-app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_methods=["*"], - allow_headers=["*"], -) - -# Define your agent here -agent = Agent( - name="StrategyAgent", - instructions="You are a strategic marketing assistant for small brands. Give clear, actionable advice in bullet points.", -) - -# Expose /agent POST endpoint -@app.post("/agent") -async def run_agent(request: Request): - data = await request.json() - user_input = data.get("input", "") - result = await Runner.run(agent, input=user_input) - return {"output": result.final_output} From aaa391a9e836d0b5bd2684a4522d845d2d44e55a Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 10 Apr 2025 17:02:55 +0900 Subject: [PATCH 004/230] Update pyproject.toml --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index ca398ba2..f8c5c22d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,6 +59,7 @@ dev = [ "textual", "websockets", "graphviz", + "fastapi = "^0.110.0", ] [tool.uv.workspace] From f361176a0bcb06271d5c8533690f3f0005946b80 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 10 Apr 2025 17:04:26 +0900 Subject: [PATCH 005/230] Update pyproject.toml --- pyproject.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f8c5c22d..2302d8a3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,6 +15,9 @@ dependencies = [ "types-requests>=2.0, <3", "mcp>=1.6.0, <2; python_version >= '3.10'", "mkdocs-static-i18n>=1.3.0", + "fastapi>=0.110.0", + "uvicorn>=0.34.0", + ] classifiers = [ "Typing :: Typed", @@ -59,7 +62,6 @@ dev = [ "textual", "websockets", "graphviz", - "fastapi = "^0.110.0", ] [tool.uv.workspace] From ee8acf9f5fa299fbedc3bcacaa1b6e91c3cdefdc Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 10 Apr 2025 19:33:03 +0900 Subject: [PATCH 006/230] Update agent_server.py --- agent_server.py | 106 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 100 insertions(+), 6 deletions(-) diff --git a/agent_server.py b/agent_server.py index 1ae28956..5d4d8071 100644 --- a/agent_server.py +++ b/agent_server.py @@ -1,27 +1,121 @@ from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from agents import Agent, Runner +from datetime import datetime +import httpx +import os +import json app = FastAPI() -# Optional: allow all origins (so you can test with Postman, Make, etc.) app.add_middleware( CORSMiddleware, allow_origins=["*"], + allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) -# Define your agent here -agent = Agent( +# === Define Agents === +manager_agent = Agent( + name="Manager", + instructions=""" +You are an intelligent router for user requests. +Decide the intent behind the message: strategy, content, repurpose, feedback. +If you are unsure or need more info, ask a clarifying question instead of routing. +Respond in strict JSON like: +{ "route_to": "strategy", "reason": "User wants a campaign plan" } +""" +) + +strategy_agent = Agent( name="StrategyAgent", - instructions="You are a strategic marketing assistant for small brands. Give clear, actionable advice in bullet points.", + instructions=""" +You create clear, actionable 7-day social media campaign strategies. +If user input is unclear or missing platform, audience, or tone — ask for clarification. +Respond in structured JSON. 
+""" +) + +content_agent = Agent( + name="ContentAgent", + instructions=""" +You write engaging, brand-aligned social content. +If user input lacks platform or goal, ask for clarification. +Return post drafts with caption, CTA, and hook. +""" +) + +repurpose_agent = Agent( + name="RepurposeAgent", + instructions=""" +You convert existing posts into new formats for different platforms. +If missing input post or target format, ask for clarification. +""" +) + +feedback_agent = Agent( + name="FeedbackAgent", + instructions=""" +You evaluate content and offer improvements. +If missing content or performance data, ask what’s needed. +""" ) -# Expose /agent POST endpoint +AGENT_MAP = { + "strategy": strategy_agent, + "content": content_agent, + "repurpose": repurpose_agent, + "feedback": feedback_agent, +} + @app.post("/agent") async def run_agent(request: Request): data = await request.json() user_input = data.get("input", "") + user_id = data.get("user_id", "anonymous") + linked_profile_strategy = data.get("linked_profile_strategy") + agent_type = data.get("agent_type") # Optional shortcut + webhook_url = data.get("webhook_url") + + # Step 1: If no agent_type, use Manager Agent to decide + if not agent_type: + manager_result = await Runner.run(manager_agent, input=user_input) + try: + parsed = json.loads(manager_result.final_output) + agent_type = parsed.get("route_to") + except: + return {"needs_clarification": True, "message": "Could not understand intent."} + + agent = AGENT_MAP.get(agent_type) + if not agent: + return {"error": f"Unknown agent type: {agent_type}"} + + # Step 2: Run the selected agent result = await Runner.run(agent, input=user_input) - return {"output": result.final_output} + if hasattr(result, "requires_user_input"): + return { + "needs_clarification": True, + "message": result.requires_user_input, + } + + # Step 3: Format AgentSession + session = { + "agent_type": agent_type, + "user_id": user_id, + "input_details": data.get("input_details", {}), + "output_details": result.final_output, + "linked_profile_strategy": linked_profile_strategy, + "source_content_piece": data.get("source_content_piece"), + "created_at": datetime.utcnow().isoformat(), + } + + # Step 4: Optionally push to external webhook (Make, Bubble, etc) + if webhook_url: + async with httpx.AsyncClient() as client: + try: + await client.post(webhook_url, json=session) + except Exception as e: + session["webhook_error"] = str(e) + + return session From 258e3349f8605c30c14007032b3242f1b0ef5026 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 13:58:13 +0900 Subject: [PATCH 007/230] Update agent_server.py - 0413 with updated flow logic + new onboarding agent separate file routing added. 
(row 20) --- agent_server.py | 105 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 93 insertions(+), 12 deletions(-) diff --git a/agent_server.py b/agent_server.py index 5d4d8071..183ad49f 100644 --- a/agent_server.py +++ b/agent_server.py @@ -1,6 +1,7 @@ from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware -from agents import Agent, Runner +from agents import Agent, Runner, tools +from agents_onboarding import router as onboarding_router # ✅ New router added from datetime import datetime import httpx import os @@ -16,6 +17,8 @@ allow_headers=["*"], ) +app.include_router(onboarding_router) # ✅ Mount /onboard endpoint here + # === Define Agents === manager_agent = Agent( name="Manager", @@ -33,8 +36,19 @@ instructions=""" You create clear, actionable 7-day social media campaign strategies. If user input is unclear or missing platform, audience, or tone — ask for clarification. -Respond in structured JSON. -""" +Respond in structured JSON like: +{ + "output_type": "strategy_plan", + "contains_image": false, + "details": { + "days": [ + { "title": "...", "theme": "...", "cta": "..." } + ] + } +} +Only return JSON in this format. +""", + tools=[tools.browser] ) content_agent = Agent( @@ -42,24 +56,65 @@ instructions=""" You write engaging, brand-aligned social content. If user input lacks platform or goal, ask for clarification. -Return post drafts with caption, CTA, and hook. -""" +Return post drafts in this JSON format: +{ + "output_type": "content_variants", + "contains_image": false, + "details": { + "variants": [ + { + "platform": "Instagram", + "caption": "...", + "hook": "...", + "cta": "..." + } + ] + } +} +Only respond in this format. +""", + tools=[] ) repurpose_agent = Agent( name="RepurposeAgent", instructions=""" You convert existing posts into new formats for different platforms. -If missing input post or target format, ask for clarification. -""" +Respond using this format: +{ + "output_type": "repurposed_posts", + "contains_image": false, + "details": { + "original": "...", + "repurposed": [ + { + "platform": "...", + "caption": "...", + "format": "..." + } + ] + } +} +""", + tools=[] ) feedback_agent = Agent( name="FeedbackAgent", instructions=""" You evaluate content and offer improvements. -If missing content or performance data, ask what’s needed. -""" +Respond in this structured format: +{ + "output_type": "content_feedback", + "contains_image": false, + "details": { + "original": "...", + "feedback": "...", + "suggested_edit": "..." 
+ } +} +""", + tools=[tools.code_interpreter] ) AGENT_MAP = { @@ -77,6 +132,11 @@ async def run_agent(request: Request): linked_profile_strategy = data.get("linked_profile_strategy") agent_type = data.get("agent_type") # Optional shortcut webhook_url = data.get("webhook_url") + image_url = data.get("image_url") + debug_info = {} + + if image_url: + user_input += f"\nHere is the image to consider: {image_url}" # Step 1: If no agent_type, use Manager Agent to decide if not agent_type: @@ -84,8 +144,8 @@ async def run_agent(request: Request): try: parsed = json.loads(manager_result.final_output) agent_type = parsed.get("route_to") - except: - return {"needs_clarification": True, "message": "Could not understand intent."} + except Exception as e: + return {"needs_clarification": True, "message": "Could not understand intent.", "debug_info": str(e)} agent = AGENT_MAP.get(agent_type) if not agent: @@ -99,17 +159,38 @@ async def run_agent(request: Request): "message": result.requires_user_input, } + try: + parsed_output = json.loads(result.final_output) + output_type = parsed_output.get("output_type") + output_details = parsed_output.get("details") + contains_image = parsed_output.get("contains_image", False) + + if not output_type or not output_details: + raise ValueError("Missing required output keys") + except Exception as e: + parsed_output = None + output_type = "raw_text" + output_details = result.final_output + contains_image = False + debug_info["validation_error"] = str(e) + debug_info["raw_output"] = result.final_output + # Step 3: Format AgentSession session = { "agent_type": agent_type, "user_id": user_id, "input_details": data.get("input_details", {}), - "output_details": result.final_output, + "output_type": output_type, + "contains_image": contains_image, + "output_details": output_details, "linked_profile_strategy": linked_profile_strategy, "source_content_piece": data.get("source_content_piece"), "created_at": datetime.utcnow().isoformat(), } + if debug_info: + session["debug_info"] = debug_info + # Step 4: Optionally push to external webhook (Make, Bubble, etc) if webhook_url: async with httpx.AsyncClient() as client: From dbc341c259b6868fba6f41b6e1b6d6dfc57d4d47 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 13:59:50 +0900 Subject: [PATCH 008/230] Create agents_onboarding.py separate agents_onboarding file for new idea of brainstorming agent for influencers when they first join the site --- agents_onboarding.py | 79 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 agents_onboarding.py diff --git a/agents_onboarding.py b/agents_onboarding.py new file mode 100644 index 00000000..eb860bc1 --- /dev/null +++ b/agents_onboarding.py @@ -0,0 +1,79 @@ +from fastapi import APIRouter, Request +from agents import Agent, Runner +from datetime import datetime +import json +import httpx + +router = APIRouter() + +# Define the onboarding agent +onboarding_agent = Agent( + name="OnboardingAgent", + instructions=""" +You are an onboarding assistant helping new influencers introduce themselves. +Your job is to: +1. Gently ask about their interests, content goals, and style. +2. Summarize their early profile with a few soft fields. +3. Optionally ask a clarifying follow-up question if needed. 
+ +Return your response in the following format: +{ + "output_type": "soft_profile", + "contains_image": false, + "details": { + "interests": ["wellness", "fitness"], + "preferred_style": "authentic, relaxed", + "content_goals": "collaborations, brand storytelling", + "next_question": "Would you consider doing live sessions?" + } +} +Only reply in this format. +""" +) + +@router.post("/onboard") +async def onboard_influencer(request: Request): + data = await request.json() + user_input = data.get("input", "") + user_id = data.get("user_id", "anonymous") + webhook_url = data.get("webhook_url") + debug_info = {} + + result = await Runner.run(onboarding_agent, input=user_input) + + try: + parsed_output = json.loads(result.final_output) + output_type = parsed_output.get("output_type") + output_details = parsed_output.get("details") + contains_image = parsed_output.get("contains_image", False) + + if not output_type or not output_details: + raise ValueError("Missing required output keys") + except Exception as e: + parsed_output = None + output_type = "raw_text" + output_details = result.final_output + contains_image = False + debug_info["validation_error"] = str(e) + debug_info["raw_output"] = result.final_output + + session = { + "agent_type": "onboarding", + "user_id": user_id, + "output_type": output_type, + "contains_image": contains_image, + "output_details": output_details, + "created_at": datetime.utcnow().isoformat(), + } + + if debug_info: + session["debug_info"] = debug_info + + if webhook_url: + async with httpx.AsyncClient() as client: + try: + await client.post(webhook_url, json=session) + except Exception as e: + session["webhook_error"] = str(e) + + return session From 52721183bae0118830cc5f76fac9dcdb90643b87 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 05:26:16 +0000 Subject: [PATCH 009/230] Rename agents to myagents for clarity --- agent_server.py | 2 +- agents_onboarding.py | 2 +- src/{agents => myagents}/__init__.py | 0 src/{agents => myagents}/_config.py | 0 src/{agents => myagents}/_debug.py | 0 src/{agents => myagents}/_run_impl.py | 0 src/{agents => myagents}/agent.py | 0 src/{agents => myagents}/agent_output.py | 0 src/{agents => myagents}/agent_server.py | 0 src/{agents => myagents}/computer.py | 0 src/{agents => myagents}/exceptions.py | 0 src/{agents => myagents}/extensions/__init__.py | 0 src/{agents => myagents}/extensions/handoff_filters.py | 0 src/{agents => myagents}/extensions/handoff_prompt.py | 0 src/{agents => myagents}/extensions/visualization.py | 0 src/{agents => myagents}/function_schema.py | 0 src/{agents => myagents}/guardrail.py | 0 src/{agents => myagents}/handoffs.py | 0 src/{agents => myagents}/items.py | 0 src/{agents => myagents}/lifecycle.py | 0 src/{agents => myagents}/logger.py | 0 src/{agents => myagents}/mcp/__init__.py | 0 src/{agents => myagents}/mcp/server.py | 0 src/{agents => myagents}/mcp/util.py | 0 src/{agents => myagents}/model_settings.py | 0 src/{agents => myagents}/models/__init__.py | 0 src/{agents => myagents}/models/_openai_shared.py | 0 src/{agents => myagents}/models/fake_id.py | 0 src/{agents => myagents}/models/interface.py | 0 src/{agents => myagents}/models/openai_chatcompletions.py | 0 src/{agents => myagents}/models/openai_provider.py | 0 src/{agents => myagents}/models/openai_responses.py | 0 src/{agents => myagents}/py.typed | 0 src/{agents => myagents}/result.py | 0 src/{agents => myagents}/run.py | 0 src/{agents => myagents}/run_context.py | 
0 src/{agents => myagents}/stream_events.py | 0 src/{agents => myagents}/strict_schema.py | 0 src/{agents => myagents}/tool.py | 0 src/{agents => myagents}/tracing/__init__.py | 0 src/{agents => myagents}/tracing/create.py | 0 src/{agents => myagents}/tracing/logger.py | 0 src/{agents => myagents}/tracing/processor_interface.py | 0 src/{agents => myagents}/tracing/processors.py | 0 src/{agents => myagents}/tracing/scope.py | 0 src/{agents => myagents}/tracing/setup.py | 0 src/{agents => myagents}/tracing/span_data.py | 0 src/{agents => myagents}/tracing/spans.py | 0 src/{agents => myagents}/tracing/traces.py | 0 src/{agents => myagents}/tracing/util.py | 0 src/{agents => myagents}/usage.py | 0 src/{agents => myagents}/util/__init__.py | 0 src/{agents => myagents}/util/_coro.py | 0 src/{agents => myagents}/util/_error_tracing.py | 0 src/{agents => myagents}/util/_json.py | 0 src/{agents => myagents}/util/_pretty_print.py | 0 src/{agents => myagents}/util/_transforms.py | 0 src/{agents => myagents}/util/_types.py | 0 src/{agents => myagents}/version.py | 0 src/{agents => myagents}/voice/__init__.py | 0 src/{agents => myagents}/voice/events.py | 0 src/{agents => myagents}/voice/exceptions.py | 0 src/{agents => myagents}/voice/imports.py | 0 src/{agents => myagents}/voice/input.py | 0 src/{agents => myagents}/voice/model.py | 0 src/{agents => myagents}/voice/models/__init__.py | 0 src/{agents => myagents}/voice/models/openai_model_provider.py | 0 src/{agents => myagents}/voice/models/openai_stt.py | 0 src/{agents => myagents}/voice/models/openai_tts.py | 0 src/{agents => myagents}/voice/pipeline.py | 0 src/{agents => myagents}/voice/pipeline_config.py | 0 src/{agents => myagents}/voice/result.py | 0 src/{agents => myagents}/voice/utils.py | 0 src/{agents => myagents}/voice/workflow.py | 0 74 files changed, 2 insertions(+), 2 deletions(-) rename src/{agents => myagents}/__init__.py (100%) rename src/{agents => myagents}/_config.py (100%) rename src/{agents => myagents}/_debug.py (100%) rename src/{agents => myagents}/_run_impl.py (100%) rename src/{agents => myagents}/agent.py (100%) rename src/{agents => myagents}/agent_output.py (100%) rename src/{agents => myagents}/agent_server.py (100%) rename src/{agents => myagents}/computer.py (100%) rename src/{agents => myagents}/exceptions.py (100%) rename src/{agents => myagents}/extensions/__init__.py (100%) rename src/{agents => myagents}/extensions/handoff_filters.py (100%) rename src/{agents => myagents}/extensions/handoff_prompt.py (100%) rename src/{agents => myagents}/extensions/visualization.py (100%) rename src/{agents => myagents}/function_schema.py (100%) rename src/{agents => myagents}/guardrail.py (100%) rename src/{agents => myagents}/handoffs.py (100%) rename src/{agents => myagents}/items.py (100%) rename src/{agents => myagents}/lifecycle.py (100%) rename src/{agents => myagents}/logger.py (100%) rename src/{agents => myagents}/mcp/__init__.py (100%) rename src/{agents => myagents}/mcp/server.py (100%) rename src/{agents => myagents}/mcp/util.py (100%) rename src/{agents => myagents}/model_settings.py (100%) rename src/{agents => myagents}/models/__init__.py (100%) rename src/{agents => myagents}/models/_openai_shared.py (100%) rename src/{agents => myagents}/models/fake_id.py (100%) rename src/{agents => myagents}/models/interface.py (100%) rename src/{agents => myagents}/models/openai_chatcompletions.py (100%) rename src/{agents => myagents}/models/openai_provider.py (100%) rename src/{agents => 
myagents}/models/openai_responses.py (100%) rename src/{agents => myagents}/py.typed (100%) rename src/{agents => myagents}/result.py (100%) rename src/{agents => myagents}/run.py (100%) rename src/{agents => myagents}/run_context.py (100%) rename src/{agents => myagents}/stream_events.py (100%) rename src/{agents => myagents}/strict_schema.py (100%) rename src/{agents => myagents}/tool.py (100%) rename src/{agents => myagents}/tracing/__init__.py (100%) rename src/{agents => myagents}/tracing/create.py (100%) rename src/{agents => myagents}/tracing/logger.py (100%) rename src/{agents => myagents}/tracing/processor_interface.py (100%) rename src/{agents => myagents}/tracing/processors.py (100%) rename src/{agents => myagents}/tracing/scope.py (100%) rename src/{agents => myagents}/tracing/setup.py (100%) rename src/{agents => myagents}/tracing/span_data.py (100%) rename src/{agents => myagents}/tracing/spans.py (100%) rename src/{agents => myagents}/tracing/traces.py (100%) rename src/{agents => myagents}/tracing/util.py (100%) rename src/{agents => myagents}/usage.py (100%) rename src/{agents => myagents}/util/__init__.py (100%) rename src/{agents => myagents}/util/_coro.py (100%) rename src/{agents => myagents}/util/_error_tracing.py (100%) rename src/{agents => myagents}/util/_json.py (100%) rename src/{agents => myagents}/util/_pretty_print.py (100%) rename src/{agents => myagents}/util/_transforms.py (100%) rename src/{agents => myagents}/util/_types.py (100%) rename src/{agents => myagents}/version.py (100%) rename src/{agents => myagents}/voice/__init__.py (100%) rename src/{agents => myagents}/voice/events.py (100%) rename src/{agents => myagents}/voice/exceptions.py (100%) rename src/{agents => myagents}/voice/imports.py (100%) rename src/{agents => myagents}/voice/input.py (100%) rename src/{agents => myagents}/voice/model.py (100%) rename src/{agents => myagents}/voice/models/__init__.py (100%) rename src/{agents => myagents}/voice/models/openai_model_provider.py (100%) rename src/{agents => myagents}/voice/models/openai_stt.py (100%) rename src/{agents => myagents}/voice/models/openai_tts.py (100%) rename src/{agents => myagents}/voice/pipeline.py (100%) rename src/{agents => myagents}/voice/pipeline_config.py (100%) rename src/{agents => myagents}/voice/result.py (100%) rename src/{agents => myagents}/voice/utils.py (100%) rename src/{agents => myagents}/voice/workflow.py (100%) diff --git a/agent_server.py b/agent_server.py index 183ad49f..3b385fa6 100644 --- a/agent_server.py +++ b/agent_server.py @@ -1,6 +1,6 @@ from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware -from agents import Agent, Runner, tools +from myagents import Agent, Runner, tools from agents_onboarding import router as onboarding_router # ✅ New router added from datetime import datetime import httpx diff --git a/agents_onboarding.py b/agents_onboarding.py index eb860bc1..5c99e855 100644 --- a/agents_onboarding.py +++ b/agents_onboarding.py @@ -1,5 +1,5 @@ from fastapi import APIRouter, Request -from agents import Agent, Runner +from myagents import Agent, Runner from datetime import datetime import json import httpx diff --git a/src/agents/__init__.py b/src/myagents/__init__.py similarity index 100% rename from src/agents/__init__.py rename to src/myagents/__init__.py diff --git a/src/agents/_config.py b/src/myagents/_config.py similarity index 100% rename from src/agents/_config.py rename to src/myagents/_config.py diff --git a/src/agents/_debug.py 
b/src/myagents/_debug.py similarity index 100% rename from src/agents/_debug.py rename to src/myagents/_debug.py diff --git a/src/agents/_run_impl.py b/src/myagents/_run_impl.py similarity index 100% rename from src/agents/_run_impl.py rename to src/myagents/_run_impl.py diff --git a/src/agents/agent.py b/src/myagents/agent.py similarity index 100% rename from src/agents/agent.py rename to src/myagents/agent.py diff --git a/src/agents/agent_output.py b/src/myagents/agent_output.py similarity index 100% rename from src/agents/agent_output.py rename to src/myagents/agent_output.py diff --git a/src/agents/agent_server.py b/src/myagents/agent_server.py similarity index 100% rename from src/agents/agent_server.py rename to src/myagents/agent_server.py diff --git a/src/agents/computer.py b/src/myagents/computer.py similarity index 100% rename from src/agents/computer.py rename to src/myagents/computer.py diff --git a/src/agents/exceptions.py b/src/myagents/exceptions.py similarity index 100% rename from src/agents/exceptions.py rename to src/myagents/exceptions.py diff --git a/src/agents/extensions/__init__.py b/src/myagents/extensions/__init__.py similarity index 100% rename from src/agents/extensions/__init__.py rename to src/myagents/extensions/__init__.py diff --git a/src/agents/extensions/handoff_filters.py b/src/myagents/extensions/handoff_filters.py similarity index 100% rename from src/agents/extensions/handoff_filters.py rename to src/myagents/extensions/handoff_filters.py diff --git a/src/agents/extensions/handoff_prompt.py b/src/myagents/extensions/handoff_prompt.py similarity index 100% rename from src/agents/extensions/handoff_prompt.py rename to src/myagents/extensions/handoff_prompt.py diff --git a/src/agents/extensions/visualization.py b/src/myagents/extensions/visualization.py similarity index 100% rename from src/agents/extensions/visualization.py rename to src/myagents/extensions/visualization.py diff --git a/src/agents/function_schema.py b/src/myagents/function_schema.py similarity index 100% rename from src/agents/function_schema.py rename to src/myagents/function_schema.py diff --git a/src/agents/guardrail.py b/src/myagents/guardrail.py similarity index 100% rename from src/agents/guardrail.py rename to src/myagents/guardrail.py diff --git a/src/agents/handoffs.py b/src/myagents/handoffs.py similarity index 100% rename from src/agents/handoffs.py rename to src/myagents/handoffs.py diff --git a/src/agents/items.py b/src/myagents/items.py similarity index 100% rename from src/agents/items.py rename to src/myagents/items.py diff --git a/src/agents/lifecycle.py b/src/myagents/lifecycle.py similarity index 100% rename from src/agents/lifecycle.py rename to src/myagents/lifecycle.py diff --git a/src/agents/logger.py b/src/myagents/logger.py similarity index 100% rename from src/agents/logger.py rename to src/myagents/logger.py diff --git a/src/agents/mcp/__init__.py b/src/myagents/mcp/__init__.py similarity index 100% rename from src/agents/mcp/__init__.py rename to src/myagents/mcp/__init__.py diff --git a/src/agents/mcp/server.py b/src/myagents/mcp/server.py similarity index 100% rename from src/agents/mcp/server.py rename to src/myagents/mcp/server.py diff --git a/src/agents/mcp/util.py b/src/myagents/mcp/util.py similarity index 100% rename from src/agents/mcp/util.py rename to src/myagents/mcp/util.py diff --git a/src/agents/model_settings.py b/src/myagents/model_settings.py similarity index 100% rename from src/agents/model_settings.py rename to 
src/myagents/model_settings.py diff --git a/src/agents/models/__init__.py b/src/myagents/models/__init__.py similarity index 100% rename from src/agents/models/__init__.py rename to src/myagents/models/__init__.py diff --git a/src/agents/models/_openai_shared.py b/src/myagents/models/_openai_shared.py similarity index 100% rename from src/agents/models/_openai_shared.py rename to src/myagents/models/_openai_shared.py diff --git a/src/agents/models/fake_id.py b/src/myagents/models/fake_id.py similarity index 100% rename from src/agents/models/fake_id.py rename to src/myagents/models/fake_id.py diff --git a/src/agents/models/interface.py b/src/myagents/models/interface.py similarity index 100% rename from src/agents/models/interface.py rename to src/myagents/models/interface.py diff --git a/src/agents/models/openai_chatcompletions.py b/src/myagents/models/openai_chatcompletions.py similarity index 100% rename from src/agents/models/openai_chatcompletions.py rename to src/myagents/models/openai_chatcompletions.py diff --git a/src/agents/models/openai_provider.py b/src/myagents/models/openai_provider.py similarity index 100% rename from src/agents/models/openai_provider.py rename to src/myagents/models/openai_provider.py diff --git a/src/agents/models/openai_responses.py b/src/myagents/models/openai_responses.py similarity index 100% rename from src/agents/models/openai_responses.py rename to src/myagents/models/openai_responses.py diff --git a/src/agents/py.typed b/src/myagents/py.typed similarity index 100% rename from src/agents/py.typed rename to src/myagents/py.typed diff --git a/src/agents/result.py b/src/myagents/result.py similarity index 100% rename from src/agents/result.py rename to src/myagents/result.py diff --git a/src/agents/run.py b/src/myagents/run.py similarity index 100% rename from src/agents/run.py rename to src/myagents/run.py diff --git a/src/agents/run_context.py b/src/myagents/run_context.py similarity index 100% rename from src/agents/run_context.py rename to src/myagents/run_context.py diff --git a/src/agents/stream_events.py b/src/myagents/stream_events.py similarity index 100% rename from src/agents/stream_events.py rename to src/myagents/stream_events.py diff --git a/src/agents/strict_schema.py b/src/myagents/strict_schema.py similarity index 100% rename from src/agents/strict_schema.py rename to src/myagents/strict_schema.py diff --git a/src/agents/tool.py b/src/myagents/tool.py similarity index 100% rename from src/agents/tool.py rename to src/myagents/tool.py diff --git a/src/agents/tracing/__init__.py b/src/myagents/tracing/__init__.py similarity index 100% rename from src/agents/tracing/__init__.py rename to src/myagents/tracing/__init__.py diff --git a/src/agents/tracing/create.py b/src/myagents/tracing/create.py similarity index 100% rename from src/agents/tracing/create.py rename to src/myagents/tracing/create.py diff --git a/src/agents/tracing/logger.py b/src/myagents/tracing/logger.py similarity index 100% rename from src/agents/tracing/logger.py rename to src/myagents/tracing/logger.py diff --git a/src/agents/tracing/processor_interface.py b/src/myagents/tracing/processor_interface.py similarity index 100% rename from src/agents/tracing/processor_interface.py rename to src/myagents/tracing/processor_interface.py diff --git a/src/agents/tracing/processors.py b/src/myagents/tracing/processors.py similarity index 100% rename from src/agents/tracing/processors.py rename to src/myagents/tracing/processors.py diff --git a/src/agents/tracing/scope.py 
b/src/myagents/tracing/scope.py similarity index 100% rename from src/agents/tracing/scope.py rename to src/myagents/tracing/scope.py diff --git a/src/agents/tracing/setup.py b/src/myagents/tracing/setup.py similarity index 100% rename from src/agents/tracing/setup.py rename to src/myagents/tracing/setup.py diff --git a/src/agents/tracing/span_data.py b/src/myagents/tracing/span_data.py similarity index 100% rename from src/agents/tracing/span_data.py rename to src/myagents/tracing/span_data.py diff --git a/src/agents/tracing/spans.py b/src/myagents/tracing/spans.py similarity index 100% rename from src/agents/tracing/spans.py rename to src/myagents/tracing/spans.py diff --git a/src/agents/tracing/traces.py b/src/myagents/tracing/traces.py similarity index 100% rename from src/agents/tracing/traces.py rename to src/myagents/tracing/traces.py diff --git a/src/agents/tracing/util.py b/src/myagents/tracing/util.py similarity index 100% rename from src/agents/tracing/util.py rename to src/myagents/tracing/util.py diff --git a/src/agents/usage.py b/src/myagents/usage.py similarity index 100% rename from src/agents/usage.py rename to src/myagents/usage.py diff --git a/src/agents/util/__init__.py b/src/myagents/util/__init__.py similarity index 100% rename from src/agents/util/__init__.py rename to src/myagents/util/__init__.py diff --git a/src/agents/util/_coro.py b/src/myagents/util/_coro.py similarity index 100% rename from src/agents/util/_coro.py rename to src/myagents/util/_coro.py diff --git a/src/agents/util/_error_tracing.py b/src/myagents/util/_error_tracing.py similarity index 100% rename from src/agents/util/_error_tracing.py rename to src/myagents/util/_error_tracing.py diff --git a/src/agents/util/_json.py b/src/myagents/util/_json.py similarity index 100% rename from src/agents/util/_json.py rename to src/myagents/util/_json.py diff --git a/src/agents/util/_pretty_print.py b/src/myagents/util/_pretty_print.py similarity index 100% rename from src/agents/util/_pretty_print.py rename to src/myagents/util/_pretty_print.py diff --git a/src/agents/util/_transforms.py b/src/myagents/util/_transforms.py similarity index 100% rename from src/agents/util/_transforms.py rename to src/myagents/util/_transforms.py diff --git a/src/agents/util/_types.py b/src/myagents/util/_types.py similarity index 100% rename from src/agents/util/_types.py rename to src/myagents/util/_types.py diff --git a/src/agents/version.py b/src/myagents/version.py similarity index 100% rename from src/agents/version.py rename to src/myagents/version.py diff --git a/src/agents/voice/__init__.py b/src/myagents/voice/__init__.py similarity index 100% rename from src/agents/voice/__init__.py rename to src/myagents/voice/__init__.py diff --git a/src/agents/voice/events.py b/src/myagents/voice/events.py similarity index 100% rename from src/agents/voice/events.py rename to src/myagents/voice/events.py diff --git a/src/agents/voice/exceptions.py b/src/myagents/voice/exceptions.py similarity index 100% rename from src/agents/voice/exceptions.py rename to src/myagents/voice/exceptions.py diff --git a/src/agents/voice/imports.py b/src/myagents/voice/imports.py similarity index 100% rename from src/agents/voice/imports.py rename to src/myagents/voice/imports.py diff --git a/src/agents/voice/input.py b/src/myagents/voice/input.py similarity index 100% rename from src/agents/voice/input.py rename to src/myagents/voice/input.py diff --git a/src/agents/voice/model.py b/src/myagents/voice/model.py similarity index 100% rename from 
src/agents/voice/model.py rename to src/myagents/voice/model.py diff --git a/src/agents/voice/models/__init__.py b/src/myagents/voice/models/__init__.py similarity index 100% rename from src/agents/voice/models/__init__.py rename to src/myagents/voice/models/__init__.py diff --git a/src/agents/voice/models/openai_model_provider.py b/src/myagents/voice/models/openai_model_provider.py similarity index 100% rename from src/agents/voice/models/openai_model_provider.py rename to src/myagents/voice/models/openai_model_provider.py diff --git a/src/agents/voice/models/openai_stt.py b/src/myagents/voice/models/openai_stt.py similarity index 100% rename from src/agents/voice/models/openai_stt.py rename to src/myagents/voice/models/openai_stt.py diff --git a/src/agents/voice/models/openai_tts.py b/src/myagents/voice/models/openai_tts.py similarity index 100% rename from src/agents/voice/models/openai_tts.py rename to src/myagents/voice/models/openai_tts.py diff --git a/src/agents/voice/pipeline.py b/src/myagents/voice/pipeline.py similarity index 100% rename from src/agents/voice/pipeline.py rename to src/myagents/voice/pipeline.py diff --git a/src/agents/voice/pipeline_config.py b/src/myagents/voice/pipeline_config.py similarity index 100% rename from src/agents/voice/pipeline_config.py rename to src/myagents/voice/pipeline_config.py diff --git a/src/agents/voice/result.py b/src/myagents/voice/result.py similarity index 100% rename from src/agents/voice/result.py rename to src/myagents/voice/result.py diff --git a/src/agents/voice/utils.py b/src/myagents/voice/utils.py similarity index 100% rename from src/agents/voice/utils.py rename to src/myagents/voice/utils.py diff --git a/src/agents/voice/workflow.py b/src/myagents/voice/workflow.py similarity index 100% rename from src/agents/voice/workflow.py rename to src/myagents/voice/workflow.py From d9ec09587745cdf322c49f0de8048b994930174d Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 05:36:00 +0000 Subject: [PATCH 010/230] update agent files for path debug --- agent_server.py | 4 ++++ agents_onboarding.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/agent_server.py b/agent_server.py index 3b385fa6..b0085428 100644 --- a/agent_server.py +++ b/agent_server.py @@ -1,3 +1,7 @@ +import sys +import os +sys.path.append(os.path.dirname(os.path.abspath(__file__))) + from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from myagents import Agent, Runner, tools diff --git a/agents_onboarding.py b/agents_onboarding.py index 5c99e855..81123520 100644 --- a/agents_onboarding.py +++ b/agents_onboarding.py @@ -1,3 +1,7 @@ +import sys +import os +sys.path.append(os.path.dirname(os.path.abspath(__file__))) + from fastapi import APIRouter, Request from myagents import Agent, Runner from datetime import datetime From 05b2a35af9a0596c12c2248424a291fb03544ea9 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 09:41:55 +0000 Subject: [PATCH 011/230] 0413 pt2 reupdate to agents folder revert back --- src/{myagents => agents}/__init__.py | 0 src/{myagents => agents}/_config.py | 0 src/{myagents => agents}/_debug.py | 0 src/{myagents => agents}/_run_impl.py | 0 src/{myagents => agents}/agent.py | 0 src/{myagents => agents}/agent_output.py | 0 agent_server.py => src/agents/agent_server.py | 9 +++------ agents_onboarding.py => src/agents/agents_onboarding.py | 2 +- src/{myagents => 
agents}/computer.py | 0 src/{myagents => agents}/exceptions.py | 0 src/{myagents => agents}/extensions/__init__.py | 0 src/{myagents => agents}/extensions/handoff_filters.py | 0 src/{myagents => agents}/extensions/handoff_prompt.py | 0 src/{myagents => agents}/extensions/visualization.py | 0 src/{myagents => agents}/function_schema.py | 0 src/{myagents => agents}/guardrail.py | 0 src/{myagents => agents}/handoffs.py | 0 src/{myagents => agents}/items.py | 0 src/{myagents => agents}/lifecycle.py | 0 src/{myagents => agents}/logger.py | 0 src/{myagents => agents}/mcp/__init__.py | 0 src/{myagents => agents}/mcp/server.py | 0 src/{myagents => agents}/mcp/util.py | 0 src/{myagents => agents}/model_settings.py | 0 src/{myagents => agents}/models/__init__.py | 0 src/{myagents => agents}/models/_openai_shared.py | 0 src/{myagents => agents}/models/fake_id.py | 0 src/{myagents => agents}/models/interface.py | 0 .../models/openai_chatcompletions.py | 0 src/{myagents => agents}/models/openai_provider.py | 0 src/{myagents => agents}/models/openai_responses.py | 0 src/{myagents => agents}/py.typed | 0 src/{myagents => agents}/result.py | 0 src/{myagents => agents}/run.py | 0 src/{myagents => agents}/run_context.py | 0 src/{myagents => agents}/stream_events.py | 0 src/{myagents => agents}/strict_schema.py | 0 src/{myagents => agents}/tool.py | 0 src/{myagents => agents}/tracing/__init__.py | 0 src/{myagents => agents}/tracing/create.py | 0 src/{myagents => agents}/tracing/logger.py | 0 src/{myagents => agents}/tracing/processor_interface.py | 0 src/{myagents => agents}/tracing/processors.py | 0 src/{myagents => agents}/tracing/scope.py | 0 src/{myagents => agents}/tracing/setup.py | 0 src/{myagents => agents}/tracing/span_data.py | 0 src/{myagents => agents}/tracing/spans.py | 0 src/{myagents => agents}/tracing/traces.py | 0 src/{myagents => agents}/tracing/util.py | 0 src/{myagents => agents}/usage.py | 0 src/{myagents => agents}/util/__init__.py | 0 src/{myagents => agents}/util/_coro.py | 0 src/{myagents => agents}/util/_error_tracing.py | 0 src/{myagents => agents}/util/_json.py | 0 src/{myagents => agents}/util/_pretty_print.py | 0 src/{myagents => agents}/util/_transforms.py | 0 src/{myagents => agents}/util/_types.py | 0 src/{myagents => agents}/version.py | 0 src/{myagents => agents}/voice/__init__.py | 0 src/{myagents => agents}/voice/events.py | 0 src/{myagents => agents}/voice/exceptions.py | 0 src/{myagents => agents}/voice/imports.py | 0 src/{myagents => agents}/voice/input.py | 0 src/{myagents => agents}/voice/model.py | 0 src/{myagents => agents}/voice/models/__init__.py | 0 .../voice/models/openai_model_provider.py | 0 src/{myagents => agents}/voice/models/openai_stt.py | 0 src/{myagents => agents}/voice/models/openai_tts.py | 0 src/{myagents => agents}/voice/pipeline.py | 0 src/{myagents => agents}/voice/pipeline_config.py | 0 src/{myagents => agents}/voice/result.py | 0 src/{myagents => agents}/voice/utils.py | 0 src/{myagents => agents}/voice/workflow.py | 0 src/myagents/agent_server.py | 1 - 74 files changed, 4 insertions(+), 8 deletions(-) rename src/{myagents => agents}/__init__.py (100%) rename src/{myagents => agents}/_config.py (100%) rename src/{myagents => agents}/_debug.py (100%) rename src/{myagents => agents}/_run_impl.py (100%) rename src/{myagents => agents}/agent.py (100%) rename src/{myagents => agents}/agent_output.py (100%) rename agent_server.py => src/agents/agent_server.py (95%) rename agents_onboarding.py => src/agents/agents_onboarding.py (96%) rename 
src/{myagents => agents}/computer.py (100%) rename src/{myagents => agents}/exceptions.py (100%) rename src/{myagents => agents}/extensions/__init__.py (100%) rename src/{myagents => agents}/extensions/handoff_filters.py (100%) rename src/{myagents => agents}/extensions/handoff_prompt.py (100%) rename src/{myagents => agents}/extensions/visualization.py (100%) rename src/{myagents => agents}/function_schema.py (100%) rename src/{myagents => agents}/guardrail.py (100%) rename src/{myagents => agents}/handoffs.py (100%) rename src/{myagents => agents}/items.py (100%) rename src/{myagents => agents}/lifecycle.py (100%) rename src/{myagents => agents}/logger.py (100%) rename src/{myagents => agents}/mcp/__init__.py (100%) rename src/{myagents => agents}/mcp/server.py (100%) rename src/{myagents => agents}/mcp/util.py (100%) rename src/{myagents => agents}/model_settings.py (100%) rename src/{myagents => agents}/models/__init__.py (100%) rename src/{myagents => agents}/models/_openai_shared.py (100%) rename src/{myagents => agents}/models/fake_id.py (100%) rename src/{myagents => agents}/models/interface.py (100%) rename src/{myagents => agents}/models/openai_chatcompletions.py (100%) rename src/{myagents => agents}/models/openai_provider.py (100%) rename src/{myagents => agents}/models/openai_responses.py (100%) rename src/{myagents => agents}/py.typed (100%) rename src/{myagents => agents}/result.py (100%) rename src/{myagents => agents}/run.py (100%) rename src/{myagents => agents}/run_context.py (100%) rename src/{myagents => agents}/stream_events.py (100%) rename src/{myagents => agents}/strict_schema.py (100%) rename src/{myagents => agents}/tool.py (100%) rename src/{myagents => agents}/tracing/__init__.py (100%) rename src/{myagents => agents}/tracing/create.py (100%) rename src/{myagents => agents}/tracing/logger.py (100%) rename src/{myagents => agents}/tracing/processor_interface.py (100%) rename src/{myagents => agents}/tracing/processors.py (100%) rename src/{myagents => agents}/tracing/scope.py (100%) rename src/{myagents => agents}/tracing/setup.py (100%) rename src/{myagents => agents}/tracing/span_data.py (100%) rename src/{myagents => agents}/tracing/spans.py (100%) rename src/{myagents => agents}/tracing/traces.py (100%) rename src/{myagents => agents}/tracing/util.py (100%) rename src/{myagents => agents}/usage.py (100%) rename src/{myagents => agents}/util/__init__.py (100%) rename src/{myagents => agents}/util/_coro.py (100%) rename src/{myagents => agents}/util/_error_tracing.py (100%) rename src/{myagents => agents}/util/_json.py (100%) rename src/{myagents => agents}/util/_pretty_print.py (100%) rename src/{myagents => agents}/util/_transforms.py (100%) rename src/{myagents => agents}/util/_types.py (100%) rename src/{myagents => agents}/version.py (100%) rename src/{myagents => agents}/voice/__init__.py (100%) rename src/{myagents => agents}/voice/events.py (100%) rename src/{myagents => agents}/voice/exceptions.py (100%) rename src/{myagents => agents}/voice/imports.py (100%) rename src/{myagents => agents}/voice/input.py (100%) rename src/{myagents => agents}/voice/model.py (100%) rename src/{myagents => agents}/voice/models/__init__.py (100%) rename src/{myagents => agents}/voice/models/openai_model_provider.py (100%) rename src/{myagents => agents}/voice/models/openai_stt.py (100%) rename src/{myagents => agents}/voice/models/openai_tts.py (100%) rename src/{myagents => agents}/voice/pipeline.py (100%) rename src/{myagents => agents}/voice/pipeline_config.py (100%) 
rename src/{myagents => agents}/voice/result.py (100%) rename src/{myagents => agents}/voice/utils.py (100%) rename src/{myagents => agents}/voice/workflow.py (100%) delete mode 100644 src/myagents/agent_server.py diff --git a/src/myagents/__init__.py b/src/agents/__init__.py similarity index 100% rename from src/myagents/__init__.py rename to src/agents/__init__.py diff --git a/src/myagents/_config.py b/src/agents/_config.py similarity index 100% rename from src/myagents/_config.py rename to src/agents/_config.py diff --git a/src/myagents/_debug.py b/src/agents/_debug.py similarity index 100% rename from src/myagents/_debug.py rename to src/agents/_debug.py diff --git a/src/myagents/_run_impl.py b/src/agents/_run_impl.py similarity index 100% rename from src/myagents/_run_impl.py rename to src/agents/_run_impl.py diff --git a/src/myagents/agent.py b/src/agents/agent.py similarity index 100% rename from src/myagents/agent.py rename to src/agents/agent.py diff --git a/src/myagents/agent_output.py b/src/agents/agent_output.py similarity index 100% rename from src/myagents/agent_output.py rename to src/agents/agent_output.py diff --git a/agent_server.py b/src/agents/agent_server.py similarity index 95% rename from agent_server.py rename to src/agents/agent_server.py index b0085428..031c3193 100644 --- a/agent_server.py +++ b/src/agents/agent_server.py @@ -1,15 +1,14 @@ import sys import os -sys.path.append(os.path.dirname(os.path.abspath(__file__))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware -from myagents import Agent, Runner, tools -from agents_onboarding import router as onboarding_router # ✅ New router added +from agents import Agent, Runner, tools from datetime import datetime import httpx -import os import json +import os app = FastAPI() @@ -21,8 +20,6 @@ allow_headers=["*"], ) -app.include_router(onboarding_router) # ✅ Mount /onboard endpoint here - # === Define Agents === manager_agent = Agent( name="Manager", diff --git a/agents_onboarding.py b/src/agents/agents_onboarding.py similarity index 96% rename from agents_onboarding.py rename to src/agents/agents_onboarding.py index 81123520..a396cbfa 100644 --- a/agents_onboarding.py +++ b/src/agents/agents_onboarding.py @@ -1,6 +1,6 @@ import sys import os -sys.path.append(os.path.dirname(os.path.abspath(__file__))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from fastapi import APIRouter, Request from myagents import Agent, Runner diff --git a/src/myagents/computer.py b/src/agents/computer.py similarity index 100% rename from src/myagents/computer.py rename to src/agents/computer.py diff --git a/src/myagents/exceptions.py b/src/agents/exceptions.py similarity index 100% rename from src/myagents/exceptions.py rename to src/agents/exceptions.py diff --git a/src/myagents/extensions/__init__.py b/src/agents/extensions/__init__.py similarity index 100% rename from src/myagents/extensions/__init__.py rename to src/agents/extensions/__init__.py diff --git a/src/myagents/extensions/handoff_filters.py b/src/agents/extensions/handoff_filters.py similarity index 100% rename from src/myagents/extensions/handoff_filters.py rename to src/agents/extensions/handoff_filters.py diff --git a/src/myagents/extensions/handoff_prompt.py b/src/agents/extensions/handoff_prompt.py similarity index 100% rename from src/myagents/extensions/handoff_prompt.py rename to 
src/agents/extensions/handoff_prompt.py diff --git a/src/myagents/extensions/visualization.py b/src/agents/extensions/visualization.py similarity index 100% rename from src/myagents/extensions/visualization.py rename to src/agents/extensions/visualization.py diff --git a/src/myagents/function_schema.py b/src/agents/function_schema.py similarity index 100% rename from src/myagents/function_schema.py rename to src/agents/function_schema.py diff --git a/src/myagents/guardrail.py b/src/agents/guardrail.py similarity index 100% rename from src/myagents/guardrail.py rename to src/agents/guardrail.py diff --git a/src/myagents/handoffs.py b/src/agents/handoffs.py similarity index 100% rename from src/myagents/handoffs.py rename to src/agents/handoffs.py diff --git a/src/myagents/items.py b/src/agents/items.py similarity index 100% rename from src/myagents/items.py rename to src/agents/items.py diff --git a/src/myagents/lifecycle.py b/src/agents/lifecycle.py similarity index 100% rename from src/myagents/lifecycle.py rename to src/agents/lifecycle.py diff --git a/src/myagents/logger.py b/src/agents/logger.py similarity index 100% rename from src/myagents/logger.py rename to src/agents/logger.py diff --git a/src/myagents/mcp/__init__.py b/src/agents/mcp/__init__.py similarity index 100% rename from src/myagents/mcp/__init__.py rename to src/agents/mcp/__init__.py diff --git a/src/myagents/mcp/server.py b/src/agents/mcp/server.py similarity index 100% rename from src/myagents/mcp/server.py rename to src/agents/mcp/server.py diff --git a/src/myagents/mcp/util.py b/src/agents/mcp/util.py similarity index 100% rename from src/myagents/mcp/util.py rename to src/agents/mcp/util.py diff --git a/src/myagents/model_settings.py b/src/agents/model_settings.py similarity index 100% rename from src/myagents/model_settings.py rename to src/agents/model_settings.py diff --git a/src/myagents/models/__init__.py b/src/agents/models/__init__.py similarity index 100% rename from src/myagents/models/__init__.py rename to src/agents/models/__init__.py diff --git a/src/myagents/models/_openai_shared.py b/src/agents/models/_openai_shared.py similarity index 100% rename from src/myagents/models/_openai_shared.py rename to src/agents/models/_openai_shared.py diff --git a/src/myagents/models/fake_id.py b/src/agents/models/fake_id.py similarity index 100% rename from src/myagents/models/fake_id.py rename to src/agents/models/fake_id.py diff --git a/src/myagents/models/interface.py b/src/agents/models/interface.py similarity index 100% rename from src/myagents/models/interface.py rename to src/agents/models/interface.py diff --git a/src/myagents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py similarity index 100% rename from src/myagents/models/openai_chatcompletions.py rename to src/agents/models/openai_chatcompletions.py diff --git a/src/myagents/models/openai_provider.py b/src/agents/models/openai_provider.py similarity index 100% rename from src/myagents/models/openai_provider.py rename to src/agents/models/openai_provider.py diff --git a/src/myagents/models/openai_responses.py b/src/agents/models/openai_responses.py similarity index 100% rename from src/myagents/models/openai_responses.py rename to src/agents/models/openai_responses.py diff --git a/src/myagents/py.typed b/src/agents/py.typed similarity index 100% rename from src/myagents/py.typed rename to src/agents/py.typed diff --git a/src/myagents/result.py b/src/agents/result.py similarity index 100% rename from src/myagents/result.py 
rename to src/agents/result.py diff --git a/src/myagents/run.py b/src/agents/run.py similarity index 100% rename from src/myagents/run.py rename to src/agents/run.py diff --git a/src/myagents/run_context.py b/src/agents/run_context.py similarity index 100% rename from src/myagents/run_context.py rename to src/agents/run_context.py diff --git a/src/myagents/stream_events.py b/src/agents/stream_events.py similarity index 100% rename from src/myagents/stream_events.py rename to src/agents/stream_events.py diff --git a/src/myagents/strict_schema.py b/src/agents/strict_schema.py similarity index 100% rename from src/myagents/strict_schema.py rename to src/agents/strict_schema.py diff --git a/src/myagents/tool.py b/src/agents/tool.py similarity index 100% rename from src/myagents/tool.py rename to src/agents/tool.py diff --git a/src/myagents/tracing/__init__.py b/src/agents/tracing/__init__.py similarity index 100% rename from src/myagents/tracing/__init__.py rename to src/agents/tracing/__init__.py diff --git a/src/myagents/tracing/create.py b/src/agents/tracing/create.py similarity index 100% rename from src/myagents/tracing/create.py rename to src/agents/tracing/create.py diff --git a/src/myagents/tracing/logger.py b/src/agents/tracing/logger.py similarity index 100% rename from src/myagents/tracing/logger.py rename to src/agents/tracing/logger.py diff --git a/src/myagents/tracing/processor_interface.py b/src/agents/tracing/processor_interface.py similarity index 100% rename from src/myagents/tracing/processor_interface.py rename to src/agents/tracing/processor_interface.py diff --git a/src/myagents/tracing/processors.py b/src/agents/tracing/processors.py similarity index 100% rename from src/myagents/tracing/processors.py rename to src/agents/tracing/processors.py diff --git a/src/myagents/tracing/scope.py b/src/agents/tracing/scope.py similarity index 100% rename from src/myagents/tracing/scope.py rename to src/agents/tracing/scope.py diff --git a/src/myagents/tracing/setup.py b/src/agents/tracing/setup.py similarity index 100% rename from src/myagents/tracing/setup.py rename to src/agents/tracing/setup.py diff --git a/src/myagents/tracing/span_data.py b/src/agents/tracing/span_data.py similarity index 100% rename from src/myagents/tracing/span_data.py rename to src/agents/tracing/span_data.py diff --git a/src/myagents/tracing/spans.py b/src/agents/tracing/spans.py similarity index 100% rename from src/myagents/tracing/spans.py rename to src/agents/tracing/spans.py diff --git a/src/myagents/tracing/traces.py b/src/agents/tracing/traces.py similarity index 100% rename from src/myagents/tracing/traces.py rename to src/agents/tracing/traces.py diff --git a/src/myagents/tracing/util.py b/src/agents/tracing/util.py similarity index 100% rename from src/myagents/tracing/util.py rename to src/agents/tracing/util.py diff --git a/src/myagents/usage.py b/src/agents/usage.py similarity index 100% rename from src/myagents/usage.py rename to src/agents/usage.py diff --git a/src/myagents/util/__init__.py b/src/agents/util/__init__.py similarity index 100% rename from src/myagents/util/__init__.py rename to src/agents/util/__init__.py diff --git a/src/myagents/util/_coro.py b/src/agents/util/_coro.py similarity index 100% rename from src/myagents/util/_coro.py rename to src/agents/util/_coro.py diff --git a/src/myagents/util/_error_tracing.py b/src/agents/util/_error_tracing.py similarity index 100% rename from src/myagents/util/_error_tracing.py rename to src/agents/util/_error_tracing.py diff --git 
a/src/myagents/util/_json.py b/src/agents/util/_json.py similarity index 100% rename from src/myagents/util/_json.py rename to src/agents/util/_json.py diff --git a/src/myagents/util/_pretty_print.py b/src/agents/util/_pretty_print.py similarity index 100% rename from src/myagents/util/_pretty_print.py rename to src/agents/util/_pretty_print.py diff --git a/src/myagents/util/_transforms.py b/src/agents/util/_transforms.py similarity index 100% rename from src/myagents/util/_transforms.py rename to src/agents/util/_transforms.py diff --git a/src/myagents/util/_types.py b/src/agents/util/_types.py similarity index 100% rename from src/myagents/util/_types.py rename to src/agents/util/_types.py diff --git a/src/myagents/version.py b/src/agents/version.py similarity index 100% rename from src/myagents/version.py rename to src/agents/version.py diff --git a/src/myagents/voice/__init__.py b/src/agents/voice/__init__.py similarity index 100% rename from src/myagents/voice/__init__.py rename to src/agents/voice/__init__.py diff --git a/src/myagents/voice/events.py b/src/agents/voice/events.py similarity index 100% rename from src/myagents/voice/events.py rename to src/agents/voice/events.py diff --git a/src/myagents/voice/exceptions.py b/src/agents/voice/exceptions.py similarity index 100% rename from src/myagents/voice/exceptions.py rename to src/agents/voice/exceptions.py diff --git a/src/myagents/voice/imports.py b/src/agents/voice/imports.py similarity index 100% rename from src/myagents/voice/imports.py rename to src/agents/voice/imports.py diff --git a/src/myagents/voice/input.py b/src/agents/voice/input.py similarity index 100% rename from src/myagents/voice/input.py rename to src/agents/voice/input.py diff --git a/src/myagents/voice/model.py b/src/agents/voice/model.py similarity index 100% rename from src/myagents/voice/model.py rename to src/agents/voice/model.py diff --git a/src/myagents/voice/models/__init__.py b/src/agents/voice/models/__init__.py similarity index 100% rename from src/myagents/voice/models/__init__.py rename to src/agents/voice/models/__init__.py diff --git a/src/myagents/voice/models/openai_model_provider.py b/src/agents/voice/models/openai_model_provider.py similarity index 100% rename from src/myagents/voice/models/openai_model_provider.py rename to src/agents/voice/models/openai_model_provider.py diff --git a/src/myagents/voice/models/openai_stt.py b/src/agents/voice/models/openai_stt.py similarity index 100% rename from src/myagents/voice/models/openai_stt.py rename to src/agents/voice/models/openai_stt.py diff --git a/src/myagents/voice/models/openai_tts.py b/src/agents/voice/models/openai_tts.py similarity index 100% rename from src/myagents/voice/models/openai_tts.py rename to src/agents/voice/models/openai_tts.py diff --git a/src/myagents/voice/pipeline.py b/src/agents/voice/pipeline.py similarity index 100% rename from src/myagents/voice/pipeline.py rename to src/agents/voice/pipeline.py diff --git a/src/myagents/voice/pipeline_config.py b/src/agents/voice/pipeline_config.py similarity index 100% rename from src/myagents/voice/pipeline_config.py rename to src/agents/voice/pipeline_config.py diff --git a/src/myagents/voice/result.py b/src/agents/voice/result.py similarity index 100% rename from src/myagents/voice/result.py rename to src/agents/voice/result.py diff --git a/src/myagents/voice/utils.py b/src/agents/voice/utils.py similarity index 100% rename from src/myagents/voice/utils.py rename to src/agents/voice/utils.py diff --git 
a/src/myagents/voice/workflow.py b/src/agents/voice/workflow.py similarity index 100% rename from src/myagents/voice/workflow.py rename to src/agents/voice/workflow.py diff --git a/src/myagents/agent_server.py b/src/myagents/agent_server.py deleted file mode 100644 index 8b137891..00000000 --- a/src/myagents/agent_server.py +++ /dev/null @@ -1 +0,0 @@ - From baf4677d5d7e8b9e3922e957bec0b8ca93ec6cc8 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 09:49:05 +0000 Subject: [PATCH 012/230] 0413 pt2 reupdate to agents folder revert back --- src/agents/agent_server.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 031c3193..e595ab9e 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -4,7 +4,7 @@ from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware -from agents import Agent, Runner, tools +from agents import Agent, Runner, tool from datetime import datetime import httpx import json @@ -49,7 +49,7 @@ } Only return JSON in this format. """, - tools=[tools.browser] + tools=[tool.browser] ) content_agent = Agent( @@ -115,7 +115,7 @@ } } """, - tools=[tools.code_interpreter] + tools=[tool.code_interpreter] ) AGENT_MAP = { From 17c1641612fe9a36a9d11e5259ea0afa69b5bc9f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 09:57:39 +0000 Subject: [PATCH 013/230] 0413 pt2 reupdate to agents folder revert back --- src/agents/agent_server.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index e595ab9e..a599fc57 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -5,6 +5,7 @@ from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from agents import Agent, Runner, tool +from agents.extensions import browser from datetime import datetime import httpx import json @@ -49,7 +50,7 @@ } Only return JSON in this format. """, - tools=[tool.browser] + tools=[browser.Browser()] ) content_agent = Agent( From 829a39c3af3ca279d519e3f044d6b0c01ac6d0d6 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 10:03:47 +0000 Subject: [PATCH 014/230] 0413 pt2 reupdate to agents folder revert back --- src/agents/agent_server.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index a599fc57..e7659fa6 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -5,7 +5,7 @@ from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from agents import Agent, Runner, tool -from agents.extensions import browser +from agents.extensions.browser import Browser from datetime import datetime import httpx import json @@ -50,7 +50,7 @@ } Only return JSON in this format. 
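The tools wiring in these patches cycles through tools.browser, browser.Browser(), and a direct Browser import before being dropped, since none of those names exist in this SDK. If a custom capability is wanted, a function tool is one option. The sketch below is a minimal, hypothetical example: it assumes the SDK's function_tool decorator, and fetch_trending_hashtags is an illustrative helper, not something defined in these patches.

from agents import Agent, Runner, function_tool

@function_tool
def fetch_trending_hashtags(platform: str) -> str:
    """Return a short, comma-separated list of trending hashtags for a platform."""
    # Placeholder logic; a real version would query an external trends API.
    return "#smallbusiness, #marketingtips, #behindthescenes"

hashtag_aware_strategy_agent = Agent(
    name="StrategyAgent",
    instructions="Plan a 7-day campaign and respond in strict JSON.",
    tools=[fetch_trending_hashtags],
)

# Usage would mirror the rest of this series:
# result = await Runner.run(hashtag_aware_strategy_agent, input="Plan a launch week for a candle brand")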
""", - tools=[browser.Browser()] +tools=[Browser()] ) content_agent = Agent( @@ -116,7 +116,7 @@ } } """, - tools=[tool.code_interpreter] + tools=[tool.CodeInterpreter()] ) AGENT_MAP = { From 97f25202eb3ca2d0d4148a048fa49115c79b0b9b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 10:07:10 +0000 Subject: [PATCH 015/230] 0413 pt2 reupdate to agents folder revert back --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index e7659fa6..14720bc0 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -50,7 +50,7 @@ } Only return JSON in this format. """, -tools=[Browser()] +tools=[] ) content_agent = Agent( From 0cc1530ace9fbe16a7d1ea08953bcc68990bb759 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 10:10:42 +0000 Subject: [PATCH 016/230] 0413 pt2 reupdate to agents folder revert back --- src/agents/agent_server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 14720bc0..b6f0bbdd 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -5,7 +5,6 @@ from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from agents import Agent, Runner, tool -from agents.extensions.browser import Browser from datetime import datetime import httpx import json From d8ad562219c65bd4b96673022d7d60ec3d107470 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 13 Apr 2025 10:13:33 +0000 Subject: [PATCH 017/230] 0413 pt2 reupdate to agents folder revert back --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index b6f0bbdd..50a97c43 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -115,7 +115,7 @@ } } """, - tools=[tool.CodeInterpreter()] + tools=[] ) AGENT_MAP = { From ad581bbcfcac904270a81e9d7150bcdd275155a1 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 14 Apr 2025 01:48:57 +0000 Subject: [PATCH 018/230] update agent_server with new run_agent for 2 webhook --- src/agents/agent_server.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 50a97c43..556353c6 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -196,8 +196,13 @@ async def run_agent(request: Request): if webhook_url: async with httpx.AsyncClient() as client: try: - await client.post(webhook_url, json=session) - except Exception as e: - session["webhook_error"] = str(e) + if parsed_output is None: # Clarification flow + await client.post(clarification_webhook_url, json={ + "user_id": user_id, + "message": result.final_output, + "agent_type": agent_type, + }) + else: # Structured output flow + await client.post(structured_webhook_url, json=session) return session From eee3a028dff73558bbeec9656bb26e1836c6efea Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 14 Apr 2025 01:55:23 +0000 Subject: [PATCH 019/230] update agent_server with new run_agent for 2 webhook --- src/agents/agent_server.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git 
a/src/agents/agent_server.py b/src/agents/agent_server.py index 556353c6..7815eec0 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -197,12 +197,14 @@ async def run_agent(request: Request): async with httpx.AsyncClient() as client: try: if parsed_output is None: # Clarification flow - await client.post(clarification_webhook_url, json={ + await client.post(webhook_url, json={ "user_id": user_id, "message": result.final_output, "agent_type": agent_type, }) else: # Structured output flow - await client.post(structured_webhook_url, json=session) + await client.post(webhook_url, json=session) + except Exception as e: + session["webhook_error"] = str(e) return session From 931484f2b8f03144dfd81de13c47c824cf84d54f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 14 Apr 2025 06:06:07 +0000 Subject: [PATCH 020/230] change webhook structure and url --- src/agents/agent_server.py | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 7815eec0..cc42ffb4 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -10,6 +10,10 @@ import json import os +# === Predefined Webhook URLs === +STRUCTURED_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_return_output" +CLARIFICATION_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_chat_response" + app = FastAPI() app.add_middleware( @@ -132,7 +136,6 @@ async def run_agent(request: Request): user_id = data.get("user_id", "anonymous") linked_profile_strategy = data.get("linked_profile_strategy") agent_type = data.get("agent_type") # Optional shortcut - webhook_url = data.get("webhook_url") image_url = data.get("image_url") debug_info = {} @@ -146,7 +149,11 @@ async def run_agent(request: Request): parsed = json.loads(manager_result.final_output) agent_type = parsed.get("route_to") except Exception as e: - return {"needs_clarification": True, "message": "Could not understand intent.", "debug_info": str(e)} + return { + "needs_clarification": True, + "message": "Could not understand intent.", + "debug_info": str(e) + } agent = AGENT_MAP.get(agent_type) if not agent: @@ -192,19 +199,18 @@ async def run_agent(request: Request): if debug_info: session["debug_info"] = debug_info - # Step 4: Optionally push to external webhook (Make, Bubble, etc) - if webhook_url: - async with httpx.AsyncClient() as client: - try: - if parsed_output is None: # Clarification flow - await client.post(webhook_url, json={ - "user_id": user_id, - "message": result.final_output, - "agent_type": agent_type, - }) - else: # Structured output flow - await client.post(webhook_url, json=session) - except Exception as e: - session["webhook_error"] = str(e) + # Step 4: Post to correct webhook + async with httpx.AsyncClient() as client: + try: + if parsed_output: + await client.post(STRUCTURED_WEBHOOK_URL, json=session) + else: + await client.post(CLARIFICATION_WEBHOOK_URL, json={ + "user_id": user_id, + "message": result.final_output, + "agent_type": agent_type, + }) + except Exception as e: + session["webhook_error"] = str(e) return session From 330494df943d02438b91f682d23b137c56a1d99f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 11:20:32 +0900 Subject: [PATCH 021/230] Update agent_server.py to include uuid task_id --- src/agents/agent_server.py | 3 +++ 1 
file changed, 3 insertions(+) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index cc42ffb4..18647c38 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -134,6 +134,7 @@ async def run_agent(request: Request): data = await request.json() user_input = data.get("input", "") user_id = data.get("user_id", "anonymous") + task_id = data.get("task_id") linked_profile_strategy = data.get("linked_profile_strategy") agent_type = data.get("agent_type") # Optional shortcut image_url = data.get("image_url") @@ -185,6 +186,7 @@ async def run_agent(request: Request): # Step 3: Format AgentSession session = { + "task_id": task_id, "agent_type": agent_type, "user_id": user_id, "input_details": data.get("input_details", {}), @@ -209,6 +211,7 @@ async def run_agent(request: Request): "user_id": user_id, "message": result.final_output, "agent_type": agent_type, + "task_id": task_id }) except Exception as e: session["webhook_error"] = str(e) From bd5ee753f85a65aad5d33edc1882d9b2a142f04c Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 16:07:49 +0900 Subject: [PATCH 022/230] Update agent_server.py flatten input details json --- src/agents/agent_server.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 18647c38..92bafb70 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -133,6 +133,7 @@ async def run_agent(request: Request): data = await request.json() user_input = data.get("input", "") + input_details = data.get("input_details", {}) user_id = data.get("user_id", "anonymous") task_id = data.get("task_id") linked_profile_strategy = data.get("linked_profile_strategy") @@ -143,6 +144,16 @@ async def run_agent(request: Request): if image_url: user_input += f"\nHere is the image to consider: {image_url}" + # Step 0.5: Flatten input_details into input prompt (if not empty) + if input_details: + detail_strings = [] + for key, value in input_details.items(): + if value and value.lower() != "null": + detail_strings.append(f"{key}: {value}") + if detail_strings: + user_input += "\n\nAdditional details:\n" + "\n".join(detail_strings) + + # Step 1: If no agent_type, use Manager Agent to decide if not agent_type: manager_result = await Runner.run(manager_agent, input=user_input) From 0724378a37d0e28bf02b6818bd5fca9a750cb165 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 16:17:10 +0900 Subject: [PATCH 023/230] Update agent_server.py --- src/agents/agent_server.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 92bafb70..2d9f26b2 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -53,7 +53,7 @@ } Only return JSON in this format. 
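The flattening step added in patch 022 assumes every value in input_details is a string; value.lower() would raise a TypeError if Bubble sends a number or boolean. A slightly more defensive sketch of the same idea follows; flatten_details is an illustrative name, not a function in these patches.

def flatten_details(input_details: dict) -> str:
    """Render non-empty input_details entries as extra prompt lines."""
    lines = []
    for key, value in input_details.items():
        text = "" if value is None else str(value).strip()
        if text and text.lower() != "null":
            lines.append(f"{key}: {text}")
    return ("\n\nAdditional details:\n" + "\n".join(lines)) if lines else ""

# user_input += flatten_details(input_details)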
""", -tools=[] + tools=[] ) content_agent = Agent( @@ -119,7 +119,7 @@ } } """, - tools=[] + tools=[] ) AGENT_MAP = { @@ -144,7 +144,6 @@ async def run_agent(request: Request): if image_url: user_input += f"\nHere is the image to consider: {image_url}" - # Step 0.5: Flatten input_details into input prompt (if not empty) if input_details: detail_strings = [] for key, value in input_details.items(): @@ -152,9 +151,7 @@ async def run_agent(request: Request): detail_strings.append(f"{key}: {value}") if detail_strings: user_input += "\n\nAdditional details:\n" + "\n".join(detail_strings) - - # Step 1: If no agent_type, use Manager Agent to decide if not agent_type: manager_result = await Runner.run(manager_agent, input=user_input) try: @@ -171,7 +168,6 @@ async def run_agent(request: Request): if not agent: return {"error": f"Unknown agent type: {agent_type}"} - # Step 2: Run the selected agent result = await Runner.run(agent, input=user_input) if hasattr(result, "requires_user_input"): return { @@ -179,8 +175,13 @@ async def run_agent(request: Request): "message": result.requires_user_input, } + # === Clean Output Block === + clean_output = result.final_output.strip() + if clean_output.startswith("```") and clean_output.endswith("```"): + clean_output = clean_output.split("\n", 1)[-1].rsplit("\n", 1)[0] + try: - parsed_output = json.loads(result.final_output) + parsed_output = json.loads(clean_output) output_type = parsed_output.get("output_type") output_details = parsed_output.get("details") contains_image = parsed_output.get("contains_image", False) @@ -195,7 +196,6 @@ async def run_agent(request: Request): debug_info["validation_error"] = str(e) debug_info["raw_output"] = result.final_output - # Step 3: Format AgentSession session = { "task_id": task_id, "agent_type": agent_type, @@ -212,7 +212,6 @@ async def run_agent(request: Request): if debug_info: session["debug_info"] = debug_info - # Step 4: Post to correct webhook async with httpx.AsyncClient() as client: try: if parsed_output: From 241294cdc46397bb28df522a7f8d50864e792ab4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 17:41:29 +0900 Subject: [PATCH 024/230] Create agent_profilebuilder.py created separate py file for agent_profilebuilder.py. premise is that it will provide the required context based on user provided personal profile info --- src/agents/agent_profilebuilder.py | 89 ++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 src/agents/agent_profilebuilder.py diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py new file mode 100644 index 00000000..319dd24a --- /dev/null +++ b/src/agents/agent_profilebuilder.py @@ -0,0 +1,89 @@ +import sys +import os +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + +from fastapi import APIRouter, Request +from myagents import Agent, Runner +from datetime import datetime +import json +import httpx + +router = APIRouter() + +# Define the ProfileBuilder agent +profile_builder_agent = Agent( + name="ProfileBuilderAgent", + instructions=""" +You are a profile builder assistant. +Based on the input text and optionally any linked information, construct a structured influencer profile. + +Respond in the following format: +{ + "output_type": "structured_profile", + "contains_image": false, + "details": { + "profile_summary": "...", + "prompt_snippet": { + "tone": "...", + "goal": "...", + "platform": "..." 
+ } + } +} +Only respond in this format. +""" +) + +@router.post("/profilebuilder") +async def build_profile(request: Request): + data = await request.json() + user_input = data.get("input", "") + user_id = data.get("user_id", "anonymous") + webhook_url = data.get("webhook_url") + debug_info = {} + + result = await Runner.run(profile_builder_agent, input=user_input) + + # === Clean output block (remove markdown code block formatting) === + clean_output = result.final_output.strip() + if clean_output.startswith("```") and clean_output.endswith("```"): + clean_output = clean_output.split("\n", 1)[-1].rsplit("\n", 1)[0] + + try: + parsed_output = json.loads(clean_output) + output_type = parsed_output.get("output_type") + output_details = parsed_output.get("details") + contains_image = parsed_output.get("contains_image", False) + + if not output_type or not output_details: + raise ValueError("Missing required output keys") + + profile_summary = output_details.get("profile_summary") + prompt_snippet = output_details.get("prompt_snippet") + + except Exception as e: + output_type = "raw_text" + profile_summary = result.final_output + prompt_snippet = {} + contains_image = False + debug_info["validation_error"] = str(e) + debug_info["raw_output"] = result.final_output + + profile_data = { + "user_id": user_id, + "profile_summary_text": profile_summary, + "profile_prompt_snippet": prompt_snippet, + "created_at": datetime.utcnow().isoformat(), + } + + if debug_info: + profile_data["debug_info"] = debug_info + + if webhook_url: + async with httpx.AsyncClient() as client: + try: + await client.post(webhook_url, json=profile_data) + except Exception as e: + profile_data["webhook_error"] = str(e) + + return profile_data From dbf5f85c53ace8fcc32d4717262edf7cd78b9118 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 17:56:33 +0900 Subject: [PATCH 025/230] Update agent_server.py --- src/agents/agent_server.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 2d9f26b2..d4370e1e 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -10,6 +10,14 @@ import json import os +# === Routing the other agent.py files +from agent_onboarding import router as onboarding_router +from agent_profilebuilder import router as profilebuilder_router + +app.include_router(onboarding_router) +app.include_router(profilebuilder_router) + + # === Predefined Webhook URLs === STRUCTURED_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_return_output" CLARIFICATION_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_chat_response" From b27a10a5f1c3ddd15a6ca0851ad9adac74b394ec Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 17:56:44 +0900 Subject: [PATCH 026/230] Rename agents_onboarding.py to agent_onboarding.py --- src/agents/{agents_onboarding.py => agent_onboarding.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/agents/{agents_onboarding.py => agent_onboarding.py} (100%) diff --git a/src/agents/agents_onboarding.py b/src/agents/agent_onboarding.py similarity index 100% rename from src/agents/agents_onboarding.py rename to src/agents/agent_onboarding.py From 893dd0d6ebeed3480c1361dc7d43881f0b20e937 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 18:02:12 +0900 
Subject: [PATCH 027/230] Update agent_server.py --- src/agents/agent_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index d4370e1e..27da7548 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -11,8 +11,8 @@ import os # === Routing the other agent.py files -from agent_onboarding import router as onboarding_router -from agent_profilebuilder import router as profilebuilder_router +from .agent_onboarding import router as onboarding_router +from .agent_profilebuilder import router as profilebuilder_router app.include_router(onboarding_router) app.include_router(profilebuilder_router) From daf37d1103e2a7d9a2e2ca107b3b40ea951176a9 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 18:08:51 +0900 Subject: [PATCH 028/230] Update agent_onboarding.py --- src/agents/agent_onboarding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_onboarding.py b/src/agents/agent_onboarding.py index a396cbfa..18eb7aa3 100644 --- a/src/agents/agent_onboarding.py +++ b/src/agents/agent_onboarding.py @@ -3,7 +3,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from fastapi import APIRouter, Request -from myagents import Agent, Runner +from agents import Agent, Runner from datetime import datetime import json import httpx From afe9c275cdaf515829bd12ef5bbfc685eaed7b6f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 18:09:12 +0900 Subject: [PATCH 029/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index 319dd24a..e2d058ca 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -3,7 +3,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from fastapi import APIRouter, Request -from myagents import Agent, Runner +from agents import Agent, Runner from datetime import datetime import json import httpx From 5aeae178f780ff4638c71eb699d05e7a315db03b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 18:17:47 +0900 Subject: [PATCH 030/230] Update agent_server.py --- src/agents/agent_server.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 27da7548..d0cdb2ea 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -10,7 +10,8 @@ import json import os -# === Routing the other agent.py files +# === instantiate FastAPI, then include routers for other agent.py files === +app = FastAPI() from .agent_onboarding import router as onboarding_router from .agent_profilebuilder import router as profilebuilder_router From 70c5451b92523bd180df582aa7bc6c645324f321 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 18:26:15 +0900 Subject: [PATCH 031/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index e2d058ca..fbe59f2a 100644 --- a/src/agents/agent_profilebuilder.py +++ 
b/src/agents/agent_profilebuilder.py @@ -10,6 +10,9 @@ router = APIRouter() +# === Predefined Webhook URLs === +WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profilebuilder_return" + # Define the ProfileBuilder agent profile_builder_agent = Agent( name="ProfileBuilderAgent", @@ -82,8 +85,8 @@ async def build_profile(request: Request): if webhook_url: async with httpx.AsyncClient() as client: try: - await client.post(webhook_url, json=profile_data) + await client.post(WEBHOOK_URL, json=session) except Exception as e: - profile_data["webhook_error"] = str(e) + session["webhook_error"] = str(e) return profile_data From d4f4640c1b46bcd50d74e21919d927f5a745e0ce Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 18:35:14 +0900 Subject: [PATCH 032/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index fbe59f2a..53a5958d 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -10,7 +10,7 @@ router = APIRouter() -# === Predefined Webhook URLs === +# === Predefined Webhook URL === WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profilebuilder_return" # Define the ProfileBuilder agent @@ -42,7 +42,6 @@ async def build_profile(request: Request): data = await request.json() user_input = data.get("input", "") user_id = data.get("user_id", "anonymous") - webhook_url = data.get("webhook_url") debug_info = {} result = await Runner.run(profile_builder_agent, input=user_input) @@ -82,11 +81,10 @@ async def build_profile(request: Request): if debug_info: profile_data["debug_info"] = debug_info - if webhook_url: - async with httpx.AsyncClient() as client: - try: - await client.post(WEBHOOK_URL, json=session) - except Exception as e: - session["webhook_error"] = str(e) + async with httpx.AsyncClient() as client: + try: + await client.post(WEBHOOK_URL, json=profile_data) + except Exception as e: + profile_data["webhook_error"] = str(e) return profile_data From fb9f0fdcf7ee21f03a93df86cf878b13e84ed48d Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 20:52:15 +0900 Subject: [PATCH 033/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index 53a5958d..82f6616d 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -10,7 +10,7 @@ router = APIRouter() -# === Predefined Webhook URL === +# Predefined webhook URL (set to your Bubble endpoint) WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profilebuilder_return" # Define the ProfileBuilder agent @@ -44,11 +44,13 @@ async def build_profile(request: Request): user_id = data.get("user_id", "anonymous") debug_info = {} + # Run the ProfileBuilder agent with the given user input result = await Runner.run(profile_builder_agent, input=user_input) - # === Clean output block (remove markdown code block formatting) === + # Clean the output in case it is wrapped in markdown code block formatting clean_output = result.final_output.strip() if clean_output.startswith("```") and clean_output.endswith("```"): + # Remove the first and last 
lines (the markdown fences) clean_output = clean_output.split("\n", 1)[-1].rsplit("\n", 1)[0] try: @@ -57,12 +59,8 @@ async def build_profile(request: Request): output_details = parsed_output.get("details") contains_image = parsed_output.get("contains_image", False) - if not output_type or not output_details: - raise ValueError("Missing required output keys") - profile_summary = output_details.get("profile_summary") prompt_snippet = output_details.get("prompt_snippet") - except Exception as e: output_type = "raw_text" profile_summary = result.final_output @@ -71,6 +69,7 @@ async def build_profile(request: Request): debug_info["validation_error"] = str(e) debug_info["raw_output"] = result.final_output + # Build the profile data that will be sent to Bubble profile_data = { "user_id": user_id, "profile_summary_text": profile_summary, @@ -81,6 +80,7 @@ async def build_profile(request: Request): if debug_info: profile_data["debug_info"] = debug_info + # Post the profile data to your predefined Bubble webhook async with httpx.AsyncClient() as client: try: await client.post(WEBHOOK_URL, json=profile_data) From 74a048901eb65c3e8f7dce1108a9923d91e4148c Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 21:04:51 +0900 Subject: [PATCH 034/230] Update agent_server.py --- src/agents/agent_server.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index d0cdb2ea..2cdac569 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -10,29 +10,29 @@ import json import os -# === instantiate FastAPI, then include routers for other agent.py files === +# === Instantiate FastAPI once === app = FastAPI() + +# === Add middleware to the app === +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# === Include routers from other agent files === from .agent_onboarding import router as onboarding_router from .agent_profilebuilder import router as profilebuilder_router app.include_router(onboarding_router) app.include_router(profilebuilder_router) - # === Predefined Webhook URLs === STRUCTURED_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_return_output" CLARIFICATION_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_chat_response" -app = FastAPI() - -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - # === Define Agents === manager_agent = Agent( name="Manager", From 54095ea0a77377a9d48a49bd2126e65f2aee3f47 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 21:15:26 +0900 Subject: [PATCH 035/230] Update agent_profilebuilder.py attempting websearch tool add --- src/agents/agent_profilebuilder.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index 82f6616d..47835d85 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -34,7 +34,11 @@ } } Only respond in this format. 
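Patches 025 through 034 converge on the same structural fix: the FastAPI app must be created exactly once before middleware or routers are attached, and the routers must use relative imports and pull Agent/Runner from the agents package. A condensed sketch of the ordering that patch 034 lands on, assuming the same router modules:

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()  # create the app once, before anything is attached to it

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Routers are included only after the app exists; relative imports keep them
# inside the src/agents package.
# from .agent_onboarding import router as onboarding_router
# from .agent_profilebuilder import router as profilebuilder_router
# app.include_router(onboarding_router)
# app.include_router(profilebuilder_router)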
-""" +""", + tools=[{ + "type": "web_search_preview", + "search_context_size": "low" + }] ) @router.post("/profilebuilder") From 03d37e4d2476d19876b6db33e6e26b54d866077a Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 21:21:33 +0900 Subject: [PATCH 036/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 31 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index 47835d85..86a327ed 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -4,6 +4,7 @@ from fastapi import APIRouter, Request from agents import Agent, Runner +from agents.tool import Tool # Updated: import the Tool class from datetime import datetime import json import httpx @@ -13,32 +14,31 @@ # Predefined webhook URL (set to your Bubble endpoint) WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profilebuilder_return" -# Define the ProfileBuilder agent +# Define the ProfileBuilder agent with web search capability, +# now using a Tool instance with the expected attributes. profile_builder_agent = Agent( name="ProfileBuilderAgent", instructions=""" -You are a profile builder assistant. -Based on the input text and optionally any linked information, construct a structured influencer profile. +You are a profile builder assistant with web search capability. +Based on the input text and any optionally linked external information, perform a web search for publicly available details about the influencer using the provided web search tool. Use any relevant data you find to enrich the influencer's profile. +Then, construct a structured influencer profile that includes a concise profile summary and a prompt snippet with key characteristics. Respond in the following format: { "output_type": "structured_profile", "contains_image": false, "details": { - "profile_summary": "...", + "profile_summary": "A concise summary of the influencer that includes details from web search if applicable.", "prompt_snippet": { - "tone": "...", - "goal": "...", - "platform": "..." + "tone": "The influencer's style (e.g., playful, professional, authentic)", + "goal": "Key content goals (e.g., brand storytelling, engagement)", + "platform": "Primary platform (e.g., Instagram)" } } } -Only respond in this format. +Only reply in this format. """, - tools=[{ - "type": "web_search_preview", - "search_context_size": "low" - }] + tools=[Tool(name="web_search_preview", search_context_size="low")] ) @router.post("/profilebuilder") @@ -48,13 +48,12 @@ async def build_profile(request: Request): user_id = data.get("user_id", "anonymous") debug_info = {} - # Run the ProfileBuilder agent with the given user input + # Run the ProfileBuilder agent with the given user input. result = await Runner.run(profile_builder_agent, input=user_input) - # Clean the output in case it is wrapped in markdown code block formatting + # Clean the output in case it is wrapped in markdown code block formatting. 
clean_output = result.final_output.strip() if clean_output.startswith("```") and clean_output.endswith("```"): - # Remove the first and last lines (the markdown fences) clean_output = clean_output.split("\n", 1)[-1].rsplit("\n", 1)[0] try: @@ -73,7 +72,6 @@ async def build_profile(request: Request): debug_info["validation_error"] = str(e) debug_info["raw_output"] = result.final_output - # Build the profile data that will be sent to Bubble profile_data = { "user_id": user_id, "profile_summary_text": profile_summary, @@ -84,7 +82,6 @@ async def build_profile(request: Request): if debug_info: profile_data["debug_info"] = debug_info - # Post the profile data to your predefined Bubble webhook async with httpx.AsyncClient() as client: try: await client.post(WEBHOOK_URL, json=profile_data) From b474ed3427a606069416c7af3e0d1f514699b748 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 21:24:58 +0900 Subject: [PATCH 037/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index 86a327ed..58ad651c 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -4,7 +4,6 @@ from fastapi import APIRouter, Request from agents import Agent, Runner -from agents.tool import Tool # Updated: import the Tool class from datetime import datetime import json import httpx @@ -14,8 +13,9 @@ # Predefined webhook URL (set to your Bubble endpoint) WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profilebuilder_return" -# Define the ProfileBuilder agent with web search capability, -# now using a Tool instance with the expected attributes. +# Define the ProfileBuilder agent with web search capability. +# Instead of instantiating a Tool (which is defined as a Union), we pass a dictionary +# with the required keys as per the OpenAI tools-web-search documentation. profile_builder_agent = Agent( name="ProfileBuilderAgent", instructions=""" @@ -38,7 +38,8 @@ } Only reply in this format. """, - tools=[Tool(name="web_search_preview", search_context_size="low")] + # Use a dict for the tool configuration. + tools=[{"type": "web_search_preview", "search_context_size": "low"}] ) @router.post("/profilebuilder") From fccc3dd83807894539bb403c8cf056ee4d26ef3f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 16 Apr 2025 21:31:45 +0900 Subject: [PATCH 038/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index 58ad651c..2560ecaf 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -4,6 +4,7 @@ from fastapi import APIRouter, Request from agents import Agent, Runner +from agents.tool import WebSearchTool # Import the concrete WebSearchTool class from datetime import datetime import json import httpx @@ -14,8 +15,8 @@ WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profilebuilder_return" # Define the ProfileBuilder agent with web search capability. -# Instead of instantiating a Tool (which is defined as a Union), we pass a dictionary -# with the required keys as per the OpenAI tools-web-search documentation. 
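For reference, the web search tool configuration this series arrives at takes two forms: the plain WebSearchTool from patch 038, and the per-request variant from patch 039 that passes an approximate user_location when the caller supplies a locale. A condensed sketch of both, with an illustrative locale value:

from agents.tool import WebSearchTool

# Static configuration (patch 038):
tools = [WebSearchTool(search_context_size="low")]

# Per-request configuration (patch 039): tailor the search location when the
# caller supplies a locale string.
locale_text = "Seoul"  # illustrative value; in the endpoint this comes from the request body
user_loc = {"type": "approximate", "region": locale_text} if locale_text else None
tools = [WebSearchTool(user_location=user_loc, search_context_size="low")]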
+# We instantiate WebSearchTool directly; note that WebSearchTool accepts an optional 'user_location' +# and a 'search_context_size'. Here we set search_context_size to "low". profile_builder_agent = Agent( name="ProfileBuilderAgent", instructions=""" @@ -38,8 +39,7 @@ } Only reply in this format. """, - # Use a dict for the tool configuration. - tools=[{"type": "web_search_preview", "search_context_size": "low"}] + tools=[WebSearchTool(search_context_size="low")] ) @router.post("/profilebuilder") From 49eac751e3d73aa79f4b1e081afbed1cec149f9f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 17 Apr 2025 11:57:44 +0900 Subject: [PATCH 039/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 102 ++++++++++++++++------------- 1 file changed, 55 insertions(+), 47 deletions(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index 2560ecaf..8bb6b28c 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -4,85 +4,93 @@ from fastapi import APIRouter, Request from agents import Agent, Runner -from agents.tool import WebSearchTool # Import the concrete WebSearchTool class +from agents.tool import WebSearchTool from datetime import datetime import json import httpx router = APIRouter() -# Predefined webhook URL (set to your Bubble endpoint) +# Predefined Bubble webhook URL WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profilebuilder_return" -# Define the ProfileBuilder agent with web search capability. -# We instantiate WebSearchTool directly; note that WebSearchTool accepts an optional 'user_location' -# and a 'search_context_size'. Here we set search_context_size to "low". +# ProfileBuilder agent skeleton; tools will be set per-request for dynamic locale/fallback profile_builder_agent = Agent( name="ProfileBuilderAgent", instructions=""" You are a profile builder assistant with web search capability. -Based on the input text and any optionally linked external information, perform a web search for publicly available details about the influencer using the provided web search tool. Use any relevant data you find to enrich the influencer's profile. -Then, construct a structured influencer profile that includes a concise profile summary and a prompt snippet with key characteristics. -Respond in the following format: +You will receive a set of key-value inputs (e.g., profile_uuid, handle URL, etc.). +Your job: +1. Use the provided fields (including fallback follower count if given). +2. If a locale is provided, use it to tailor the web search tool's user_location. +3. Perform web searches and reasoning to determine follower_count, posting_style, industry, engagement_rate, and any notable public context. +4. Summarize this into JSON as follows: { "output_type": "structured_profile", "contains_image": false, "details": { - "profile_summary": "A concise summary of the influencer that includes details from web search if applicable.", - "prompt_snippet": { - "tone": "The influencer's style (e.g., playful, professional, authentic)", - "goal": "Key content goals (e.g., brand storytelling, engagement)", - "platform": "Primary platform (e.g., Instagram)" - } + "profile_uuid": "...", + "summary": "Concise profile summary...", + "prompt_snippet": { "tone": "...", "goal": "...", "platform": "..." }, + "follower_count": 12345, + "posting_style": "...", + "industry": "...", + "engagement_rate": "...", + "additional_context": "..." 
} } -Only reply in this format. +Only return JSON with exactly these fields—no markdown or commentary. """, - tools=[WebSearchTool(search_context_size="low")] + tools=[] ) @router.post("/profilebuilder") async def build_profile(request: Request): data = await request.json() - user_input = data.get("input", "") - user_id = data.get("user_id", "anonymous") - debug_info = {} + # Extract core identifiers and optional fallbacks + profile_uuid = data.pop("profile_uuid", None) + provided_fc = data.pop("provided_follower_count", None) + locale_text = data.pop("locale", None) - # Run the ProfileBuilder agent with the given user input. - result = await Runner.run(profile_builder_agent, input=user_input) + # Build tool list dynamically based on locale + user_loc = {"type": "approximate", "region": locale_text} if locale_text else None + tools = [WebSearchTool(user_location=user_loc, search_context_size="low")] + profile_builder_agent.tools = tools - # Clean the output in case it is wrapped in markdown code block formatting. - clean_output = result.final_output.strip() - if clean_output.startswith("```") and clean_output.endswith("```"): - clean_output = clean_output.split("\n", 1)[-1].rsplit("\n", 1)[0] + # Flatten remaining inputs into prompt lines + prompt_lines = [] + for key, val in data.items(): + if val not in (None, "", "null"): + prompt_lines.append(f"{key}: {val}") + if provided_fc is not None: + prompt_lines.append(f"Provided follower count: {provided_fc}") - try: - parsed_output = json.loads(clean_output) - output_type = parsed_output.get("output_type") - output_details = parsed_output.get("details") - contains_image = parsed_output.get("contains_image", False) + # Construct the agent prompt + agent_input = f"Profile UUID: {profile_uuid}\n" + "\n".join(prompt_lines) + + # Invoke the agent + result = await Runner.run(profile_builder_agent, input=agent_input) - profile_summary = output_details.get("profile_summary") - prompt_snippet = output_details.get("prompt_snippet") - except Exception as e: - output_type = "raw_text" - profile_summary = result.final_output - prompt_snippet = {} - contains_image = False - debug_info["validation_error"] = str(e) - debug_info["raw_output"] = result.final_output + # Clean markdown fences + output = result.final_output.strip() + if output.startswith("```") and output.endswith("```"): + output = output.split("\n", 1)[-1].rsplit("\n", 1)[0] - profile_data = { - "user_id": user_id, - "profile_summary_text": profile_summary, - "profile_prompt_snippet": prompt_snippet, - "created_at": datetime.utcnow().isoformat(), - } + # Parse agent JSON response + try: + parsed = json.loads(output) + details = parsed.get("details", {}) + except Exception: + details = {} - if debug_info: - profile_data["debug_info"] = debug_info + # Build profile_data payload dynamically + profile_data = {"profile_uuid": profile_uuid} + for k, v in details.items(): + profile_data[k] = v + profile_data["created_at"] = datetime.utcnow().isoformat() + # Post to Bubble webhook async with httpx.AsyncClient() as client: try: await client.post(WEBHOOK_URL, json=profile_data) From 17bc6ddeb708d2dffbcded3b1aee9ff9977d460b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 21 Apr 2025 03:59:13 +0000 Subject: [PATCH 040/230] latest sdk updates on routing webhook new files --- .env.sample | 10 ++++++++++ src/agents/agent_server.py | 6 ++++++ src/agents/util/services.py | 30 ++++++++++++++++++++++++++++++ src/agents/util/webhook.py | 16 
++++++++++++++++ src/schemas.py | 17 +++++++++++++++++ 5 files changed, 79 insertions(+) create mode 100644 .env.sample create mode 100644 src/agents/util/services.py create mode 100644 src/agents/util/webhook.py create mode 100644 src/schemas.py diff --git a/.env.sample b/.env.sample new file mode 100644 index 00000000..8d5e74ed --- /dev/null +++ b/.env.sample @@ -0,0 +1,10 @@ +# Bubble Web‑hook endpoints +BUBBLE_STRUCTURED_URL= "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_return_output" +BUBBLE_CHAT_URL= "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_chat_response" + +# (Optional) task‑specific override +BUBBLE_PROFILE_WEBHOOK= "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profile_output" + +# OpenAI, DB, etc. +OPENAI_API_KEY= "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" +DATABASE_URL= diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index cc42ffb4..89100027 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -10,6 +10,12 @@ import json import os +# === added dotenv 2lines below +from util.schemas import Inbound +from util.services import handle_new_task, handle_new_message +from dotenv import load_dotenv +load_dotenv() + # === Predefined Webhook URLs === STRUCTURED_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_return_output" CLARIFICATION_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_chat_response" diff --git a/src/agents/util/services.py b/src/agents/util/services.py new file mode 100644 index 00000000..82e5bef4 --- /dev/null +++ b/src/agents/util/services.py @@ -0,0 +1,30 @@ +from models import Task, AgentSession, Message # your ORM models +from utils.webhook import post_webhook, STRUCTURED_URL, CLARIFICATION_URL +from agents.runner import run_agent, decide_session # you already have + +async def handle_new_task(p): + task = Task.create(user_id=p.request_user.id, # provided by auth + title=p.user_prompt[:40], + type=p.task_type, + status="pending", + params=p.params) + first_def = "manager" if p.first_agent == "auto" else p.first_agent + session = AgentSession.create(task=task, + agent_definition=first_def, + status="running") + Message.create(task=task, role="user", content=p.user_prompt) + + await run_agent(session) # async call to your agent loop + return {"task_id": task.id} + +async def handle_new_message(p): + Message.create(task_id=p.task_id, + agent_session_id=p.agent_session_id, + role="user", + content=p.message) + + session = (AgentSession.get(p.agent_session_id) + if p.agent_session_id + else decide_session(p.task_id)) + await run_agent(session) + return {"ok": True} diff --git a/src/agents/util/webhook.py b/src/agents/util/webhook.py new file mode 100644 index 00000000..fd735c87 --- /dev/null +++ b/src/agents/util/webhook.py @@ -0,0 +1,16 @@ +import httpx, asyncio, logging, os + +STRUCTURED_URL = os.getenv("BUBBLE_STRUCTURED_URL") +CLARIFICATION_URL = os.getenv("BUBBLE_CHAT_URL") + +async def post_webhook(url: str, data: dict, retries: int = 3): + for i in range(retries): + try: + async with httpx.AsyncClient(timeout=10) as client: + r = await client.post(url, json=data) + r.raise_for_status() + return + except Exception as e: + if i == retries - 1: + logging.error("Webhook fail %s %s", url, e) + await asyncio.sleep(2 ** i) diff --git a/src/schemas.py b/src/schemas.py new file mode 100644 index 00000000..c26c7ecb --- /dev/null +++ b/src/schemas.py @@ -0,0 +1,17 @@ +from typing import Literal, 
Optional, Dict, Union +from pydantic import BaseModel, Field + +class NewTask(BaseModel): + action: Literal["new_task"] + task_type: str + user_prompt: str + params: Dict = Field(default_factory=dict) + first_agent: Optional[str] = "auto" + +class NewMessage(BaseModel): + action: Literal["new_message"] + task_id: str + message: str + agent_session_id: Optional[str] = None + +Inbound = Union[NewTask, NewMessage] From 7bf7b8a41ca390b5479f1b73e2d9d31516ccfc27 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 21 Apr 2025 06:34:36 +0000 Subject: [PATCH 041/230] feat: unified /agent endpoint + add util schemas, services, webhook helper --- src/agents/agent_server.py | 197 +++++++++++++----------------------- src/agents/util/schemas.py | 17 ++++ src/agents/util/services.py | 34 +++++++ src/agents/util/webhook.py | 16 +++ 4 files changed, 139 insertions(+), 125 deletions(-) create mode 100644 src/agents/util/schemas.py create mode 100644 src/agents/util/services.py create mode 100644 src/agents/util/webhook.py diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 2cdac569..327279ab 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,39 +1,33 @@ +# File: src/agents/agent_server.py + import sys import os +from dotenv import load_dotenv + +# 1) Load environment variables from .env +load_dotenv() + +# 2) Ensure src/ is on the Python path so “util” is importable sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -from fastapi import FastAPI, Request +from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -from agents import Agent, Runner, tool +from pydantic import parse_obj_as, ValidationError +import json from datetime import datetime import httpx -import json -import os - -# === Instantiate FastAPI once === -app = FastAPI() - -# === Add middleware to the app === -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) -# === Include routers from other agent files === -from .agent_onboarding import router as onboarding_router -from .agent_profilebuilder import router as profilebuilder_router - -app.include_router(onboarding_router) -app.include_router(profilebuilder_router) +# 3) Core SDK imports +from agents import Agent, Runner, tool -# === Predefined Webhook URLs === -STRUCTURED_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_return_output" -CLARIFICATION_WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_chat_response" +# 4) Pydantic schemas and service handlers +from util.schemas import Inbound # Union of NewTask, NewMessage +from util.services import handle_new_task, handle_new_message -# === Define Agents === +# ─────────────────────────────────────────────────────────── +# 5) Agent definitions (Phase 1: keep here for simplicity) +# ─────────────────────────────────────────────────────────── +# Manager: routes requests or asks for clarifications manager_agent = Agent( name="Manager", instructions=""" @@ -45,6 +39,7 @@ """ ) +# Strategy: builds a 7‑day social campaign plan strategy_agent = Agent( name="StrategyAgent", instructions=""" @@ -65,6 +60,7 @@ tools=[] ) +# Content: writes social post variants content_agent = Agent( name="ContentAgent", instructions=""" @@ -90,6 +86,7 @@ tools=[] ) +# Repurpose: converts posts into new formats repurpose_agent = Agent( 
name="RepurposeAgent", instructions=""" @@ -113,6 +110,7 @@ tools=[] ) +# Feedback: critiques content and suggests improvements feedback_agent = Agent( name="FeedbackAgent", instructions=""" @@ -131,108 +129,57 @@ tools=[] ) +# Map Manager’s routing keys to Agent instances AGENT_MAP = { - "strategy": strategy_agent, - "content": content_agent, + "strategy": strategy_agent, + "content": content_agent, "repurpose": repurpose_agent, - "feedback": feedback_agent, + "feedback": feedback_agent, } +# ─────────────────────────────────────────────────────────── -@app.post("/agent") -async def run_agent(request: Request): - data = await request.json() - user_input = data.get("input", "") - input_details = data.get("input_details", {}) - user_id = data.get("user_id", "anonymous") - task_id = data.get("task_id") - linked_profile_strategy = data.get("linked_profile_strategy") - agent_type = data.get("agent_type") # Optional shortcut - image_url = data.get("image_url") - debug_info = {} - - if image_url: - user_input += f"\nHere is the image to consider: {image_url}" - - if input_details: - detail_strings = [] - for key, value in input_details.items(): - if value and value.lower() != "null": - detail_strings.append(f"{key}: {value}") - if detail_strings: - user_input += "\n\nAdditional details:\n" + "\n".join(detail_strings) - - if not agent_type: - manager_result = await Runner.run(manager_agent, input=user_input) - try: - parsed = json.loads(manager_result.final_output) - agent_type = parsed.get("route_to") - except Exception as e: - return { - "needs_clarification": True, - "message": "Could not understand intent.", - "debug_info": str(e) - } - - agent = AGENT_MAP.get(agent_type) - if not agent: - return {"error": f"Unknown agent type: {agent_type}"} - - result = await Runner.run(agent, input=user_input) - if hasattr(result, "requires_user_input"): - return { - "needs_clarification": True, - "message": result.requires_user_input, - } - - # === Clean Output Block === - clean_output = result.final_output.strip() - if clean_output.startswith("```") and clean_output.endswith("```"): - clean_output = clean_output.split("\n", 1)[-1].rsplit("\n", 1)[0] +# 6) Instantiate FastAPI +app = FastAPI() + +# 7) CORS middleware (adjust allow_origins as needed) +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) +# 8) Include your existing agent routers +from .agent_onboarding import router as onboarding_router +from .agent_profilebuilder import router as profilebuilder_router + +app.include_router(onboarding_router) +app.include_router(profilebuilder_router) + +# 9) Unified /agent endpoint +@app.post("/agent") +async def agent_endpoint(request: Request): + """ + Handles all client calls: + - action = "new_task" + - action = "new_message" + - future actions as you add them to Inbound + """ + body = await request.json() try: - parsed_output = json.loads(clean_output) - output_type = parsed_output.get("output_type") - output_details = parsed_output.get("details") - contains_image = parsed_output.get("contains_image", False) - - if not output_type or not output_details: - raise ValueError("Missing required output keys") - except Exception as e: - parsed_output = None - output_type = "raw_text" - output_details = result.final_output - contains_image = False - debug_info["validation_error"] = str(e) - debug_info["raw_output"] = result.final_output - - session = { - "task_id": task_id, - "agent_type": agent_type, - "user_id": user_id, - 
"input_details": data.get("input_details", {}), - "output_type": output_type, - "contains_image": contains_image, - "output_details": output_details, - "linked_profile_strategy": linked_profile_strategy, - "source_content_piece": data.get("source_content_piece"), - "created_at": datetime.utcnow().isoformat(), - } - - if debug_info: - session["debug_info"] = debug_info - - async with httpx.AsyncClient() as client: - try: - if parsed_output: - await client.post(STRUCTURED_WEBHOOK_URL, json=session) - else: - await client.post(CLARIFICATION_WEBHOOK_URL, json={ - "user_id": user_id, - "message": result.final_output, - "agent_type": agent_type, - "task_id": task_id - }) - except Exception as e: - session["webhook_error"] = str(e) - - return session + payload = parse_obj_as(Inbound, body) + except ValidationError as e: + raise HTTPException(status_code=400, detail=e.errors()) + + if payload.action == "new_task": + return await handle_new_task(payload) + + elif payload.action == "new_message": + return await handle_new_message(payload) + + else: + raise HTTPException( + status_code=400, + detail=f"Unsupported action: {payload.action}" + ) diff --git a/src/agents/util/schemas.py b/src/agents/util/schemas.py new file mode 100644 index 00000000..c26c7ecb --- /dev/null +++ b/src/agents/util/schemas.py @@ -0,0 +1,17 @@ +from typing import Literal, Optional, Dict, Union +from pydantic import BaseModel, Field + +class NewTask(BaseModel): + action: Literal["new_task"] + task_type: str + user_prompt: str + params: Dict = Field(default_factory=dict) + first_agent: Optional[str] = "auto" + +class NewMessage(BaseModel): + action: Literal["new_message"] + task_id: str + message: str + agent_session_id: Optional[str] = None + +Inbound = Union[NewTask, NewMessage] diff --git a/src/agents/util/services.py b/src/agents/util/services.py new file mode 100644 index 00000000..d58b8dc0 --- /dev/null +++ b/src/agents/util/services.py @@ -0,0 +1,34 @@ +from your_orm_models import Task, AgentSession, Message # adapt import path +from util.webhook import post_webhook, STRUCTURED_URL, CLARIFICATION_URL +from agents.runner import run_agent, decide_session # your existing runner + +async def handle_new_task(p: NewTask): + task = Task.create( + user_id=p.request_user.id, + title=p.user_prompt[:40], + type=p.task_type, + status="pending", + params=p.params, + ) + first_def = "manager" if p.first_agent == "auto" else p.first_agent + session = AgentSession.create( + task=task, agent_definition=first_def, status="running" + ) + Message.create(task=task, role="user", content=p.user_prompt) + await run_agent(session) + return {"task_id": task.id} + +async def handle_new_message(p: NewMessage): + Message.create( + task_id=p.task_id, + agent_session_id=p.agent_session_id, + role="user", + content=p.message, + ) + session = ( + AgentSession.get(p.agent_session_id) + if p.agent_session_id + else decide_session(p.task_id) + ) + await run_agent(session) + return {"ok": True} diff --git a/src/agents/util/webhook.py b/src/agents/util/webhook.py new file mode 100644 index 00000000..825800d5 --- /dev/null +++ b/src/agents/util/webhook.py @@ -0,0 +1,16 @@ +import os, asyncio, logging, httpx + +STRUCTURED_URL = os.getenv("BUBBLE_STRUCTURED_URL") +CLARIFICATION_URL = os.getenv("BUBBLE_CHAT_URL") + +async def post_webhook(url: str, data: dict, retries: int = 3): + for i in range(retries): + try: + async with httpx.AsyncClient(timeout=10) as client: + r = await client.post(url, json=data) + r.raise_for_status() + return + except Exception as e: + 
if i == retries - 1: + logging.error("Webhook failed %s: %s", url, e) + await asyncio.sleep(2 ** i) From 2ea8c18854c133cbd4bd39b655b371a7a8254382 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 21 Apr 2025 07:29:43 +0000 Subject: [PATCH 042/230] try again --- .env.sample | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.env.sample b/.env.sample index 8d5e74ed..08e8ddf6 100644 --- a/.env.sample +++ b/.env.sample @@ -6,5 +6,5 @@ BUBBLE_CHAT_URL= "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/opena BUBBLE_PROFILE_WEBHOOK= "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profile_output" # OpenAI, DB, etc. -OPENAI_API_KEY= "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" +OPENAI_API_KEY= "YOUR_OPENAI_KEY_HERE" DATABASE_URL= From 57935c13cbe6b3596b80805c6b6957412443841b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 21 Apr 2025 07:35:41 +0000 Subject: [PATCH 043/230] =?UTF-8?q?fix:=20relocate=20util=20package=20to?= =?UTF-8?q?=20top=E2=80=91level=20src/util=20so=20imports=20work?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/{agents => }/util/__init__.py | 0 src/{agents => }/util/_coro.py | 0 src/{agents => }/util/_error_tracing.py | 0 src/{agents => }/util/_json.py | 0 src/{agents => }/util/_pretty_print.py | 0 src/{agents => }/util/_transforms.py | 0 src/{agents => }/util/_types.py | 0 src/{agents => }/util/schemas.py | 0 src/{agents => }/util/services.py | 0 src/{agents => }/util/webhook.py | 0 10 files changed, 0 insertions(+), 0 deletions(-) rename src/{agents => }/util/__init__.py (100%) rename src/{agents => }/util/_coro.py (100%) rename src/{agents => }/util/_error_tracing.py (100%) rename src/{agents => }/util/_json.py (100%) rename src/{agents => }/util/_pretty_print.py (100%) rename src/{agents => }/util/_transforms.py (100%) rename src/{agents => }/util/_types.py (100%) rename src/{agents => }/util/schemas.py (100%) rename src/{agents => }/util/services.py (100%) rename src/{agents => }/util/webhook.py (100%) diff --git a/src/agents/util/__init__.py b/src/util/__init__.py similarity index 100% rename from src/agents/util/__init__.py rename to src/util/__init__.py diff --git a/src/agents/util/_coro.py b/src/util/_coro.py similarity index 100% rename from src/agents/util/_coro.py rename to src/util/_coro.py diff --git a/src/agents/util/_error_tracing.py b/src/util/_error_tracing.py similarity index 100% rename from src/agents/util/_error_tracing.py rename to src/util/_error_tracing.py diff --git a/src/agents/util/_json.py b/src/util/_json.py similarity index 100% rename from src/agents/util/_json.py rename to src/util/_json.py diff --git a/src/agents/util/_pretty_print.py b/src/util/_pretty_print.py similarity index 100% rename from src/agents/util/_pretty_print.py rename to src/util/_pretty_print.py diff --git a/src/agents/util/_transforms.py b/src/util/_transforms.py similarity index 100% rename from src/agents/util/_transforms.py rename to src/util/_transforms.py diff --git a/src/agents/util/_types.py b/src/util/_types.py similarity index 100% rename from src/agents/util/_types.py rename to src/util/_types.py diff --git a/src/agents/util/schemas.py b/src/util/schemas.py similarity index 100% rename from src/agents/util/schemas.py rename to src/util/schemas.py diff --git a/src/agents/util/services.py b/src/util/services.py similarity index 100% rename from 
src/agents/util/services.py rename to src/util/services.py diff --git a/src/agents/util/webhook.py b/src/util/webhook.py similarity index 100% rename from src/agents/util/webhook.py rename to src/util/webhook.py From 7725a6bc80a0fa2ccb5364aea7ee5b52e0c60cd6 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 21 Apr 2025 07:47:19 +0000 Subject: [PATCH 044/230] =?UTF-8?q?Revert=20"fix:=20relocate=20util=20pack?= =?UTF-8?q?age=20to=20top=E2=80=91level=20src/util=20so=20imports=20work"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 57935c13cbe6b3596b80805c6b6957412443841b. --- src/{ => agents}/util/__init__.py | 0 src/{ => agents}/util/_coro.py | 0 src/{ => agents}/util/_error_tracing.py | 0 src/{ => agents}/util/_json.py | 0 src/{ => agents}/util/_pretty_print.py | 0 src/{ => agents}/util/_transforms.py | 0 src/{ => agents}/util/_types.py | 0 src/{ => agents}/util/schemas.py | 0 src/{ => agents}/util/services.py | 0 src/{ => agents}/util/webhook.py | 0 10 files changed, 0 insertions(+), 0 deletions(-) rename src/{ => agents}/util/__init__.py (100%) rename src/{ => agents}/util/_coro.py (100%) rename src/{ => agents}/util/_error_tracing.py (100%) rename src/{ => agents}/util/_json.py (100%) rename src/{ => agents}/util/_pretty_print.py (100%) rename src/{ => agents}/util/_transforms.py (100%) rename src/{ => agents}/util/_types.py (100%) rename src/{ => agents}/util/schemas.py (100%) rename src/{ => agents}/util/services.py (100%) rename src/{ => agents}/util/webhook.py (100%) diff --git a/src/util/__init__.py b/src/agents/util/__init__.py similarity index 100% rename from src/util/__init__.py rename to src/agents/util/__init__.py diff --git a/src/util/_coro.py b/src/agents/util/_coro.py similarity index 100% rename from src/util/_coro.py rename to src/agents/util/_coro.py diff --git a/src/util/_error_tracing.py b/src/agents/util/_error_tracing.py similarity index 100% rename from src/util/_error_tracing.py rename to src/agents/util/_error_tracing.py diff --git a/src/util/_json.py b/src/agents/util/_json.py similarity index 100% rename from src/util/_json.py rename to src/agents/util/_json.py diff --git a/src/util/_pretty_print.py b/src/agents/util/_pretty_print.py similarity index 100% rename from src/util/_pretty_print.py rename to src/agents/util/_pretty_print.py diff --git a/src/util/_transforms.py b/src/agents/util/_transforms.py similarity index 100% rename from src/util/_transforms.py rename to src/agents/util/_transforms.py diff --git a/src/util/_types.py b/src/agents/util/_types.py similarity index 100% rename from src/util/_types.py rename to src/agents/util/_types.py diff --git a/src/util/schemas.py b/src/agents/util/schemas.py similarity index 100% rename from src/util/schemas.py rename to src/agents/util/schemas.py diff --git a/src/util/services.py b/src/agents/util/services.py similarity index 100% rename from src/util/services.py rename to src/agents/util/services.py diff --git a/src/util/webhook.py b/src/agents/util/webhook.py similarity index 100% rename from src/util/webhook.py rename to src/agents/util/webhook.py From ce845e7eb2ce53641458fa09f579d4712669dc2f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 21 Apr 2025 08:11:09 +0000 Subject: [PATCH 045/230] updated utils import --- src/agents/agent_server.py | 4 ++-- src/agents/util/services.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) 
diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 327279ab..fe531a8c 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -21,8 +21,8 @@ from agents import Agent, Runner, tool # 4) Pydantic schemas and service handlers -from util.schemas import Inbound # Union of NewTask, NewMessage -from util.services import handle_new_task, handle_new_message +from agents.util.schemas import Inbound # Union of NewTask, NewMessage +from agents.util.services import handle_new_task, handle_new_message # ─────────────────────────────────────────────────────────── # 5) Agent definitions (Phase 1: keep here for simplicity) diff --git a/src/agents/util/services.py b/src/agents/util/services.py index d58b8dc0..7e8f3142 100644 --- a/src/agents/util/services.py +++ b/src/agents/util/services.py @@ -1,5 +1,5 @@ from your_orm_models import Task, AgentSession, Message # adapt import path -from util.webhook import post_webhook, STRUCTURED_URL, CLARIFICATION_URL +from agents.util.webhook import post_webhook, STRUCTURED_URL, CLARIFICATION_URL from agents.runner import run_agent, decide_session # your existing runner async def handle_new_task(p: NewTask): From eaab7002a64a56095c71459fc1bae499c8d0fda5 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 21 Apr 2025 08:18:12 +0000 Subject: [PATCH 046/230] refactor: single-file unified /agent with manager & agents defined --- src/agents/agent_server.py | 119 +++++++++++++++++++++++-------------- 1 file changed, 74 insertions(+), 45 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index fe531a8c..1b76937a 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,33 +1,31 @@ # File: src/agents/agent_server.py -import sys import os +import sys +import json +import asyncio +from datetime import datetime from dotenv import load_dotenv -# 1) Load environment variables from .env +# 1) Load environment variables load_dotenv() -# 2) Ensure src/ is on the Python path so “util” is importable +# 2) Add project src folder so "agents" can import its own util subpackage sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) +import httpx from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -from pydantic import parse_obj_as, ValidationError -import json -from datetime import datetime -import httpx # 3) Core SDK imports from agents import Agent, Runner, tool -# 4) Pydantic schemas and service handlers -from agents.util.schemas import Inbound # Union of NewTask, NewMessage -from agents.util.services import handle_new_task, handle_new_message +# 4) SDK guardrail types (so guardrail imports work) +from agents.util._types import MaybeAwaitable # ─────────────────────────────────────────────────────────── -# 5) Agent definitions (Phase 1: keep here for simplicity) +# 5) Agent definitions (Phase 1: keep them here) # ─────────────────────────────────────────────────────────── -# Manager: routes requests or asks for clarifications manager_agent = Agent( name="Manager", instructions=""" @@ -39,7 +37,6 @@ """ ) -# Strategy: builds a 7‑day social campaign plan strategy_agent = Agent( name="StrategyAgent", instructions=""" @@ -60,7 +57,6 @@ tools=[] ) -# Content: writes social post variants content_agent = Agent( name="ContentAgent", instructions=""" @@ -86,7 +82,6 @@ tools=[] ) -# Repurpose: converts posts into new formats repurpose_agent = Agent( name="RepurposeAgent", 
instructions=""" @@ -110,7 +105,6 @@ tools=[] ) -# Feedback: critiques content and suggests improvements feedback_agent = Agent( name="FeedbackAgent", instructions=""" @@ -129,7 +123,6 @@ tools=[] ) -# Map Manager’s routing keys to Agent instances AGENT_MAP = { "strategy": strategy_agent, "content": content_agent, @@ -138,10 +131,8 @@ } # ─────────────────────────────────────────────────────────── -# 6) Instantiate FastAPI +# 6) FastAPI app setup app = FastAPI() - -# 7) CORS middleware (adjust allow_origins as needed) app.add_middleware( CORSMiddleware, allow_origins=["*"], @@ -150,36 +141,74 @@ allow_headers=["*"], ) -# 8) Include your existing agent routers +# 7) Your existing bubble‑hook routers (keep these unchanged) from .agent_onboarding import router as onboarding_router from .agent_profilebuilder import router as profilebuilder_router - app.include_router(onboarding_router) app.include_router(profilebuilder_router) -# 9) Unified /agent endpoint +# 8) Bubble webhook URLs +STRUCTURED_WEBHOOK_URL = os.getenv("BUBBLE_STRUCTURED_URL") +CLARIFICATION_WEBHOOK_URL = os.getenv("BUBBLE_CHAT_URL") + +# 9) Unified /agent endpoint handling both new_task and new_message @app.post("/agent") -async def agent_endpoint(request: Request): - """ - Handles all client calls: - - action = "new_task" - - action = "new_message" - - future actions as you add them to Inbound - """ - body = await request.json() - try: - payload = parse_obj_as(Inbound, body) - except ValidationError as e: - raise HTTPException(status_code=400, detail=e.errors()) - - if payload.action == "new_task": - return await handle_new_task(payload) - - elif payload.action == "new_message": - return await handle_new_message(payload) +async def agent_endpoint(req: Request): + data = await req.json() + action = data.get("action") + + # --- New Task --- + if action == "new_task": + user_input = data["user_prompt"] + # 1) Manager routes or asks clarification + mgr_result = await Runner.run(manager_agent, input=user_input) + try: + route = json.loads(mgr_result.final_output) + agent_type = route["route_to"] + except Exception: + raise HTTPException(400, "Manager failed to parse intent") + + # 2) Run the selected agent + agent = AGENT_MAP.get(agent_type) + if not agent: + raise HTTPException(400, f"Unknown agent: {agent_type}") + result = await Runner.run(agent, input=user_input) + + # 3) Send output back to Bubble + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, + "created_at": datetime.utcnow().isoformat(), + "output": result.final_output, + } + webhook = STRUCTURED_WEBHOOK_URL + if not result.final_output: + webhook = CLARIFICATION_WEBHOOK_URL + async with httpx.AsyncClient() as client: + await client.post(webhook, json=payload) + + return {"ok": True} + + # --- New Message --- + elif action == "new_message": + user_msg = data["message"] + sess = data.get("agent_session_id") + agent = AGENT_MAP.get(sess, manager_agent) + result = await Runner.run(agent, input=user_msg) + + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "created_at": datetime.utcnow().isoformat(), + "output": result.final_output, + } + webhook = STRUCTURED_WEBHOOK_URL + if not result.final_output: + webhook = CLARIFICATION_WEBHOOK_URL + async with httpx.AsyncClient() as client: + await client.post(webhook, json=payload) + + return {"ok": True} else: - raise HTTPException( - status_code=400, - detail=f"Unsupported action: {payload.action}" - ) + raise HTTPException(400, "Unknown action") From 
c76ad1073239f73ea4d1c71df01053cbe1439008 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 22 Apr 2025 12:54:54 +0900 Subject: [PATCH 047/230] Update agent_server.py --- src/agents/agent_server.py | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 1b76937a..6b973dfc 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -181,12 +181,18 @@ async def agent_endpoint(req: Request): "created_at": datetime.utcnow().isoformat(), "output": result.final_output, } - webhook = STRUCTURED_WEBHOOK_URL - if not result.final_output: - webhook = CLARIFICATION_WEBHOOK_URL + # --- updated webhook logic 04.22.2025 + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + is_structured = False + + webhook = STRUCTURED_WEBHOOK_URL if is_structured else CLARIFICATION_WEBHOOK_URL + async with httpx.AsyncClient() as client: + print(f"Selected webhook: {webhook}") await client.post(webhook, json=payload) - return {"ok": True} # --- New Message --- @@ -202,10 +208,17 @@ async def agent_endpoint(req: Request): "created_at": datetime.utcnow().isoformat(), "output": result.final_output, } - webhook = STRUCTURED_WEBHOOK_URL - if not result.final_output: - webhook = CLARIFICATION_WEBHOOK_URL + # --- updated webhook logic 04.22.2025 + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + is_structured = False + + webhook = STRUCTURED_WEBHOOK_URL if is_structured else CLARIFICATION_WEBHOOK_URL + async with httpx.AsyncClient() as client: + print(f"Selected webhook: {webhook}") await client.post(webhook, json=payload) return {"ok": True} From 45dfe9b662c30dd4b8a31676c478eba29a2a3d2b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 22 Apr 2025 13:27:31 +0900 Subject: [PATCH 048/230] Update agent_server.py --- src/agents/agent_server.py | 82 ++++++++++++++++++++++++++++++++------ 1 file changed, 70 insertions(+), 12 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 6b973dfc..8be48761 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -175,12 +175,41 @@ async def agent_endpoint(req: Request): result = await Runner.run(agent, input=user_input) # 3) Send output back to Bubble - payload = { - "task_id": data.get("task_id"), - "agent_type": agent_type, - "created_at": datetime.utcnow().isoformat(), - "output": result.final_output, - } + # --- Determine webhook type and structure payload + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + parsed_output = None + is_structured = False + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, # or sess if in new_message + "message": result.requires_user_input, + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, # or sess + "output_type": parsed_output.get("output_type"), + "output_details": parsed_output.get("details"), + "contains_image": parsed_output.get("contains_image", False), + "created_at": datetime.utcnow().isoformat() + } + 
else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, # or sess + "message": result.final_output, + "created_at": datetime.utcnow().isoformat() + } + # --- updated webhook logic 04.22.2025 try: parsed_output = json.loads(result.final_output) @@ -202,12 +231,41 @@ async def agent_endpoint(req: Request): agent = AGENT_MAP.get(sess, manager_agent) result = await Runner.run(agent, input=user_msg) - payload = { - "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "created_at": datetime.utcnow().isoformat(), - "output": result.final_output, - } + # --- Determine webhook type and structure payload + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + parsed_output = None + is_structured = False + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, # or sess if in new_message + "message": result.requires_user_input, + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, # or sess + "output_type": parsed_output.get("output_type"), + "output_details": parsed_output.get("details"), + "contains_image": parsed_output.get("contains_image", False), + "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, # or sess + "message": result.final_output, + "created_at": datetime.utcnow().isoformat() + } + # --- updated webhook logic 04.22.2025 try: parsed_output = json.loads(result.final_output) From c5b0b830643126233bc93864e9f0b53ba40cbe3e Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 22 Apr 2025 13:37:47 +0900 Subject: [PATCH 049/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 8be48761..c2a8591e 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -243,7 +243,7 @@ async def agent_endpoint(req: Request): webhook = CLARIFICATION_WEBHOOK_URL payload = { "task_id": data.get("task_id"), - "agent_type": agent_type, # or sess if in new_message + "agent_type": sess or "manager", "message": result.requires_user_input, "created_at": datetime.utcnow().isoformat() } From 80e54eff6295d33d7c67c76ff472923d93886cc9 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 23 Apr 2025 09:38:11 +0900 Subject: [PATCH 050/230] Update agent_server.py updating agent type logic --- src/agents/agent_server.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index c2a8591e..59f5ade2 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -173,6 +173,7 @@ async def agent_endpoint(req: Request): if not agent: raise HTTPException(400, f"Unknown agent: {agent_type}") result = await Runner.run(agent, input=user_input) + result.agent_type = agent_type # Explicitly tag result # 3) Send output back to Bubble # --- Determine webhook type and structure payload @@ -187,7 +188,7 @@ async def agent_endpoint(req: Request): webhook = CLARIFICATION_WEBHOOK_URL payload = { 
"task_id": data.get("task_id"), - "agent_type": agent_type, # or sess if in new_message + "agent_type": getattr(result, "agent_type", agent_type), "message": result.requires_user_input, "created_at": datetime.utcnow().isoformat() } @@ -195,7 +196,7 @@ async def agent_endpoint(req: Request): webhook = STRUCTURED_WEBHOOK_URL payload = { "task_id": data.get("task_id"), - "agent_type": agent_type, # or sess + "agent_type": getattr(result, "agent_type", agent_type), "output_type": parsed_output.get("output_type"), "output_details": parsed_output.get("details"), "contains_image": parsed_output.get("contains_image", False), @@ -230,6 +231,7 @@ async def agent_endpoint(req: Request): sess = data.get("agent_session_id") agent = AGENT_MAP.get(sess, manager_agent) result = await Runner.run(agent, input=user_msg) + result.agent_type = sess or "manager" # --- Determine webhook type and structure payload try: @@ -243,7 +245,7 @@ async def agent_endpoint(req: Request): webhook = CLARIFICATION_WEBHOOK_URL payload = { "task_id": data.get("task_id"), - "agent_type": sess or "manager", + "agent_type": getattr(result, "agent_type", sess or "manager"), "message": result.requires_user_input, "created_at": datetime.utcnow().isoformat() } @@ -251,7 +253,7 @@ async def agent_endpoint(req: Request): webhook = STRUCTURED_WEBHOOK_URL payload = { "task_id": data.get("task_id"), - "agent_type": agent_type, # or sess + "agent_type": getattr(result, "agent_type", sess or "manager"), "output_type": parsed_output.get("output_type"), "output_details": parsed_output.get("details"), "contains_image": parsed_output.get("contains_image", False), From 2c09c9dbb9861aa9a61a8f93c9f039ac635a9851 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 23 Apr 2025 10:34:58 +0900 Subject: [PATCH 051/230] Update agent_server.py --- src/agents/agent_server.py | 106 +++++++++++++++++-------------------- 1 file changed, 49 insertions(+), 57 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 59f5ade2..66e4f099 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -225,63 +225,55 @@ async def agent_endpoint(req: Request): await client.post(webhook, json=payload) return {"ok": True} - # --- New Message --- - elif action == "new_message": - user_msg = data["message"] - sess = data.get("agent_session_id") - agent = AGENT_MAP.get(sess, manager_agent) - result = await Runner.run(agent, input=user_msg) - result.agent_type = sess or "manager" - - # --- Determine webhook type and structure payload - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - parsed_output = None - is_structured = False - - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": getattr(result, "agent_type", sess or "manager"), - "message": result.requires_user_input, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": getattr(result, "agent_type", sess or "manager"), - "output_type": parsed_output.get("output_type"), - "output_details": parsed_output.get("details"), - "contains_image": parsed_output.get("contains_image", False), - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - 
"agent_type": agent_type, # or sess - "message": result.final_output, - "created_at": datetime.utcnow().isoformat() - } - - # --- updated webhook logic 04.22.2025 - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - is_structured = False - - webhook = STRUCTURED_WEBHOOK_URL if is_structured else CLARIFICATION_WEBHOOK_URL - - async with httpx.AsyncClient() as client: - print(f"Selected webhook: {webhook}") - await client.post(webhook, json=payload) - - return {"ok": True} + # --- New Message --- + elif action == "new_message": + user_msg = data.get("message") or data.get("user_prompt") + if user_msg is None: + raise HTTPException(status_code=422, detail="Missing 'message' or 'user_prompt' in new_message") + + sess = data.get("agent_session_id") + agent = AGENT_MAP.get(sess, manager_agent) + result = await Runner.run(agent, input=user_msg) + + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + parsed_output = None + is_structured = False + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "message": result.requires_user_input, + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "output_type": parsed_output.get("output_type"), + "output_details": parsed_output.get("details"), + "contains_image": parsed_output.get("contains_image", False), + "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "message": result.final_output, + "created_at": datetime.utcnow().isoformat() + } + + async with httpx.AsyncClient() as client: + print(f"Selected webhook: {webhook}") + await client.post(webhook, json=payload) + + return {"ok": True} else: raise HTTPException(400, "Unknown action") From 87eea4d0846349771a23c2938484dbb716eaf8f6 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 23 Apr 2025 10:40:06 +0900 Subject: [PATCH 052/230] Update agent_server.py --- src/agents/agent_server.py | 98 +++++++++++++++++++------------------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 66e4f099..ed6e0b00 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -225,55 +225,55 @@ async def agent_endpoint(req: Request): await client.post(webhook, json=payload) return {"ok": True} - # --- New Message --- - elif action == "new_message": - user_msg = data.get("message") or data.get("user_prompt") - if user_msg is None: - raise HTTPException(status_code=422, detail="Missing 'message' or 'user_prompt' in new_message") - - sess = data.get("agent_session_id") - agent = AGENT_MAP.get(sess, manager_agent) - result = await Runner.run(agent, input=user_msg) - - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - parsed_output = None - is_structured = False - - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "message": result.requires_user_input, - 
"created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "output_type": parsed_output.get("output_type"), - "output_details": parsed_output.get("details"), - "contains_image": parsed_output.get("contains_image", False), - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "message": result.final_output, - "created_at": datetime.utcnow().isoformat() - } - - async with httpx.AsyncClient() as client: - print(f"Selected webhook: {webhook}") - await client.post(webhook, json=payload) - - return {"ok": True} + # --- New Message --- + elif action == "new_message": + user_msg = data.get("message") or data.get("user_prompt") + if user_msg is None: + raise HTTPException(status_code=422, detail="Missing 'message' or 'user_prompt' in new_message") + + sess = data.get("agent_session_id") + agent = AGENT_MAP.get(sess, manager_agent) + result = await Runner.run(agent, input=user_msg) + + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + parsed_output = None + is_structured = False + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "message": result.requires_user_input, + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "output_type": parsed_output.get("output_type"), + "output_details": parsed_output.get("details"), + "contains_image": parsed_output.get("contains_image", False), + "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "message": result.final_output, + "created_at": datetime.utcnow().isoformat() + } + + async with httpx.AsyncClient() as client: + print(f"Selected webhook: {webhook}") + await client.post(webhook, json=payload) + + return {"ok": True} else: raise HTTPException(400, "Unknown action") From 8a126468bf59ffbe0bc5ebf695757800cb6ff567 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 23 Apr 2025 17:26:31 +0900 Subject: [PATCH 053/230] Update agent_server.py From 276418c83bd71e1b2052159a15de5c688e83bc57 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 23 Apr 2025 17:29:23 +0900 Subject: [PATCH 054/230] Update agent_server.py --- src/agents/agent_server.py | 42 +++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index ed6e0b00..27d2a486 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -185,13 +185,20 @@ async def agent_endpoint(req: Request): is_structured = False if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": getattr(result, "agent_type", agent_type), - "message": result.requires_user_input, - "created_at": datetime.utcnow().isoformat() - } + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": 
data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, + "created_at": datetime.utcnow().isoformat() + } elif is_structured: webhook = STRUCTURED_WEBHOOK_URL payload = { @@ -243,13 +250,20 @@ async def agent_endpoint(req: Request): is_structured = False if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "message": result.requires_user_input, - "created_at": datetime.utcnow().isoformat() - } + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, + "created_at": datetime.utcnow().isoformat() + } elif is_structured: webhook = STRUCTURED_WEBHOOK_URL payload = { From 7afad3d28a47444cf1c226709629981b60cc94b7 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 23 Apr 2025 17:33:22 +0900 Subject: [PATCH 055/230] Update agent_server.py From 01c91d7f5ae26ad7d373643b4ec3833f8eb95fae Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 23 Apr 2025 17:35:38 +0900 Subject: [PATCH 056/230] Update agent_server.py --- src/agents/agent_server.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 27d2a486..6fd9aa12 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -186,6 +186,20 @@ async def agent_endpoint(req: Request): if getattr(result, "requires_user_input", None): webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, + "created_at": datetime.utcnow().isoformat() + } + webhook = CLARIFICATION_WEBHOOK_URL payload = { "task_id": data.get("task_id"), "user_id": data.get("user_id"), @@ -251,6 +265,20 @@ async def agent_endpoint(req: Request): if getattr(result, "requires_user_input", None): webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, + "created_at": datetime.utcnow().isoformat() + } + webhook = CLARIFICATION_WEBHOOK_URL payload = { "task_id": data.get("task_id"), "user_id": data.get("user_id"), From 7109ad6c0b09b34dbba5ccc8190810c41a00f2cc Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 24 Apr 2025 20:23:23 +0900 Subject: [PATCH 057/230] Update agent_server.py --- src/agents/agent_server.py | 266 ++++++++++++++++++------------------- 1 file changed, 133 insertions(+), 133 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 6fd9aa12..640e73a4 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -185,137 +185,137 @@ async def agent_endpoint(req: Request): 
is_structured = False if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": getattr(result, "agent_type", agent_type), - "output_type": parsed_output.get("output_type"), - "output_details": parsed_output.get("details"), - "contains_image": parsed_output.get("contains_image", False), - "created_at": datetime.utcnow().isoformat() - } + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, + "created_at": datetime.utcnow().isoformat() + } + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": getattr(result, "agent_type", agent_type), + "output_type": parsed_output.get("output_type"), + "output_details": parsed_output.get("details"), + "contains_image": parsed_output.get("contains_image", False), + "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, # or sess + "message": result.final_output, + "created_at": datetime.utcnow().isoformat() + } + + # --- updated webhook logic 04.22.2025 + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + is_structured = False + + webhook = STRUCTURED_WEBHOOK_URL if is_structured else CLARIFICATION_WEBHOOK_URL + + async with httpx.AsyncClient() as client: + print(f"Selected webhook: {webhook}") + await client.post(webhook, json=payload) + return {"ok": True} + + # --- New Message --- + elif action == "new_message": + user_msg = data.get("message") or data.get("user_prompt") + if user_msg is None: + raise HTTPException(status_code=422, detail="Missing 'message' or 'user_prompt' in new_message") + + sess = data.get("agent_session_id") + agent = AGENT_MAP.get(sess, manager_agent) + result = await Runner.run(agent, input=user_msg) + + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + parsed_output = None + is_structured = False + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": 
data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, + "created_at": datetime.utcnow().isoformat() + } + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "output_type": parsed_output.get("output_type"), + "output_details": parsed_output.get("details"), + "contains_image": parsed_output.get("contains_image", False), + "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "message": result.final_output, + "created_at": datetime.utcnow().isoformat() + } + + async with httpx.AsyncClient() as client: + print(f"Selected webhook: {webhook}") + await client.post(webhook, json=payload) + + return {"ok": True} + else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": agent_type, # or sess - "message": result.final_output, - "created_at": datetime.utcnow().isoformat() - } - - # --- updated webhook logic 04.22.2025 - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - is_structured = False - - webhook = STRUCTURED_WEBHOOK_URL if is_structured else CLARIFICATION_WEBHOOK_URL - - async with httpx.AsyncClient() as client: - print(f"Selected webhook: {webhook}") - await client.post(webhook, json=payload) - return {"ok": True} - - # --- New Message --- - elif action == "new_message": - user_msg = data.get("message") or data.get("user_prompt") - if user_msg is None: - raise HTTPException(status_code=422, detail="Missing 'message' or 'user_prompt' in new_message") - - sess = data.get("agent_session_id") - agent = AGENT_MAP.get(sess, manager_agent) - result = await Runner.run(agent, input=user_msg) - - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - parsed_output = None - is_structured = False - - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "output_type": parsed_output.get("output_type"), - "output_details": parsed_output.get("details"), - 
"contains_image": parsed_output.get("contains_image", False), - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "message": result.final_output, - "created_at": datetime.utcnow().isoformat() - } - - async with httpx.AsyncClient() as client: - print(f"Selected webhook: {webhook}") - await client.post(webhook, json=payload) - - return {"ok": True} - - else: - raise HTTPException(400, "Unknown action") + raise HTTPException(400, "Unknown action") From 99280670604ec601cd15af722db4477bdc6f4433 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 24 Apr 2025 20:35:59 +0900 Subject: [PATCH 058/230] Update agent_server.py --- src/agents/agent_server.py | 239 ++++++++++++++----------------------- 1 file changed, 92 insertions(+), 147 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 640e73a4..a29ea4fd 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,5 +1,3 @@ -# File: src/agents/agent_server.py - import os import sys import json @@ -129,9 +127,7 @@ "repurpose": repurpose_agent, "feedback": feedback_agent, } -# ─────────────────────────────────────────────────────────── -# 6) FastAPI app setup app = FastAPI() app.add_middleware( CORSMiddleware, @@ -141,26 +137,21 @@ allow_headers=["*"], ) -# 7) Your existing bubble‑hook routers (keep these unchanged) from .agent_onboarding import router as onboarding_router from .agent_profilebuilder import router as profilebuilder_router app.include_router(onboarding_router) app.include_router(profilebuilder_router) -# 8) Bubble webhook URLs STRUCTURED_WEBHOOK_URL = os.getenv("BUBBLE_STRUCTURED_URL") CLARIFICATION_WEBHOOK_URL = os.getenv("BUBBLE_CHAT_URL") -# 9) Unified /agent endpoint handling both new_task and new_message @app.post("/agent") async def agent_endpoint(req: Request): data = await req.json() action = data.get("action") - # --- New Task --- if action == "new_task": user_input = data["user_prompt"] - # 1) Manager routes or asks clarification mgr_result = await Runner.run(manager_agent, input=user_input) try: route = json.loads(mgr_result.final_output) @@ -168,154 +159,108 @@ async def agent_endpoint(req: Request): except Exception: raise HTTPException(400, "Manager failed to parse intent") - # 2) Run the selected agent agent = AGENT_MAP.get(agent_type) if not agent: raise HTTPException(400, f"Unknown agent: {agent_type}") result = await Runner.run(agent, input=user_input) - result.agent_type = agent_type # Explicitly tag result + result.agent_type = agent_type + + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + parsed_output = None + is_structured = False + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": {"reason": "Agent requested clarification"}, + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, + "output_type": parsed_output.get("output_type"), + "output_details": parsed_output.get("details"), + "contains_image": parsed_output.get("contains_image", False), 
+ "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": agent_type, + "message": result.final_output, + "created_at": datetime.utcnow().isoformat() + } + + async with httpx.AsyncClient() as client: + print(f"Selected webhook: {webhook}") + await client.post(webhook, json=payload) + return {"ok": True} + + elif action == "new_message": + user_msg = data.get("message") or data.get("user_prompt") + if user_msg is None: + raise HTTPException(422, "Missing 'message' or 'user_prompt'") + + sess = data.get("agent_session_id") + agent = AGENT_MAP.get(sess, manager_agent) + result = await Runner.run(agent, input=user_msg) - # 3) Send output back to Bubble - # --- Determine webhook type and structure payload try: parsed_output = json.loads(result.final_output) is_structured = "output_type" in parsed_output except Exception: parsed_output = None is_structured = False - + if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": getattr(result, "agent_type", agent_type), - "output_type": parsed_output.get("output_type"), - "output_details": parsed_output.get("details"), - "contains_image": parsed_output.get("contains_image", False), - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": agent_type, # or sess - "message": result.final_output, - "created_at": datetime.utcnow().isoformat() - } - - # --- updated webhook logic 04.22.2025 - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - is_structured = False - - webhook = STRUCTURED_WEBHOOK_URL if is_structured else CLARIFICATION_WEBHOOK_URL - - async with httpx.AsyncClient() as client: - print(f"Selected webhook: {webhook}") - await client.post(webhook, json=payload) - return {"ok": True} - - # --- New Message --- - elif action == "new_message": - user_msg = data.get("message") or data.get("user_prompt") - if user_msg is None: - raise HTTPException(status_code=422, detail="Missing 'message' or 'user_prompt' in new_message") - - sess = data.get("agent_session_id") - agent = AGENT_MAP.get(sess, manager_agent) - result = await Runner.run(agent, input=user_msg) - - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - parsed_output = None - is_structured = False - - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": 
result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "output_type": parsed_output.get("output_type"), - "output_details": parsed_output.get("details"), - "contains_image": parsed_output.get("contains_image", False), - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "message": result.final_output, - "created_at": datetime.utcnow().isoformat() - } - - async with httpx.AsyncClient() as client: - print(f"Selected webhook: {webhook}") - await client.post(webhook, json=payload) - - return {"ok": True} - + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": {"reason": "Agent requested clarification"}, + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "output_type": parsed_output.get("output_type"), + "output_details": parsed_output.get("details"), + "contains_image": parsed_output.get("contains_image", False), + "created_at": datetime.utcnow().isoformat() + } else: - raise HTTPException(400, "Unknown action") + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "agent_type": sess or "manager", + "message": result.final_output, + "created_at": datetime.utcnow().isoformat() + } + + async with httpx.AsyncClient() as client: + print(f"Selected webhook: {webhook}") + await client.post(webhook, json=payload) + return {"ok": True} + + else: + raise HTTPException(400, "Unknown action") From 64e189011cd4e0870c3373b9f2e0ca18ab5ad53e Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 24 Apr 2025 21:30:26 +0900 Subject: [PATCH 059/230] Update agent_server.py --- src/agents/agent_server.py | 39 ++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index a29ea4fd..199f5563 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -30,7 +30,8 @@ You are an intelligent router for user requests. Decide the intent behind the message: strategy, content, repurpose, feedback. If you are unsure or need more info, ask a clarifying question instead of routing. -Respond in strict JSON like: +If clarification is needed, respond only with a plain text clarification question. 
+Otherwise, respond in strict JSON like: { "route_to": "strategy", "reason": "User wants a campaign plan" } """ ) @@ -153,17 +154,35 @@ async def agent_endpoint(req: Request): if action == "new_task": user_input = data["user_prompt"] mgr_result = await Runner.run(manager_agent, input=user_input) + try: - route = json.loads(mgr_result.final_output) - agent_type = route["route_to"] + parsed_mgr = json.loads(mgr_result.final_output) + if "route_to" in parsed_mgr: + agent_type = parsed_mgr["route_to"] + agent = AGENT_MAP.get(agent_type) + if not agent: + raise HTTPException(400, f"Unknown agent: {agent_type}") + result = await Runner.run(agent, input=user_input) + result.agent_type = agent_type + else: + raise ValueError("Missing route_to") except Exception: - raise HTTPException(400, "Manager failed to parse intent") - - agent = AGENT_MAP.get(agent_type) - if not agent: - raise HTTPException(400, f"Unknown agent: {agent_type}") - result = await Runner.run(agent, input=user_input) - result.agent_type = agent_type + # Manager returned clarification as plain string + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": "manager", + "message": { + "type": "text", + "content": mgr_result.final_output.strip() + }, + "metadata": {"reason": "Manager requested clarification"}, + "created_at": datetime.utcnow().isoformat() + } + async with httpx.AsyncClient() as client: + await client.post(webhook, json=payload) + return {"ok": True} try: parsed_output = json.loads(result.final_output) From 5a2d7aafb7d36f9a31047b25fcfcc00ec2086c7f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 10:18:56 +0900 Subject: [PATCH 060/230] Update agent_server.py --- src/agents/agent_server.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 199f5563..947875b8 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -181,7 +181,9 @@ async def agent_endpoint(req: Request): "created_at": datetime.utcnow().isoformat() } async with httpx.AsyncClient() as client: - await client.post(webhook, json=payload) + response = await client.post(webhook, json=payload) + print(f"[Webhook Response] Status: {response.status_code}") + print(f"[Webhook Response] Body: {response.text}") return {"ok": True} try: @@ -225,7 +227,9 @@ async def agent_endpoint(req: Request): async with httpx.AsyncClient() as client: print(f"Selected webhook: {webhook}") - await client.post(webhook, json=payload) + response = await client.post(webhook, json=payload) + print(f"[Webhook Response] Status: {response.status_code}") + print(f"[Webhook Response] Body: {response.text}") return {"ok": True} elif action == "new_message": @@ -278,7 +282,9 @@ async def agent_endpoint(req: Request): async with httpx.AsyncClient() as client: print(f"Selected webhook: {webhook}") - await client.post(webhook, json=payload) + response = await client.post(webhook, json=payload) + print(f"[Webhook Response] Status: {response.status_code}") + print(f"[Webhook Response] Body: {response.text}") return {"ok": True} else: From f02f0fad11382c35a11c7b7aab7eaba112d55fc4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 10:28:31 +0900 Subject: [PATCH 061/230] Update agent_server.py --- src/agents/agent_server.py | 32 ++++++++++++++++++++------------ 1 file changed, 20 
insertions(+), 12 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 947875b8..53af3f23 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -210,18 +210,22 @@ async def agent_endpoint(req: Request): webhook = STRUCTURED_WEBHOOK_URL payload = { "task_id": data.get("task_id"), - "agent_type": agent_type, - "output_type": parsed_output.get("output_type"), - "output_details": parsed_output.get("details"), - "contains_image": parsed_output.get("contains_image", False), + "user_id": data.get("user_id"), + "agent_type": agent_type if action == "new_task" else sess or "manager", + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", "created_at": datetime.utcnow().isoformat() } else: webhook = CLARIFICATION_WEBHOOK_URL payload = { "task_id": data.get("task_id"), - "agent_type": agent_type, - "message": result.final_output, + "user_id": data.get("user_id"), + "agent_type": agent_type if action == "new_task" else sess or "manager", + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", "created_at": datetime.utcnow().isoformat() } @@ -265,18 +269,22 @@ async def agent_endpoint(req: Request): webhook = STRUCTURED_WEBHOOK_URL payload = { "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "output_type": parsed_output.get("output_type"), - "output_details": parsed_output.get("details"), - "contains_image": parsed_output.get("contains_image", False), + "user_id": data.get("user_id"), + "agent_type": agent_type if action == "new_task" else sess or "manager", + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", "created_at": datetime.utcnow().isoformat() } else: webhook = CLARIFICATION_WEBHOOK_URL payload = { "task_id": data.get("task_id"), - "agent_type": sess or "manager", - "message": result.final_output, + "user_id": data.get("user_id"), + "agent_type": agent_type if action == "new_task" else sess or "manager", + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", "created_at": datetime.utcnow().isoformat() } From 381a72484e346bc60fb2526dfe38a78649caf639 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 10:47:23 +0900 Subject: [PATCH 062/230] Update agent_server.py --- src/agents/agent_server.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 53af3f23..3e1ac8c3 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -230,10 +230,14 @@ async def agent_endpoint(req: Request): } async with httpx.AsyncClient() as client: - print(f"Selected webhook: 
{webhook}") + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) # Pretty-print payload response = await client.post(webhook, json=payload) - print(f"[Webhook Response] Status: {response.status_code}") - print(f"[Webhook Response] Body: {response.text}") + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================\n") return {"ok": True} elif action == "new_message": @@ -289,10 +293,14 @@ async def agent_endpoint(req: Request): } async with httpx.AsyncClient() as client: - print(f"Selected webhook: {webhook}") + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) # Pretty-print payload response = await client.post(webhook, json=payload) - print(f"[Webhook Response] Status: {response.status_code}") - print(f"[Webhook Response] Body: {response.text}") + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================\n") return {"ok": True} else: From 412417eaefb250f9347b9745a53eaa6b5c844cbf Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 11:00:55 +0900 Subject: [PATCH 063/230] Update agent_server.py --- src/agents/agent_server.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 3e1ac8c3..74b11a9a 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -246,7 +246,8 @@ async def agent_endpoint(req: Request): raise HTTPException(422, "Missing 'message' or 'user_prompt'") sess = data.get("agent_session_id") - agent = AGENT_MAP.get(sess, manager_agent) + agent_type = sess or "manager" + agent = AGENT_MAP.get(agent_type, manager_agent) result = await Runner.run(agent, input=user_msg) try: From 1548eebbae26440ff986819ac30350e3cd8d2ba0 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 11:22:33 +0900 Subject: [PATCH 064/230] Update agent_server.py --- src/agents/agent_server.py | 57 +++++++++++++++++++++++--------------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 74b11a9a..98b007cf 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -157,7 +157,8 @@ async def agent_endpoint(req: Request): try: parsed_mgr = json.loads(mgr_result.final_output) - if "route_to" in parsed_mgr: + # If manager returns a clarification message (plain text), skip routing + if isinstance(parsed_mgr, dict) and "route_to" in parsed_mgr: agent_type = parsed_mgr["route_to"] agent = AGENT_MAP.get(agent_type) if not agent: @@ -165,7 +166,28 @@ async def agent_endpoint(req: Request): result = await Runner.run(agent, input=user_input) result.agent_type = agent_type else: - raise ValueError("Missing route_to") + # Treat entire final_output as clarification string + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": "manager", + "message_type": "text", + "message_content": mgr_result.final_output.strip(), + "metadata_reason": "Manager requested clarification", + "created_at": datetime.utcnow().isoformat() + } + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + print(f"Webhook URL: 
{webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) + response = await client.post(webhook, json=payload) + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("======================== +") + return {"ok": True} except Exception: # Manager returned clarification as plain string webhook = CLARIFICATION_WEBHOOK_URL @@ -211,7 +233,7 @@ async def agent_endpoint(req: Request): payload = { "task_id": data.get("task_id"), "user_id": data.get("user_id"), - "agent_type": agent_type if action == "new_task" else sess or "manager", + "agent_type": agent_type, "message_type": "text", "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", @@ -222,7 +244,7 @@ async def agent_endpoint(req: Request): payload = { "task_id": data.get("task_id"), "user_id": data.get("user_id"), - "agent_type": agent_type if action == "new_task" else sess or "manager", + "agent_type": agent_type, "message_type": "text", "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", @@ -230,14 +252,10 @@ async def agent_endpoint(req: Request): } async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) # Pretty-print payload + print(f"Selected webhook: {webhook}") response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================\n") + print(f"[Webhook Response] Status: {response.status_code}") + print(f"[Webhook Response] Body: {response.text}") return {"ok": True} elif action == "new_message": @@ -246,8 +264,7 @@ async def agent_endpoint(req: Request): raise HTTPException(422, "Missing 'message' or 'user_prompt'") sess = data.get("agent_session_id") - agent_type = sess or "manager" - agent = AGENT_MAP.get(agent_type, manager_agent) + agent = AGENT_MAP.get(sess, manager_agent) result = await Runner.run(agent, input=user_msg) try: @@ -275,7 +292,7 @@ async def agent_endpoint(req: Request): payload = { "task_id": data.get("task_id"), "user_id": data.get("user_id"), - "agent_type": agent_type if action == "new_task" else sess or "manager", + "agent_type": agent_type, "message_type": "text", "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", @@ -286,7 +303,7 @@ async def agent_endpoint(req: Request): payload = { "task_id": data.get("task_id"), "user_id": data.get("user_id"), - "agent_type": agent_type if action == "new_task" else sess or "manager", + "agent_type": agent_type, "message_type": "text", "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", @@ -294,14 +311,10 @@ async def agent_endpoint(req: Request): } async with httpx.AsyncClient() as client: 
- print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) # Pretty-print payload + print(f"Selected webhook: {webhook}") response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================\n") + print(f"[Webhook Response] Status: {response.status_code}") + print(f"[Webhook Response] Body: {response.text}") return {"ok": True} else: From 5daed3246b507468e69de2063171ddef0167d874 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 11:44:29 +0900 Subject: [PATCH 065/230] Update agent_server.py --- src/agents/agent_server.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 98b007cf..50990e07 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -185,8 +185,7 @@ async def agent_endpoint(req: Request): response = await client.post(webhook, json=payload) print(f"Response Status: {response.status_code}") print(f"Response Body: {response.text}") - print("======================== -") + print("========================") return {"ok": True} except Exception: # Manager returned clarification as plain string From c44a990eae91f0c80bdf5fb4eb9bb3db9f9020ac Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 11:53:50 +0900 Subject: [PATCH 066/230] Update agent_server.py --- src/agents/agent_server.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 50990e07..8ad41df1 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -261,9 +261,10 @@ async def agent_endpoint(req: Request): user_msg = data.get("message") or data.get("user_prompt") if user_msg is None: raise HTTPException(422, "Missing 'message' or 'user_prompt'") - + sess = data.get("agent_session_id") - agent = AGENT_MAP.get(sess, manager_agent) + agent_type = sess or "manager" + agent = AGENT_MAP.get(agent_type, manager_agent) result = await Runner.run(agent, input=user_msg) try: From 396bc3effc71c70f525f04ee8de7b83a072a6c9b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 13:36:18 +0900 Subject: [PATCH 067/230] Update agent_server.py --- src/agents/agent_server.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 8ad41df1..c4a1c235 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -202,9 +202,14 @@ async def agent_endpoint(req: Request): "created_at": datetime.utcnow().isoformat() } async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) response = await client.post(webhook, json=payload) - print(f"[Webhook Response] Status: {response.status_code}") - print(f"[Webhook Response] Body: {response.text}") + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") return {"ok": True} try: @@ -251,10 +256,14 @@ async def agent_endpoint(req: Request): } async with httpx.AsyncClient() as client: - print(f"Selected webhook: {webhook}") + 
print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) response = await client.post(webhook, json=payload) - print(f"[Webhook Response] Status: {response.status_code}") - print(f"[Webhook Response] Body: {response.text}") + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") return {"ok": True} elif action == "new_message": From 0694277961615502b281fc9ae53ef2dfbba2982c Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 16:52:39 +0900 Subject: [PATCH 068/230] Update agent_server.py --- src/agents/agent_server.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index c4a1c235..91246ac6 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -318,12 +318,16 @@ async def agent_endpoint(req: Request): "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", "created_at": datetime.utcnow().isoformat() } - async with httpx.AsyncClient() as client: - print(f"Selected webhook: {webhook}") + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) response = await client.post(webhook, json=payload) - print(f"[Webhook Response] Status: {response.status_code}") - print(f"[Webhook Response] Body: {response.text}") + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") + return {"ok": True} else: From 36cc043e0f1864cc215d7ff4abfc128085538f7b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 17:02:04 +0900 Subject: [PATCH 069/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 91246ac6..1a70cbc8 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -272,7 +272,7 @@ async def agent_endpoint(req: Request): raise HTTPException(422, "Missing 'message' or 'user_prompt'") sess = data.get("agent_session_id") - agent_type = sess or "manager" + agent_type = sess if sess in AGENT_MAP else "manager" agent = AGENT_MAP.get(agent_type, manager_agent) result = await Runner.run(agent, input=user_msg) From 297f18a14d710c7693945449ed71f0d0707a5b32 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 17:40:19 +0900 Subject: [PATCH 070/230] Update agent_server.py --- src/agents/agent_server.py | 105 +++++++++++++++++++++++++++---------- 1 file changed, 76 insertions(+), 29 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 1a70cbc8..5e535b3f 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -151,44 +151,88 @@ async def agent_endpoint(req: Request): data = await req.json() action = data.get("action") + # PATCHED SECTION: new_task action handling if action == "new_task": user_input = data["user_prompt"] mgr_result = await Runner.run(manager_agent, input=user_input) - + try: parsed_mgr = json.loads(mgr_result.final_output) - # If manager returns a clarification message (plain text), skip routing if isinstance(parsed_mgr, 
dict) and "route_to" in parsed_mgr: + # Manager successfully routed to downstream agent agent_type = parsed_mgr["route_to"] agent = AGENT_MAP.get(agent_type) if not agent: raise HTTPException(400, f"Unknown agent: {agent_type}") + result = await Runner.run(agent, input=user_input) - result.agent_type = agent_type + parsed_output = None + is_structured = False + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + pass + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message": parsed_output, + "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message": { + "type": "text", + "content": result.final_output + }, + "metadata": { + "reason": "Agent returned unstructured output" + }, + "created_at": datetime.utcnow().isoformat() + } + else: - # Treat entire final_output as clarification string + # Manager requested clarification directly webhook = CLARIFICATION_WEBHOOK_URL payload = { "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": "manager", - "message_type": "text", - "message_content": mgr_result.final_output.strip(), - "metadata_reason": "Manager requested clarification", + "message": { + "type": "text", + "content": mgr_result.final_output.strip() + }, + "metadata": { + "reason": "Manager requested clarification" + }, "created_at": datetime.utcnow().isoformat() } - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - return {"ok": True} + except Exception: - # Manager returned clarification as plain string + # Manager returned malformed or unclear output webhook = CLARIFICATION_WEBHOOK_URL payload = { "task_id": data.get("task_id"), @@ -198,19 +242,22 @@ async def agent_endpoint(req: Request): "type": "text", "content": mgr_result.final_output.strip() }, - "metadata": {"reason": "Manager requested clarification"}, + "metadata": { + "reason": "Manager output parsing error" + }, "created_at": datetime.utcnow().isoformat() } - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - return {"ok": True} + + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) + response = await 
client.post(webhook, json=payload) + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") + return {"ok": True} try: parsed_output = json.loads(result.final_output) From d6bfad7933f5b175e50a4d424fa3465a1b925f53 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 17:53:19 +0900 Subject: [PATCH 071/230] Update agent_server.py From 5c219d3daa9e35023ea992eb67bfb1ceeeddc61b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 20:58:12 +0900 Subject: [PATCH 072/230] Update agent_server.py --- src/agents/agent_server.py | 387 +++++++++++-------------------------- 1 file changed, 118 insertions(+), 269 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 5e535b3f..27914b43 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,29 +1,21 @@ import os import sys import json -import asyncio from datetime import datetime from dotenv import load_dotenv -# 1) Load environment variables load_dotenv() - -# 2) Add project src folder so "agents" can import its own util subpackage sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) import httpx from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -# 3) Core SDK imports -from agents import Agent, Runner, tool - -# 4) SDK guardrail types (so guardrail imports work) -from agents.util._types import MaybeAwaitable +from agents import Agent, Runner +from .agent_onboarding import router as onboarding_router +from .agent_profilebuilder import router as profilebuilder_router -# ─────────────────────────────────────────────────────────── -# 5) Agent definitions (Phase 1: keep them here) -# ─────────────────────────────────────────────────────────── +# Agent Definitions manager_agent = Agent( name="Manager", instructions=""" @@ -31,8 +23,9 @@ Decide the intent behind the message: strategy, content, repurpose, feedback. If you are unsure or need more info, ask a clarifying question instead of routing. If clarification is needed, respond only with a plain text clarification question. -Otherwise, respond in strict JSON like: -{ "route_to": "strategy", "reason": "User wants a campaign plan" } +Return: +- Internal: { "type": "internal", "route_to": "strategy" } +- Clarification: { "type": "clarification", "content": "..." } """ ) @@ -42,18 +35,10 @@ You create clear, actionable 7-day social media campaign strategies. If user input is unclear or missing platform, audience, or tone — ask for clarification. Respond in structured JSON like: -{ - "output_type": "strategy_plan", - "contains_image": false, - "details": { - "days": [ - { "title": "...", "theme": "...", "cta": "..." } - ] - } -} -Only return JSON in this format. -""", - tools=[] +Return either: +- Clarification: { "type": "clarification", "content": "..." } +- Structured: { "type": "structured", "output_type": "strategy_plan", "contains_image": false, "details": {...} } +""" ) content_agent = Agent( @@ -61,72 +46,40 @@ instructions=""" You write engaging, brand-aligned social content. If user input lacks platform or goal, ask for clarification. -Return post drafts in this JSON format: -{ - "output_type": "content_variants", - "contains_image": false, - "details": { - "variants": [ - { - "platform": "Instagram", - "caption": "...", - "hook": "...", - "cta": "..." 
- } - ] - } -} -Only respond in this format. -""", - tools=[] +Respond in structured JSON like: +Return either: +- Clarification: { "type": "clarification", "content": "..." } +- Structured: { "type": "structured", "output_type": "strategy_plan", "contains_image": false, "details": {...} } +""" ) - repurpose_agent = Agent( name="RepurposeAgent", instructions=""" You convert existing posts into new formats for different platforms. -Respond using this format: -{ - "output_type": "repurposed_posts", - "contains_image": false, - "details": { - "original": "...", - "repurposed": [ - { - "platform": "...", - "caption": "...", - "format": "..." - } - ] - } -} -""", - tools=[] +If user input is unclear or missing platform, audience, or tone — ask for clarification. +Respond in structured JSON like: +Return either: +- Clarification: { "type": "clarification", "content": "..." } +- Structured: { "type": "structured", "output_type": "strategy_plan", "contains_image": false, "details": {...} } +""" ) - feedback_agent = Agent( name="FeedbackAgent", instructions=""" You evaluate content and offer improvements. -Respond in this structured format: -{ - "output_type": "content_feedback", - "contains_image": false, - "details": { - "original": "...", - "feedback": "...", - "suggested_edit": "..." - } -} -""", - tools=[] +If user input is unclear or missing platform, audience, or tone — ask for clarification. +Respond in structured JSON like: +Return either: +- Clarification: { "type": "clarification", "content": "..." } +- Structured: { "type": "structured", "output_type": "strategy_plan", "contains_image": false, "details": {...} } +""" ) AGENT_MAP = { - "strategy": strategy_agent, - "content": content_agent, + "strategy": strategy_agent, + "content": content_agent, "repurpose": repurpose_agent, - "feedback": feedback_agent, + "feedback": feedback_agent, } app = FastAPI() @@ -138,244 +91,140 @@ allow_headers=["*"], ) -from .agent_onboarding import router as onboarding_router -from .agent_profilebuilder import router as profilebuilder_router app.include_router(onboarding_router) app.include_router(profilebuilder_router) -STRUCTURED_WEBHOOK_URL = os.getenv("BUBBLE_STRUCTURED_URL") +STRUCTURED_WEBHOOK_URL = os.getenv("BUBBLE_STRUCTURED_URL") CLARIFICATION_WEBHOOK_URL = os.getenv("BUBBLE_CHAT_URL") +def log_and_send(webhook, payload): + async def _send(): + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) + response = await client.post(webhook, json=payload) + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") + return _send() + @app.post("/agent") async def agent_endpoint(req: Request): data = await req.json() action = data.get("action") + task_id = data.get("task_id") + user_id = data.get("user_id") - # PATCHED SECTION: new_task action handling if action == "new_task": user_input = data["user_prompt"] - mgr_result = await Runner.run(manager_agent, input=user_input) - + manager_result = await Runner.run(manager_agent, input=user_input) + try: - parsed_mgr = json.loads(mgr_result.final_output) - if isinstance(parsed_mgr, dict) and "route_to" in parsed_mgr: - # Manager successfully routed to downstream agent - agent_type = parsed_mgr["route_to"] + parsed_mgr = json.loads(manager_result.final_output) + output_type = parsed_mgr.get("type") + + if output_type == "internal": + agent_type = 
parsed_mgr.get("route_to") agent = AGENT_MAP.get(agent_type) if not agent: - raise HTTPException(400, f"Unknown agent: {agent_type}") - - result = await Runner.run(agent, input=user_input) - parsed_output = None - is_structured = False - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - pass - - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL + raise HTTPException(400, f"Unknown agent type: {agent_type}") + + agent_result = await Runner.run(agent, input=user_input) + parsed_agent = json.loads(agent_result.final_output) + + if parsed_agent.get("type") == "clarification": payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), + "task_id": task_id, + "user_id": user_id, "agent_type": agent_type, "message": { "type": "text", - "content": result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" + "content": parsed_agent["content"] }, + "metadata": {"reason": "Agent requested clarification"}, "created_at": datetime.utcnow().isoformat() } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL + return await log_and_send(CLARIFICATION_WEBHOOK_URL, payload) + + elif parsed_agent.get("type") == "structured": payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), + "task_id": task_id, + "user_id": user_id, "agent_type": agent_type, - "message": parsed_output, + "message": parsed_agent, "created_at": datetime.utcnow().isoformat() } + return await log_and_send(STRUCTURED_WEBHOOK_URL, payload) + else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message": { - "type": "text", - "content": result.final_output - }, - "metadata": { - "reason": "Agent returned unstructured output" - }, - "created_at": datetime.utcnow().isoformat() - } - - else: - # Manager requested clarification directly - webhook = CLARIFICATION_WEBHOOK_URL + raise HTTPException(500, "Unexpected downstream agent output type.") + + elif output_type == "clarification": payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), + "task_id": task_id, + "user_id": user_id, "agent_type": "manager", "message": { "type": "text", - "content": mgr_result.final_output.strip() - }, - "metadata": { - "reason": "Manager requested clarification" + "content": parsed_mgr["content"] }, + "metadata": {"reason": "Manager requested clarification"}, "created_at": datetime.utcnow().isoformat() } - - except Exception: - # Manager returned malformed or unclear output - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": "manager", - "message": { - "type": "text", - "content": mgr_result.final_output.strip() - }, - "metadata": { - "reason": "Manager output parsing error" - }, - "created_at": datetime.utcnow().isoformat() - } - - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - return {"ok": True} - - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - parsed_output = None - 
is_structured = False + return await log_and_send(CLARIFICATION_WEBHOOK_URL, payload) - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": {"reason": "Agent requested clarification"}, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", - "created_at": datetime.utcnow().isoformat() - } + else: + raise HTTPException(500, "Unexpected manager output type.") - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - return {"ok": True} + except Exception as e: + raise HTTPException(500, f"Failed to parse manager output: {str(e)}") elif action == "new_message": user_msg = data.get("message") or data.get("user_prompt") - if user_msg is None: + if not user_msg: raise HTTPException(422, "Missing 'message' or 'user_prompt'") - + sess = data.get("agent_session_id") agent_type = sess if sess in AGENT_MAP else "manager" agent = AGENT_MAP.get(agent_type, manager_agent) + result = await Runner.run(agent, input=user_msg) try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - parsed_output = None - is_structured = False + parsed = json.loads(result.final_output) + output_type = parsed.get("type") - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": {"reason": "Agent requested clarification"}, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", - "created_at": datetime.utcnow().isoformat() - } - else: - 
webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", - "created_at": datetime.utcnow().isoformat() - } - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - - return {"ok": True} + if output_type == "clarification": + payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": agent_type, + "message": { + "type": "text", + "content": parsed["content"] + }, + "metadata": {"reason": "Agent requested clarification"}, + "created_at": datetime.utcnow().isoformat() + } + return await log_and_send(CLARIFICATION_WEBHOOK_URL, payload) + + elif output_type == "structured": + payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": agent_type, + "message": parsed, + "created_at": datetime.utcnow().isoformat() + } + return await log_and_send(STRUCTURED_WEBHOOK_URL, payload) + + else: + raise HTTPException(500, "Unexpected agent output type.") + + except Exception as e: + raise HTTPException(500, f"Failed to parse agent output: {str(e)}") else: raise HTTPException(400, "Unknown action") From 9d9df6112bf0d4c1c7e89400524efee97b802173 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 21:06:02 +0900 Subject: [PATCH 073/230] Update agent_server.py --- src/agents/agent_server.py | 148 +++++++++++++++++++++++++++++-------- 1 file changed, 118 insertions(+), 30 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 27914b43..585134bb 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -20,58 +20,146 @@ name="Manager", instructions=""" You are an intelligent router for user requests. -Decide the intent behind the message: strategy, content, repurpose, feedback. -If you are unsure or need more info, ask a clarifying question instead of routing. -If clarification is needed, respond only with a plain text clarification question. -Return: -- Internal: { "type": "internal", "route_to": "strategy" } -- Clarification: { "type": "clarification", "content": "..." } +Your outputs must follow one of these formats: + +If routing to another agent: +{ + "type": "internal", + "route_to": "strategy" // or "content", "repurpose", "feedback" +} + +If asking for clarification: +{ + "type": "clarification", + "content": "Could you clarify what platform you want to use?" +} + +Respond ONLY in one of the above JSON formats. """ ) strategy_agent = Agent( name="StrategyAgent", instructions=""" -You create clear, actionable 7-day social media campaign strategies. -If user input is unclear or missing platform, audience, or tone — ask for clarification. -Respond in structured JSON like: -Return either: -- Clarification: { "type": "clarification", "content": "..." 
} -- Structured: { "type": "structured", "output_type": "strategy_plan", "contains_image": false, "details": {...} } +You create detailed, actionable 7-day social media campaign strategies. +Your outputs must follow one of these formats: + +If asking for clarification: +{ + "type": "clarification", + "content": "Could you tell me your campaign tone and target audience?" +} + +If outputting the full strategy: +{ + "type": "structured", + "output_type": "strategy_plan", + "contains_image": false, + "details": { + "days": [ + { "title": "Day 1", "theme": "Awareness", "cta": "Visit our page" }, + { "title": "Day 2", "theme": "Engagement", "cta": "Comment your thoughts" }, + ... + ] + } +} + +Respond ONLY in one of the above JSON formats. """ ) content_agent = Agent( name="ContentAgent", instructions=""" -You write engaging, brand-aligned social content. -If user input lacks platform or goal, ask for clarification. -Respond in structured JSON like: -Return either: -- Clarification: { "type": "clarification", "content": "..." } -- Structured: { "type": "structured", "output_type": "strategy_plan", "contains_image": false, "details": {...} } +You create brand-aligned social media content drafts. +Your outputs must follow one of these formats: + +If asking for clarification: +{ + "type": "clarification", + "content": "Which platform and tone should the posts match?" +} + +If outputting content variations: +{ + "type": "structured", + "output_type": "content_variants", + "contains_image": false, + "details": { + "variants": [ + { + "platform": "Instagram", + "caption": "Life’s a journey 🚀 #MondayMotivation", + "hook": "Feeling stuck?", + "cta": "Check out our tips!" + }, + ... + ] + } +} + +Respond ONLY in one of the above JSON formats. """ ) + repurpose_agent = Agent( name="RepurposeAgent", instructions=""" -You convert existing posts into new formats for different platforms. -If user input is unclear or missing platform, audience, or tone — ask for clarification. -Respond in structured JSON like: -Return either: -- Clarification: { "type": "clarification", "content": "..." } -- Structured: { "type": "structured", "output_type": "strategy_plan", "contains_image": false, "details": {...} } +You transform existing social media posts into new formats for different platforms. +Your outputs must follow one of these formats: + +If asking for clarification: +{ + "type": "clarification", + "content": "Which platforms would you like to repurpose for?" +} + +If outputting repurposed posts: +{ + "type": "structured", + "output_type": "repurposed_posts", + "contains_image": false, + "details": { + "original": "Original Instagram caption here...", + "repurposed": [ + { + "platform": "Twitter", + "caption": "Short and punchy tweet version!" + }, + ... + ] + } +} + +Respond ONLY in one of the above JSON formats. """ ) + feedback_agent = Agent( name="FeedbackAgent", instructions=""" -You evaluate content and offer improvements. -If user input is unclear or missing platform, audience, or tone — ask for clarification. -Respond in structured JSON like: -Return either: -- Clarification: { "type": "clarification", "content": "..." } -- Structured: { "type": "structured", "output_type": "strategy_plan", "contains_image": false, "details": {...} } +You review social media posts and suggest improvements. +Your outputs must follow one of these formats: + +If asking for clarification: +{ + "type": "clarification", + "content": "Could you specify which post style (formal, casual, humorous) you want feedback on?" 
+} + +If providing feedback: +{ + "type": "structured", + "output_type": "content_feedback", + "contains_image": false, + "details": { + "original": "Original caption here...", + "feedback": "This caption is a bit generic. Consider adding a stronger emotional hook.", + "suggested_edit": "Transform your life starting today! 🚀 #Motivation" + } +} + +Respond ONLY in one of the above JSON formats. """ ) From 85b40b119bec7094f265bc5d97262ae30f0e0834 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 21:49:28 +0900 Subject: [PATCH 074/230] Update agent_server.py --- src/agents/agent_server.py | 407 ++++++++++++++++++++----------------- 1 file changed, 219 insertions(+), 188 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 585134bb..9dca8d6b 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,173 +1,132 @@ import os import sys import json +import asyncio from datetime import datetime from dotenv import load_dotenv +# 1) Load environment variables load_dotenv() + +# 2) Add project src folder so "agents" can import its own util subpackage sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) import httpx from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -from agents import Agent, Runner -from .agent_onboarding import router as onboarding_router -from .agent_profilebuilder import router as profilebuilder_router +# 3) Core SDK imports +from agents import Agent, Runner, tool + +# 4) SDK guardrail types (so guardrail imports work) +from agents.util._types import MaybeAwaitable -# Agent Definitions +# ─────────────────────────────────────────────────────────── +# 5) Agent definitions (Phase 1: keep them here) +# ─────────────────────────────────────────────────────────── manager_agent = Agent( name="Manager", instructions=""" You are an intelligent router for user requests. -Your outputs must follow one of these formats: - -If routing to another agent: -{ - "type": "internal", - "route_to": "strategy" // or "content", "repurpose", "feedback" -} - -If asking for clarification: -{ - "type": "clarification", - "content": "Could you clarify what platform you want to use?" -} - -Respond ONLY in one of the above JSON formats. +Decide the intent behind the message: strategy, content, repurpose, feedback. +If you are unsure or need more info, ask a clarifying question instead of routing. +If clarification is needed, respond only with a plain text clarification question. +Otherwise, respond in strict JSON like: +{ "route_to": "strategy", "reason": "User wants a campaign plan" } """ ) strategy_agent = Agent( name="StrategyAgent", instructions=""" -You create detailed, actionable 7-day social media campaign strategies. -Your outputs must follow one of these formats: - -If asking for clarification: -{ - "type": "clarification", - "content": "Could you tell me your campaign tone and target audience?" -} - -If outputting the full strategy: +You create clear, actionable 7-day social media campaign strategies. +If user input is unclear or missing platform, audience, or tone — ask for clarification. +Respond in structured JSON like: { - "type": "structured", "output_type": "strategy_plan", "contains_image": false, "details": { "days": [ - { "title": "Day 1", "theme": "Awareness", "cta": "Visit our page" }, - { "title": "Day 2", "theme": "Engagement", "cta": "Comment your thoughts" }, - ... 
+ { "title": "...", "theme": "...", "cta": "..." } ] } } - -Respond ONLY in one of the above JSON formats. -""" +Only return JSON in this format. +""", + tools=[] ) content_agent = Agent( name="ContentAgent", instructions=""" -You create brand-aligned social media content drafts. -Your outputs must follow one of these formats: - -If asking for clarification: +You write engaging, brand-aligned social content. +If user input lacks platform or goal, ask for clarification. +Return post drafts in this JSON format: { - "type": "clarification", - "content": "Which platform and tone should the posts match?" -} - -If outputting content variations: -{ - "type": "structured", "output_type": "content_variants", "contains_image": false, "details": { "variants": [ { "platform": "Instagram", - "caption": "Life’s a journey 🚀 #MondayMotivation", - "hook": "Feeling stuck?", - "cta": "Check out our tips!" - }, - ... + "caption": "...", + "hook": "...", + "cta": "..." + } ] } } - -Respond ONLY in one of the above JSON formats. -""" +Only respond in this format. +""", + tools=[] ) repurpose_agent = Agent( name="RepurposeAgent", instructions=""" -You transform existing social media posts into new formats for different platforms. -Your outputs must follow one of these formats: - -If asking for clarification: +You convert existing posts into new formats for different platforms. +Respond using this format: { - "type": "clarification", - "content": "Which platforms would you like to repurpose for?" -} - -If outputting repurposed posts: -{ - "type": "structured", "output_type": "repurposed_posts", "contains_image": false, "details": { - "original": "Original Instagram caption here...", + "original": "...", "repurposed": [ { - "platform": "Twitter", - "caption": "Short and punchy tweet version!" - }, - ... + "platform": "...", + "caption": "...", + "format": "..." + } ] } } - -Respond ONLY in one of the above JSON formats. -""" +""", + tools=[] ) feedback_agent = Agent( name="FeedbackAgent", instructions=""" -You review social media posts and suggest improvements. -Your outputs must follow one of these formats: - -If asking for clarification: -{ - "type": "clarification", - "content": "Could you specify which post style (formal, casual, humorous) you want feedback on?" -} - -If providing feedback: +You evaluate content and offer improvements. +Respond in this structured format: { - "type": "structured", "output_type": "content_feedback", "contains_image": false, "details": { - "original": "Original caption here...", - "feedback": "This caption is a bit generic. Consider adding a stronger emotional hook.", - "suggested_edit": "Transform your life starting today! 🚀 #Motivation" + "original": "...", + "feedback": "...", + "suggested_edit": "..." } } - -Respond ONLY in one of the above JSON formats. 
-""" +""", + tools=[] ) AGENT_MAP = { - "strategy": strategy_agent, - "content": content_agent, + "strategy": strategy_agent, + "content": content_agent, "repurpose": repurpose_agent, - "feedback": feedback_agent, + "feedback": feedback_agent, } app = FastAPI() @@ -179,140 +138,212 @@ allow_headers=["*"], ) +from .agent_onboarding import router as onboarding_router +from .agent_profilebuilder import router as profilebuilder_router app.include_router(onboarding_router) app.include_router(profilebuilder_router) -STRUCTURED_WEBHOOK_URL = os.getenv("BUBBLE_STRUCTURED_URL") +STRUCTURED_WEBHOOK_URL = os.getenv("BUBBLE_STRUCTURED_URL") CLARIFICATION_WEBHOOK_URL = os.getenv("BUBBLE_CHAT_URL") -def log_and_send(webhook, payload): - async def _send(): - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - return _send() - @app.post("/agent") async def agent_endpoint(req: Request): data = await req.json() action = data.get("action") - task_id = data.get("task_id") - user_id = data.get("user_id") + # PATCHED SECTION: new_task action handling if action == "new_task": user_input = data["user_prompt"] - manager_result = await Runner.run(manager_agent, input=user_input) - + mgr_result = await Runner.run(manager_agent, input=user_input) + try: - parsed_mgr = json.loads(manager_result.final_output) - output_type = parsed_mgr.get("type") - - if output_type == "internal": - agent_type = parsed_mgr.get("route_to") + parsed_mgr = json.loads(mgr_result.final_output) + if isinstance(parsed_mgr, dict) and "route_to" in parsed_mgr: + # Manager successfully routed to downstream agent + agent_type = parsed_mgr["route_to"] agent = AGENT_MAP.get(agent_type) if not agent: - raise HTTPException(400, f"Unknown agent type: {agent_type}") - - agent_result = await Runner.run(agent, input=user_input) - parsed_agent = json.loads(agent_result.final_output) - - if parsed_agent.get("type") == "clarification": + raise HTTPException(400, f"Unknown agent: {agent_type}") + + result = await Runner.run(agent, input=user_input) + parsed_output = None + is_structured = False + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + pass + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL payload = { - "task_id": task_id, - "user_id": user_id, + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), "agent_type": agent_type, - "message": { - "type": "text", - "content": parsed_agent["content"] - }, - "metadata": {"reason": "Agent requested clarification"}, + "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), "created_at": datetime.utcnow().isoformat() } - return await log_and_send(CLARIFICATION_WEBHOOK_URL, payload) - - elif parsed_agent.get("type") == "structured": + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL payload = { - "task_id": task_id, - "user_id": user_id, + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), "agent_type": agent_type, - "message": parsed_agent, + "message": parsed_output, "created_at": datetime.utcnow().isoformat() } - return await log_and_send(STRUCTURED_WEBHOOK_URL, payload) - else: - raise 
HTTPException(500, "Unexpected downstream agent output type.") - - elif output_type == "clarification": + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "created_at": datetime.utcnow().isoformat() + } + + else: + # Manager requested clarification directly + webhook = CLARIFICATION_WEBHOOK_URL payload = { - "task_id": task_id, - "user_id": user_id, + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), "agent_type": "manager", - "message": { - "type": "text", - "content": parsed_mgr["content"] - }, - "metadata": {"reason": "Manager requested clarification"}, + "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), "created_at": datetime.utcnow().isoformat() } - return await log_and_send(CLARIFICATION_WEBHOOK_URL, payload) + + except Exception: + # Manager returned malformed or unclear output + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": "manager", + "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "created_at": datetime.utcnow().isoformat() + } + + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) + response = await client.post(webhook, json=payload) + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") + return {"ok": True} - else: - raise HTTPException(500, "Unexpected manager output type.") + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + parsed_output = None + is_structured = False + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", + "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", + "created_at": datetime.utcnow().isoformat() + } - except Exception as e: - raise HTTPException(500, f"Failed to parse manager output: {str(e)}") + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + 
print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) + response = await client.post(webhook, json=payload) + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") + return {"ok": True} elif action == "new_message": user_msg = data.get("message") or data.get("user_prompt") - if not user_msg: + if user_msg is None: raise HTTPException(422, "Missing 'message' or 'user_prompt'") - + sess = data.get("agent_session_id") agent_type = sess if sess in AGENT_MAP else "manager" agent = AGENT_MAP.get(agent_type, manager_agent) - result = await Runner.run(agent, input=user_msg) try: - parsed = json.loads(result.final_output) - output_type = parsed.get("type") - - if output_type == "clarification": - payload = { - "task_id": task_id, - "user_id": user_id, - "agent_type": agent_type, - "message": { - "type": "text", - "content": parsed["content"] - }, - "metadata": {"reason": "Agent requested clarification"}, - "created_at": datetime.utcnow().isoformat() - } - return await log_and_send(CLARIFICATION_WEBHOOK_URL, payload) - - elif output_type == "structured": - payload = { - "task_id": task_id, - "user_id": user_id, - "agent_type": agent_type, - "message": parsed, - "created_at": datetime.utcnow().isoformat() - } - return await log_and_send(STRUCTURED_WEBHOOK_URL, payload) - - else: - raise HTTPException(500, "Unexpected agent output type.") - - except Exception as e: - raise HTTPException(500, f"Failed to parse agent output: {str(e)}") + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + parsed_output = None + is_structured = False + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": sess or "manager", + "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", + "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", + "created_at": datetime.utcnow().isoformat() + } + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) + response = await client.post(webhook, json=payload) + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") + + return {"ok": True} else: raise HTTPException(400, "Unknown action") From 
ea3444edbd241915e66e32b13058d378138b7f33 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 22:07:39 +0900 Subject: [PATCH 075/230] Update agent_server.py --- src/agents/agent_server.py | 67 +++++++++++++++++++++++++++----------- 1 file changed, 48 insertions(+), 19 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 9dca8d6b..8af844d2 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -180,7 +180,13 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": { + "reason": "Agent requested clarification" + }, "created_at": datetime.utcnow().isoformat() } elif is_structured: @@ -189,7 +195,8 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message": parsed_output, + "message_raw": json.dumps(parsed_output), + "metadata_raw": json.dumps({ "reason": "Final structured output" }), "created_at": datetime.utcnow().isoformat() } else: @@ -198,7 +205,13 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "message": { + "type": "text", + "content": result.final_output + }, + "metadata": { + "reason": "Agent returned unstructured output" + }, "created_at": datetime.utcnow().isoformat() } @@ -209,7 +222,13 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": "manager", - "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "message": { + "type": "text", + "content": mgr_result.final_output.strip() + }, + "metadata": { + "reason": "Manager requested clarification" + }, "created_at": datetime.utcnow().isoformat() } @@ -220,7 +239,13 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": "manager", - "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "message": { + "type": "text", + "content": mgr_result.final_output.strip() + }, + "metadata": { + "reason": "Manager output parsing error" + }, "created_at": datetime.utcnow().isoformat() } @@ -248,7 +273,11 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": {"reason": "Agent requested clarification"}, "created_at": datetime.utcnow().isoformat() } elif is_structured: @@ -257,9 +286,8 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", 
None) else "Auto-forwarded message", + "message_raw": json.dumps(parsed_output if parsed_output else { "type": "text", "content": result.final_output }), + "metadata_raw": json.dumps({ "reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message" }), "created_at": datetime.utcnow().isoformat() } else: @@ -268,9 +296,8 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", + "message_raw": json.dumps(parsed_output if parsed_output else { "type": "text", "content": result.final_output }), + "metadata_raw": json.dumps({ "reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message" }), "created_at": datetime.utcnow().isoformat() } @@ -308,7 +335,11 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": sess or "manager", - "message_raw": json.dumps(parsed), "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "message": { + "type": "text", + "content": result.requires_user_input + }, + "metadata": {"reason": "Agent requested clarification"}, "created_at": datetime.utcnow().isoformat() } elif is_structured: @@ -317,9 +348,8 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", + "message_raw": json.dumps(parsed_output if parsed_output else { "type": "text", "content": result.final_output }), + "metadata_raw": json.dumps({ "reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message" }), "created_at": datetime.utcnow().isoformat() } else: @@ -328,9 +358,8 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", + "message_raw": json.dumps(parsed_output if parsed_output else { "type": "text", "content": result.final_output }), + "metadata_raw": json.dumps({ "reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message" }), "created_at": datetime.utcnow().isoformat() } async with httpx.AsyncClient() as client: From cea1a2dd8b74d1e10f64786964c4d84c83079d76 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 22:45:39 +0900 Subject: [PATCH 076/230] Update agent_server.py re routing --- src/agents/agent_server.py | 80 +++++++++++++++++++++++++++++++++----- 1 file changed, 70 insertions(+), 10 deletions(-) diff --git a/src/agents/agent_server.py 
b/src/agents/agent_server.py index 8af844d2..76edb953 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -166,6 +166,63 @@ async def agent_endpoint(req: Request): raise HTTPException(400, f"Unknown agent: {agent_type}") result = await Runner.run(agent, input=user_input) + + # Determine if agent output is structured or needs clarification + try: + parsed_output = json.loads(result.final_output) + is_structured = "output_type" in parsed_output + except Exception: + parsed_output = None + is_structured = False + + if getattr(result, "requires_user_input", None): + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message_raw": json.dumps({ + "type": "clarification", + "content": result.requires_user_input + }), + "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), + "created_at": datetime.utcnow().isoformat() + } + elif is_structured: + webhook = STRUCTURED_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message_raw": json.dumps(parsed_output), + "metadata_raw": json.dumps({ "reason": "Structured agent response" }), + "created_at": datetime.utcnow().isoformat() + } + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": agent_type, + "message_raw": json.dumps({ + "type": "text", + "content": result.final_output + }), + "metadata_raw": json.dumps({ "reason": "Agent returned unstructured output" }), + "created_at": datetime.utcnow().isoformat() + } + + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) + response = await client.post(webhook, json=payload) + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") + return {"ok": True} + parsed_output = None is_structured = False try: @@ -195,8 +252,7 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_raw": json.dumps(parsed_output), - "metadata_raw": json.dumps({ "reason": "Final structured output" }), + "message": parsed_output, "created_at": datetime.utcnow().isoformat() } else: @@ -286,8 +342,9 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_raw": json.dumps(parsed_output if parsed_output else { "type": "text", "content": result.final_output }), - "metadata_raw": json.dumps({ "reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message" }), + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", "created_at": datetime.utcnow().isoformat() } else: @@ -296,8 +353,9 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_raw": json.dumps(parsed_output if parsed_output else { "type": "text", "content": result.final_output }), - "metadata_raw": json.dumps({ "reason": "Agent requested 
clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message" }), + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", "created_at": datetime.utcnow().isoformat() } @@ -348,8 +406,9 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_raw": json.dumps(parsed_output if parsed_output else { "type": "text", "content": result.final_output }), - "metadata_raw": json.dumps({ "reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message" }), + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", "created_at": datetime.utcnow().isoformat() } else: @@ -358,8 +417,9 @@ async def agent_endpoint(req: Request): "task_id": data.get("task_id"), "user_id": data.get("user_id"), "agent_type": agent_type, - "message_raw": json.dumps(parsed_output if parsed_output else { "type": "text", "content": result.final_output }), - "metadata_raw": json.dumps({ "reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message" }), + "message_type": "text", + "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, + "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", "created_at": datetime.utcnow().isoformat() } async with httpx.AsyncClient() as client: From ca741c8e12b5cb36aab66bce9f29e3ae387878c3 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 22:52:37 +0900 Subject: [PATCH 077/230] Update agent_server.py --- src/agents/agent_server.py | 70 ++++++++++++++++++++++++++++++-------- 1 file changed, 55 insertions(+), 15 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 76edb953..79f8cf54 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -155,19 +155,18 @@ async def agent_endpoint(req: Request): if action == "new_task": user_input = data["user_prompt"] mgr_result = await Runner.run(manager_agent, input=user_input) - try: parsed_mgr = json.loads(mgr_result.final_output) + + # ✅ Case 1: Manager routes to downstream agent if isinstance(parsed_mgr, dict) and "route_to" in parsed_mgr: - # Manager successfully routed to downstream agent agent_type = parsed_mgr["route_to"] agent = AGENT_MAP.get(agent_type) if not agent: raise HTTPException(400, f"Unknown agent: {agent_type}") - + result = await Runner.run(agent, input=user_input) - # Determine if agent output is structured or needs clarification try: parsed_output = json.loads(result.final_output) is_structured = "output_type" in parsed_output @@ -195,7 +194,7 @@ async def agent_endpoint(req: Request): "user_id": data.get("user_id"), "agent_type": agent_type, "message_raw": json.dumps(parsed_output), - "metadata_raw": json.dumps({ "reason": "Structured agent response" }), + "metadata_raw": json.dumps({ "reason": "Structured agent output" 
}), "created_at": datetime.utcnow().isoformat() } else: @@ -212,17 +211,58 @@ async def agent_endpoint(req: Request): "created_at": datetime.utcnow().isoformat() } - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - return {"ok": True} + # ✅ Case 2: Manager is unclear and returns a clarification question (not a route_to) + else: + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": "manager", + "message_raw": json.dumps({ + "type": "clarification", + "content": mgr_result.final_output.strip() + }), + "metadata_raw": json.dumps({ "reason": "Manager requested clarification" }), + "created_at": datetime.utcnow().isoformat() + } + except Exception: + # Fallback for malformed manager output + webhook = CLARIFICATION_WEBHOOK_URL + payload = { + "task_id": data.get("task_id"), + "user_id": data.get("user_id"), + "agent_type": "manager", + "message_raw": json.dumps({ + "type": "clarification", + "content": mgr_result.final_output.strip() + }), + "metadata_raw": json.dumps({ "reason": "Manager output parsing error" }), + "created_at": datetime.utcnow().isoformat() + } + + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {webhook}") + print("Payload being sent:") + print(json.dumps(payload, indent=2)) + response = await client.post(webhook, json=payload) + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") + return {"ok": True"} + + + try: + parsed_mgr = json.loads(mgr_result.final_output) + if isinstance(parsed_mgr, dict) and "route_to" in parsed_mgr: + # Manager successfully routed to downstream agent + agent_type = parsed_mgr["route_to"] + agent = AGENT_MAP.get(agent_type) + if not agent: + raise HTTPException(400, f"Unknown agent: {agent_type}") + + result = await Runner.run(agent, input=user_input) parsed_output = None is_structured = False try: From 1ff960ce51e9ade171aacf396e9e55e2b40b5dc4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 22:56:59 +0900 Subject: [PATCH 078/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 79f8cf54..be1b9b5e 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -250,7 +250,7 @@ async def agent_endpoint(req: Request): print(f"Response Status: {response.status_code}") print(f"Response Body: {response.text}") print("========================") - return {"ok": True"} + return {"ok": True} try: From 092fa8e2b413023e2f26c1710b252387588c56da Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 23:07:17 +0900 Subject: [PATCH 079/230] Update agent_server.py --- src/agents/agent_server.py | 153 +------------------------------------ 1 file changed, 1 insertion(+), 152 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index be1b9b5e..64ba6248 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -155,103 +155,6 
@@ async def agent_endpoint(req: Request): if action == "new_task": user_input = data["user_prompt"] mgr_result = await Runner.run(manager_agent, input=user_input) - try: - parsed_mgr = json.loads(mgr_result.final_output) - - # ✅ Case 1: Manager routes to downstream agent - if isinstance(parsed_mgr, dict) and "route_to" in parsed_mgr: - agent_type = parsed_mgr["route_to"] - agent = AGENT_MAP.get(agent_type) - if not agent: - raise HTTPException(400, f"Unknown agent: {agent_type}") - - result = await Runner.run(agent, input=user_input) - - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - parsed_output = None - is_structured = False - - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_raw": json.dumps({ - "type": "clarification", - "content": result.requires_user_input - }), - "metadata_raw": json.dumps({ "reason": "Agent requested clarification" }), - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_raw": json.dumps(parsed_output), - "metadata_raw": json.dumps({ "reason": "Structured agent output" }), - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_raw": json.dumps({ - "type": "text", - "content": result.final_output - }), - "metadata_raw": json.dumps({ "reason": "Agent returned unstructured output" }), - "created_at": datetime.utcnow().isoformat() - } - - # ✅ Case 2: Manager is unclear and returns a clarification question (not a route_to) - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": "manager", - "message_raw": json.dumps({ - "type": "clarification", - "content": mgr_result.final_output.strip() - }), - "metadata_raw": json.dumps({ "reason": "Manager requested clarification" }), - "created_at": datetime.utcnow().isoformat() - } - - except Exception: - # Fallback for malformed manager output - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": "manager", - "message_raw": json.dumps({ - "type": "clarification", - "content": mgr_result.final_output.strip() - }), - "metadata_raw": json.dumps({ "reason": "Manager output parsing error" }), - "created_at": datetime.utcnow().isoformat() - } - - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - return {"ok": True} - try: parsed_mgr = json.loads(mgr_result.final_output) @@ -356,61 +259,7 @@ async def agent_endpoint(req: Request): print("========================") return {"ok": True} - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - parsed_output = None - is_structured = False - - if getattr(result, 
"requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": {"reason": "Agent requested clarification"}, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", - "created_at": datetime.utcnow().isoformat() - } - - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - return {"ok": True} - - elif action == "new_message": +elif action == "new_message": user_msg = data.get("message") or data.get("user_prompt") if user_msg is None: raise HTTPException(422, "Missing 'message' or 'user_prompt'") From 2ca8e3232782ee68d7e99dd360844d4bf211df27 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 25 Apr 2025 23:12:26 +0900 Subject: [PATCH 080/230] Update agent_server.py From a1d6cdee1dba865f3371f40dd113db14cd0e3853 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 11:15:46 +0900 Subject: [PATCH 081/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 64ba6248..e4754213 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -259,7 +259,7 @@ async def agent_endpoint(req: Request): print("========================") return {"ok": True} -elif action == "new_message": + if action == "new_message": user_msg = data.get("message") or data.get("user_prompt") if user_msg is None: raise HTTPException(422, "Missing 'message' or 'user_prompt'") From 645d02c3dfdd7e792e24a5451848c1646b3f4dc2 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 11:26:41 +0900 Subject: [PATCH 082/230] Update agent_server.py, v1 production scale ready 2025.04.26 --- src/agents/agent_server.py | 367 ++++++++++++------------------------- 1 file changed, 118 insertions(+), 249 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index e4754213..c85552cd 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,57 +1,45 @@ +# 
agents/agent_server.py (Production-cleaned version) + import os import sys import json -import asyncio from datetime import datetime +import asyncio + from dotenv import load_dotenv +from fastapi import FastAPI, Request, HTTPException +from fastapi.middleware.cors import CORSMiddleware +import httpx -# 1) Load environment variables +# Load environment variables load_dotenv() -# 2) Add project src folder so "agents" can import its own util subpackage +# Make sure we can import from /agents/util, etc. sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -import httpx -from fastapi import FastAPI, Request, HTTPException -from fastapi.middleware.cors import CORSMiddleware - -# 3) Core SDK imports -from agents import Agent, Runner, tool - -# 4) SDK guardrail types (so guardrail imports work) +from agents import Agent, Runner from agents.util._types import MaybeAwaitable +from .agent_onboarding import router as onboarding_router +from .agent_profilebuilder import router as profilebuilder_router # ─────────────────────────────────────────────────────────── -# 5) Agent definitions (Phase 1: keep them here) +# Agent Definitions # ─────────────────────────────────────────────────────────── manager_agent = Agent( name="Manager", instructions=""" You are an intelligent router for user requests. -Decide the intent behind the message: strategy, content, repurpose, feedback. -If you are unsure or need more info, ask a clarifying question instead of routing. -If clarification is needed, respond only with a plain text clarification question. -Otherwise, respond in strict JSON like: -{ "route_to": "strategy", "reason": "User wants a campaign plan" } +Decide the intent: strategy, content, repurpose, feedback. +If unclear, ask a clarification. Otherwise respond strictly in JSON: +{ "route_to": "strategy", "reason": "..." } """ ) strategy_agent = Agent( name="StrategyAgent", instructions=""" -You create clear, actionable 7-day social media campaign strategies. -If user input is unclear or missing platform, audience, or tone — ask for clarification. -Respond in structured JSON like: -{ - "output_type": "strategy_plan", - "contains_image": false, - "details": { - "days": [ - { "title": "...", "theme": "...", "cta": "..." } - ] - } -} -Only return JSON in this format. +You create 7-day social media strategies. +Respond ONLY in structured JSON format. """, tools=[] ) @@ -59,24 +47,8 @@ content_agent = Agent( name="ContentAgent", instructions=""" -You write engaging, brand-aligned social content. -If user input lacks platform or goal, ask for clarification. -Return post drafts in this JSON format: -{ - "output_type": "content_variants", - "contains_image": false, - "details": { - "variants": [ - { - "platform": "Instagram", - "caption": "...", - "hook": "...", - "cta": "..." - } - ] - } -} -Only respond in this format. +You write brand-aligned social posts. +Respond ONLY in structured JSON format. """, tools=[] ) @@ -84,22 +56,8 @@ repurpose_agent = Agent( name="RepurposeAgent", instructions=""" -You convert existing posts into new formats for different platforms. -Respond using this format: -{ - "output_type": "repurposed_posts", - "contains_image": false, - "details": { - "original": "...", - "repurposed": [ - { - "platform": "...", - "caption": "...", - "format": "..." - } - ] - } -} +You repurpose content across platforms. +Respond ONLY in structured JSON format. 
""", tools=[] ) @@ -107,28 +65,26 @@ feedback_agent = Agent( name="FeedbackAgent", instructions=""" -You evaluate content and offer improvements. -Respond in this structured format: -{ - "output_type": "content_feedback", - "contains_image": false, - "details": { - "original": "...", - "feedback": "...", - "suggested_edit": "..." - } -} +You critique content and suggest edits. +Respond ONLY in structured JSON format. """, tools=[] ) AGENT_MAP = { - "strategy": strategy_agent, - "content": content_agent, + "strategy": strategy_agent, + "content": content_agent, "repurpose": repurpose_agent, - "feedback": feedback_agent, + "feedback": feedback_agent, } +STRUCTURED_WEBHOOK_URL = os.getenv("BUBBLE_STRUCTURED_URL") +CLARIFICATION_WEBHOOK_URL = os.getenv("BUBBLE_CHAT_URL") + +# ─────────────────────────────────────────────────────────── +# FastAPI App Setup +# ─────────────────────────────────────────────────────────── + app = FastAPI() app.add_middleware( CORSMiddleware, @@ -138,190 +94,103 @@ allow_headers=["*"], ) -from .agent_onboarding import router as onboarding_router -from .agent_profilebuilder import router as profilebuilder_router app.include_router(onboarding_router) app.include_router(profilebuilder_router) -STRUCTURED_WEBHOOK_URL = os.getenv("BUBBLE_STRUCTURED_URL") -CLARIFICATION_WEBHOOK_URL = os.getenv("BUBBLE_CHAT_URL") +# ─────────────────────────────────────────────────────────── +# Helper Functions +# ─────────────────────────────────────────────────────────── + +def build_clarification_payload(task_id, user_id, agent_type, message_text, reason="Agent requested clarification"): + return { + "task_id": task_id, + "user_id": user_id, + "agent_type": agent_type, + "message": { "type": "text", "content": message_text }, + "metadata": { "reason": reason }, + "created_at": datetime.utcnow().isoformat() + } + +def build_structured_payload(task_id, user_id, agent_type, structured_obj): + return { + "task_id": task_id, + "user_id": user_id, + "agent_type": agent_type, + "message": structured_obj, + "created_at": datetime.utcnow().isoformat() + } + +async def dispatch_webhook(url, payload): + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===") + print(f"Webhook URL: {url}") + print("Payload being sent:", json.dumps(payload, indent=2)) + response = await client.post(url, json=payload) + print(f"Response Status: {response.status_code}") + print(f"Response Body: {response.text}") + print("========================") + +# ─────────────────────────────────────────────────────────── +# Main Endpoint +# ─────────────────────────────────────────────────────────── @app.post("/agent") async def agent_endpoint(req: Request): data = await req.json() action = data.get("action") - # PATCHED SECTION: new_task action handling + if action not in ("new_task", "new_message"): + raise HTTPException(400, "Unknown action") + + user_input = data.get("user_prompt") or data.get("message") + if not user_input: + raise HTTPException(422, "Missing 'user_prompt' or 'message'") + + task_id = data.get("task_id") + user_id = data.get("user_id") + if action == "new_task": - user_input = data["user_prompt"] - mgr_result = await Runner.run(manager_agent, input=user_input) - + # Always first pass through manager + manager_result = await Runner.run(manager_agent, input=user_input) try: - parsed_mgr = json.loads(mgr_result.final_output) - if isinstance(parsed_mgr, dict) and "route_to" in parsed_mgr: - # Manager successfully routed to downstream agent - agent_type = parsed_mgr["route_to"] - agent = 
AGENT_MAP.get(agent_type) + parsed = json.loads(manager_result.final_output) + if isinstance(parsed, dict) and "route_to" in parsed: + downstream_agent_type = parsed["route_to"] + agent = AGENT_MAP.get(downstream_agent_type) if not agent: - raise HTTPException(400, f"Unknown agent: {agent_type}") - + raise HTTPException(400, f"Unknown agent type: {downstream_agent_type}") result = await Runner.run(agent, input=user_input) - parsed_output = None - is_structured = False - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - pass - - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": { - "reason": "Agent requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message": parsed_output, - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message": { - "type": "text", - "content": result.final_output - }, - "metadata": { - "reason": "Agent returned unstructured output" - }, - "created_at": datetime.utcnow().isoformat() - } - else: - # Manager requested clarification directly - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": "manager", - "message": { - "type": "text", - "content": mgr_result.final_output.strip() - }, - "metadata": { - "reason": "Manager requested clarification" - }, - "created_at": datetime.utcnow().isoformat() - } - + payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager requested clarification") + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + return {"ok": True} except Exception: - # Manager returned malformed or unclear output - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": "manager", - "message": { - "type": "text", - "content": mgr_result.final_output.strip() - }, - "metadata": { - "reason": "Manager output parsing error" - }, - "created_at": datetime.utcnow().isoformat() - } - - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - return {"ok": True} - - if action == "new_message": - user_msg = data.get("message") or data.get("user_prompt") - if user_msg is None: - raise HTTPException(422, "Missing 'message' or 'user_prompt'") - - sess = data.get("agent_session_id") - agent_type = sess if sess in AGENT_MAP else "manager" - agent = AGENT_MAP.get(agent_type, manager_agent) - result = await Runner.run(agent, input=user_msg) - - try: - parsed_output = json.loads(result.final_output) - is_structured = "output_type" in parsed_output - except Exception: - parsed_output = 
None - is_structured = False - - if getattr(result, "requires_user_input", None): - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": sess or "manager", - "message": { - "type": "text", - "content": result.requires_user_input - }, - "metadata": {"reason": "Agent requested clarification"}, - "created_at": datetime.utcnow().isoformat() - } - elif is_structured: - webhook = STRUCTURED_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", - "created_at": datetime.utcnow().isoformat() - } - else: - webhook = CLARIFICATION_WEBHOOK_URL - payload = { - "task_id": data.get("task_id"), - "user_id": data.get("user_id"), - "agent_type": agent_type, - "message_type": "text", - "message_content": result.requires_user_input if getattr(result, "requires_user_input", None) else result.final_output, - "metadata_reason": "Agent requested clarification" if getattr(result, "requires_user_input", None) else "Auto-forwarded message", - "created_at": datetime.utcnow().isoformat() - } - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {webhook}") - print("Payload being sent:") - print(json.dumps(payload, indent=2)) - response = await client.post(webhook, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") - print("========================") - - return {"ok": True} - + payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager output parsing error") + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + return {"ok": True} + + elif action == "new_message": + agent_session = data.get("agent_session_id") + downstream_agent_type = agent_session if agent_session in AGENT_MAP else "manager" + agent = AGENT_MAP.get(downstream_agent_type, manager_agent) + result = await Runner.run(agent, input=user_input) + + # Common: Now interpret result and dispatch to webhook + try: + parsed = json.loads(result.final_output) + is_structured = "output_type" in parsed + except Exception: + parsed = None + is_structured = False + + if getattr(result, "requires_user_input", None): + payload = build_clarification_payload(task_id, user_id, downstream_agent_type, result.requires_user_input) + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + elif is_structured: + payload = build_structured_payload(task_id, user_id, downstream_agent_type, parsed) + await dispatch_webhook(STRUCTURED_WEBHOOK_URL, payload) else: - raise HTTPException(400, "Unknown action") + payload = build_clarification_payload(task_id, user_id, downstream_agent_type, result.final_output.strip(), reason="Agent returned unstructured output") + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + + return {"ok": True} From a09e64e2fdb30bd88f86d5ef93126344311cf323 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 12:03:47 +0900 Subject: [PATCH 083/230] Update agent_server.py v2 updated with new 2 layer routing --- src/agents/agent_server.py | 77 +++++++++++++++++++++++++------------- 1 file 
changed, 50 insertions(+), 27 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index c85552cd..79d6c02b 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,4 +1,4 @@ -# agents/agent_server.py (Production-cleaned version) +# agents/agent_server.py import os import sys @@ -14,9 +14,8 @@ # Load environment variables load_dotenv() -# Make sure we can import from /agents/util, etc. +# Import project modules sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) - from agents import Agent, Runner from agents.util._types import MaybeAwaitable from .agent_onboarding import router as onboarding_router @@ -37,37 +36,25 @@ strategy_agent = Agent( name="StrategyAgent", - instructions=""" -You create 7-day social media strategies. -Respond ONLY in structured JSON format. -""", + instructions="You create structured 7-day social media strategies. Only respond in JSON.", tools=[] ) content_agent = Agent( name="ContentAgent", - instructions=""" -You write brand-aligned social posts. -Respond ONLY in structured JSON format. -""", + instructions="You write structured social media content. Only respond in JSON.", tools=[] ) repurpose_agent = Agent( name="RepurposeAgent", - instructions=""" -You repurpose content across platforms. -Respond ONLY in structured JSON format. -""", + instructions="You repurpose structured content across platforms. Only respond in JSON.", tools=[] ) feedback_agent = Agent( name="FeedbackAgent", - instructions=""" -You critique content and suggest edits. -Respond ONLY in structured JSON format. -""", + instructions="You critique and edit content. Only respond in JSON.", tools=[] ) @@ -82,7 +69,7 @@ CLARIFICATION_WEBHOOK_URL = os.getenv("BUBBLE_CHAT_URL") # ─────────────────────────────────────────────────────────── -# FastAPI App Setup +# FastAPI Setup # ─────────────────────────────────────────────────────────── app = FastAPI() @@ -150,22 +137,48 @@ async def agent_endpoint(req: Request): user_id = data.get("user_id") if action == "new_task": - # Always first pass through manager manager_result = await Runner.run(manager_agent, input=user_input) try: parsed = json.loads(manager_result.final_output) + if isinstance(parsed, dict) and "route_to" in parsed: downstream_agent_type = parsed["route_to"] + + # ✅ Step 1: Send manager routing webhook (as clarification message) + routing_message = f"Routing user to {downstream_agent_type} because {parsed.get('reason', 'unspecified reason')}." 
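# A rough sketch of the client side of this two-layer flow (illustration only, not part of the patch):
# it assumes the FastAPI app is served locally on port 8000, and task_id, user_id and the prompt
# text are placeholder values. The server itself answers {"ok": True}; the agent output arrives
# later through the structured or clarification webhook.
import asyncio
import httpx

async def send_new_task() -> None:
    async with httpx.AsyncClient() as client:
        resp = await client.post(
            "http://localhost:8000/agent",
            json={
                "action": "new_task",
                "task_id": "task-123",   # hypothetical id
                "user_id": "user-456",   # hypothetical id
                "user_prompt": "Plan a 7-day Instagram campaign for my bakery",
            },
        )
        print(resp.status_code, resp.json())

if __name__ == "__main__":
    asyncio.run(send_new_task())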
+ routing_payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": "manager", + "message": { "type": "routing", "content": routing_message }, + "metadata": { "reason": "Manager routed to downstream agent" }, + "created_at": datetime.utcnow().isoformat() + } + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, routing_payload) + + # ✅ Step 2: Actually run downstream agent agent = AGENT_MAP.get(downstream_agent_type) if not agent: raise HTTPException(400, f"Unknown agent type: {downstream_agent_type}") + result = await Runner.run(agent, input=user_input) + else: - payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager requested clarification") + # Manager requested clarification directly + payload = build_clarification_payload( + task_id, user_id, "manager", + manager_result.final_output.strip(), + reason="Manager requested clarification" + ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) return {"ok": True} + except Exception: - payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager output parsing error") + payload = build_clarification_payload( + task_id, user_id, "manager", + manager_result.final_output.strip(), + reason="Manager output parsing error" + ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) return {"ok": True} @@ -175,7 +188,7 @@ async def agent_endpoint(req: Request): agent = AGENT_MAP.get(downstream_agent_type, manager_agent) result = await Runner.run(agent, input=user_input) - # Common: Now interpret result and dispatch to webhook + # COMMON SECTION: Dispatch agent result try: parsed = json.loads(result.final_output) is_structured = "output_type" in parsed @@ -184,13 +197,23 @@ async def agent_endpoint(req: Request): is_structured = False if getattr(result, "requires_user_input", None): - payload = build_clarification_payload(task_id, user_id, downstream_agent_type, result.requires_user_input) + payload = build_clarification_payload( + task_id, user_id, downstream_agent_type, + result.requires_user_input + ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) elif is_structured: - payload = build_structured_payload(task_id, user_id, downstream_agent_type, parsed) + payload = build_structured_payload( + task_id, user_id, downstream_agent_type, + parsed + ) await dispatch_webhook(STRUCTURED_WEBHOOK_URL, payload) else: - payload = build_clarification_payload(task_id, user_id, downstream_agent_type, result.final_output.strip(), reason="Agent returned unstructured output") + payload = build_clarification_payload( + task_id, user_id, downstream_agent_type, + result.final_output.strip(), + reason="Agent returned unstructured output" + ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) return {"ok": True} From 43bc8ae50fd7343b4c00fad6223584c3ba2c018f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 12:29:06 +0900 Subject: [PATCH 084/230] Update agent_server.py --- src/agents/agent_server.py | 85 +++++++++++++------------------------- 1 file changed, 29 insertions(+), 56 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 79d6c02b..b046511f 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -14,15 +14,15 @@ # Load environment variables load_dotenv() -# Import project modules sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + from agents import 
Agent, Runner from agents.util._types import MaybeAwaitable from .agent_onboarding import router as onboarding_router from .agent_profilebuilder import router as profilebuilder_router # ─────────────────────────────────────────────────────────── -# Agent Definitions +# Agents # ─────────────────────────────────────────────────────────── manager_agent = Agent( name="Manager", @@ -36,25 +36,25 @@ strategy_agent = Agent( name="StrategyAgent", - instructions="You create structured 7-day social media strategies. Only respond in JSON.", + instructions="You create 7-day social media strategies. Respond ONLY in structured JSON.", tools=[] ) content_agent = Agent( name="ContentAgent", - instructions="You write structured social media content. Only respond in JSON.", + instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.", tools=[] ) repurpose_agent = Agent( name="RepurposeAgent", - instructions="You repurpose structured content across platforms. Only respond in JSON.", + instructions="You repurpose content across platforms. Respond ONLY in structured JSON.", tools=[] ) feedback_agent = Agent( name="FeedbackAgent", - instructions="You critique and edit content. Only respond in JSON.", + instructions="You critique content and suggest edits. Respond ONLY in structured JSON.", tools=[] ) @@ -71,7 +71,6 @@ # ─────────────────────────────────────────────────────────── # FastAPI Setup # ─────────────────────────────────────────────────────────── - app = FastAPI() app.add_middleware( CORSMiddleware, @@ -85,7 +84,7 @@ app.include_router(profilebuilder_router) # ─────────────────────────────────────────────────────────── -# Helper Functions +# Helpers # ─────────────────────────────────────────────────────────── def build_clarification_payload(task_id, user_id, agent_type, message_text, reason="Agent requested clarification"): @@ -111,7 +110,7 @@ async def dispatch_webhook(url, payload): async with httpx.AsyncClient() as client: print("=== Webhook Dispatch ===") print(f"Webhook URL: {url}") - print("Payload being sent:", json.dumps(payload, indent=2)) + print(json.dumps(payload, indent=2)) response = await client.post(url, json=payload) print(f"Response Status: {response.status_code}") print(f"Response Body: {response.text}") @@ -137,58 +136,42 @@ async def agent_endpoint(req: Request): user_id = data.get("user_id") if action == "new_task": + # Always first pass through manager manager_result = await Runner.run(manager_agent, input=user_input) + try: - parsed = json.loads(manager_result.final_output) - - if isinstance(parsed, dict) and "route_to" in parsed: - downstream_agent_type = parsed["route_to"] - - # ✅ Step 1: Send manager routing webhook (as clarification message) - routing_message = f"Routing user to {downstream_agent_type} because {parsed.get('reason', 'unspecified reason')}." 
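# Across these revisions the webhook payload alternates between a nested shape
# ("message" / "metadata" objects) and a flattened shape ("message_raw" / "metadata_raw"
# carrying JSON-encoded strings). A minimal sketch of how a receiving webhook might
# normalize either shape; the helper name and the sample values are assumptions,
# not part of the patches.
import json

def normalize_payload(payload: dict) -> dict:
    # Convert the flattened *_raw fields back into nested objects when present.
    if "message_raw" in payload:
        payload = dict(payload)
        payload["message"] = json.loads(payload.pop("message_raw"))
        payload["metadata"] = json.loads(payload.pop("metadata_raw", "{}"))
    return payload

flat = {
    "task_id": "task-123",  # hypothetical values
    "agent_type": "strategy",
    "message_raw": json.dumps({"type": "clarification", "content": "Which platform?"}),
    "metadata_raw": json.dumps({"reason": "Agent requested clarification"}),
}
print(normalize_payload(flat)["message"]["content"])  # -> Which platform?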
- routing_payload = { - "task_id": task_id, - "user_id": user_id, - "agent_type": "manager", - "message": { "type": "routing", "content": routing_message }, - "metadata": { "reason": "Manager routed to downstream agent" }, - "created_at": datetime.utcnow().isoformat() - } - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, routing_payload) - - # ✅ Step 2: Actually run downstream agent - agent = AGENT_MAP.get(downstream_agent_type) + parsed_mgr = json.loads(manager_result.final_output) + if isinstance(parsed_mgr, dict) and "route_to" in parsed_mgr: + # (1) Send Manager's routing decision as a webhook + payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager routing decision") + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + + # (2) Then run the downstream agent + downstream_type = parsed_mgr["route_to"] + agent = AGENT_MAP.get(downstream_type) if not agent: - raise HTTPException(400, f"Unknown agent type: {downstream_agent_type}") + raise HTTPException(400, f"Unknown agent type: {downstream_type}") result = await Runner.run(agent, input=user_input) else: - # Manager requested clarification directly - payload = build_clarification_payload( - task_id, user_id, "manager", - manager_result.final_output.strip(), - reason="Manager requested clarification" - ) + # Manager needs clarification directly + payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager requested clarification") await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) return {"ok": True} except Exception: - payload = build_clarification_payload( - task_id, user_id, "manager", - manager_result.final_output.strip(), - reason="Manager output parsing error" - ) + payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager output parsing error") await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) return {"ok": True} elif action == "new_message": agent_session = data.get("agent_session_id") - downstream_agent_type = agent_session if agent_session in AGENT_MAP else "manager" - agent = AGENT_MAP.get(downstream_agent_type, manager_agent) + downstream_type = agent_session if agent_session in AGENT_MAP else "manager" + agent = AGENT_MAP.get(downstream_type, manager_agent) result = await Runner.run(agent, input=user_input) - # COMMON SECTION: Dispatch agent result + # Common: Dispatch downstream agent output try: parsed = json.loads(result.final_output) is_structured = "output_type" in parsed @@ -197,23 +180,13 @@ async def agent_endpoint(req: Request): is_structured = False if getattr(result, "requires_user_input", None): - payload = build_clarification_payload( - task_id, user_id, downstream_agent_type, - result.requires_user_input - ) + payload = build_clarification_payload(task_id, user_id, downstream_type, result.requires_user_input) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) elif is_structured: - payload = build_structured_payload( - task_id, user_id, downstream_agent_type, - parsed - ) + payload = build_structured_payload(task_id, user_id, downstream_type, parsed) await dispatch_webhook(STRUCTURED_WEBHOOK_URL, payload) else: - payload = build_clarification_payload( - task_id, user_id, downstream_agent_type, - result.final_output.strip(), - reason="Agent returned unstructured output" - ) + payload = build_clarification_payload(task_id, user_id, downstream_type, result.final_output.strip(), reason="Agent 
returned unstructured output") await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) return {"ok": True} From 616a6d51e6df179b8955942d3cc34a24a3d1575d Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 12:58:04 +0900 Subject: [PATCH 085/230] Update agent_server.py --- src/agents/agent_server.py | 78 +++++++++++++++++++++++--------------- 1 file changed, 48 insertions(+), 30 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index b046511f..e2e31a8e 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -136,34 +136,40 @@ async def agent_endpoint(req: Request): user_id = data.get("user_id") if action == "new_task": - # Always first pass through manager + # 1. Manager decides what to do manager_result = await Runner.run(manager_agent, input=user_input) - + + # ── parse manager output (tiny try/except) ──────────── try: - parsed_mgr = json.loads(manager_result.final_output) - if isinstance(parsed_mgr, dict) and "route_to" in parsed_mgr: - # (1) Send Manager's routing decision as a webhook - payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager routing decision") - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - - # (2) Then run the downstream agent - downstream_type = parsed_mgr["route_to"] - agent = AGENT_MAP.get(downstream_type) - if not agent: - raise HTTPException(400, f"Unknown agent type: {downstream_type}") - - result = await Runner.run(agent, input=user_input) - - else: - # Manager needs clarification directly - payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager requested clarification") - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - return {"ok": True} - + mgr_json = json.loads(manager_result.final_output) + route_to = mgr_json.get("route_to") except Exception: - payload = build_clarification_payload(task_id, user_id, "manager", manager_result.final_output.strip(), reason="Manager output parsing error") + # malformed manager output → ask for clarification and return + payload = build_clarification_payload( + task_id, user_id, "manager", + manager_result.final_output.strip(), + reason="Manager output parsing error" + ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) return {"ok": True} + + # 2. Send manager-routing webhook (for transparency) + payload = build_clarification_payload( + task_id, user_id, "manager", + manager_result.final_output.strip(), + reason="Manager routing decision" + ) + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + + # 3. 
Run the downstream agent *outside* the first try/except + if not route_to or route_to not in AGENT_MAP: + # manager needs clarification – nothing more to do + return {"ok": True} + + downstream_type = route_to + agent = AGENT_MAP[downstream_type] + result = await Runner.run(agent, input=user_input) + elif action == "new_message": agent_session = data.get("agent_session_id") @@ -173,20 +179,32 @@ async def agent_endpoint(req: Request): # Common: Dispatch downstream agent output try: - parsed = json.loads(result.final_output) - is_structured = "output_type" in parsed + parsed = json.loads(result.final_output) + is_structured = "output_type" in parsed except Exception: parsed = None is_structured = False - + if getattr(result, "requires_user_input", None): - payload = build_clarification_payload(task_id, user_id, downstream_type, result.requires_user_input) + payload = build_clarification_payload( + task_id, user_id, downstream_type, + result.requires_user_input, + reason="Agent requested clarification" + ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + elif is_structured: - payload = build_structured_payload(task_id, user_id, downstream_type, parsed) + payload = build_structured_payload( + task_id, user_id, downstream_type, parsed + ) await dispatch_webhook(STRUCTURED_WEBHOOK_URL, payload) + else: - payload = build_clarification_payload(task_id, user_id, downstream_type, result.final_output.strip(), reason="Agent returned unstructured output") + payload = build_clarification_payload( + task_id, user_id, downstream_type, + result.final_output.strip(), + reason="Agent returned unstructured output" + ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - + return {"ok": True} From 58ff32217f358510fd3a35ac408e753d86102931 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 13:06:12 +0900 Subject: [PATCH 086/230] Update agent_server.py --- src/agents/agent_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index e2e31a8e..2b186cae 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -150,8 +150,8 @@ async def agent_endpoint(req: Request): manager_result.final_output.strip(), reason="Manager output parsing error" ) - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - return {"ok": True} + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + return {"ok": True} # 2. Send manager-routing webhook (for transparency) payload = build_clarification_payload( From 2ca616a51e54162a2a051f79bd8906ecb58a4963 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 13:11:26 +0900 Subject: [PATCH 087/230] Update agent_server.py --- src/agents/agent_server.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 2b186cae..314e2a05 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -138,7 +138,7 @@ async def agent_endpoint(req: Request): if action == "new_task": # 1. 
Manager decides what to do manager_result = await Runner.run(manager_agent, input=user_input) - + # ── parse manager output (tiny try/except) ──────────── try: mgr_json = json.loads(manager_result.final_output) @@ -150,9 +150,9 @@ async def agent_endpoint(req: Request): manager_result.final_output.strip(), reason="Manager output parsing error" ) - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - return {"ok": True} - + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + return {"ok": True} + # 2. Send manager-routing webhook (for transparency) payload = build_clarification_payload( task_id, user_id, "manager", @@ -160,31 +160,30 @@ async def agent_endpoint(req: Request): reason="Manager routing decision" ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - - # 3. Run the downstream agent *outside* the first try/except + + # 3. Run the downstream agent if not route_to or route_to not in AGENT_MAP: # manager needs clarification – nothing more to do return {"ok": True} - + downstream_type = route_to agent = AGENT_MAP[downstream_type] result = await Runner.run(agent, input=user_input) - elif action == "new_message": agent_session = data.get("agent_session_id") downstream_type = agent_session if agent_session in AGENT_MAP else "manager" agent = AGENT_MAP.get(downstream_type, manager_agent) result = await Runner.run(agent, input=user_input) - # Common: Dispatch downstream agent output + # ── common dispatch (exactly once per request) ────────── try: - parsed = json.loads(result.final_output) - is_structured = "output_type" in parsed + parsed = json.loads(result.final_output) + is_structured = "output_type" in parsed except Exception: parsed = None is_structured = False - + if getattr(result, "requires_user_input", None): payload = build_clarification_payload( task_id, user_id, downstream_type, @@ -192,13 +191,13 @@ async def agent_endpoint(req: Request): reason="Agent requested clarification" ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - + elif is_structured: payload = build_structured_payload( task_id, user_id, downstream_type, parsed ) await dispatch_webhook(STRUCTURED_WEBHOOK_URL, payload) - + else: payload = build_clarification_payload( task_id, user_id, downstream_type, @@ -206,5 +205,5 @@ async def agent_endpoint(req: Request): reason="Agent returned unstructured output" ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - + return {"ok": True} From 2c77457ff41497b7d58d067e5cc835a17a9935fd Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 13:17:54 +0900 Subject: [PATCH 088/230] Update agent_server.py --- src/agents/agent_server.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 314e2a05..884f43da 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -29,6 +29,8 @@ instructions=""" You are an intelligent router for user requests. Decide the intent: strategy, content, repurpose, feedback. +Never wrap your JSON in ``` fences or any extra text. +Respond with **only the JSON**. If unclear, ask a clarification. Otherwise respond strictly in JSON: { "route_to": "strategy", "reason": "..." 
} """ From 9b5be82122110e4e330ad2291a7969a59fc50501 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 13:24:57 +0900 Subject: [PATCH 089/230] Update agent_server.py --- src/agents/agent_server.py | 123 ++++++++++++++++++++----------------- 1 file changed, 68 insertions(+), 55 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 884f43da..c3591409 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -122,11 +122,56 @@ async def dispatch_webhook(url, payload): # Main Endpoint # ─────────────────────────────────────────────────────────── +def _clean_json(text: str) -> str: + """Strip ``` fences / leading text so json.loads can succeed.""" + text = text.strip() + if text.startswith("```"): + # take the first fenced block + parts = text.split("```") + if len(parts) >= 3: + text = parts[1] + # remove hints like ```json + if text.startswith("json"): + text = text[4:].strip() + return text.strip() + +async def _send_result(task_id, user_id, agent_type, result): + """Dispatch result from any agent.""" + # 1. agent asks clarification + if getattr(result, "requires_user_input", None): + payload = build_clarification_payload( + task_id, user_id, agent_type, + result.requires_user_input, + reason="Agent requested clarification" + ) + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + return + + # 2. try to parse structured output + try: + parsed = json.loads(result.final_output) + if "output_type" in parsed: + payload = build_structured_payload( + task_id, user_id, agent_type, parsed + ) + await dispatch_webhook(STRUCTURED_WEBHOOK_URL, payload) + return + except Exception: + parsed = None # fallthrough to unstructured + + # 3. unstructured → clarification webhook + payload = build_clarification_payload( + task_id, user_id, agent_type, + result.final_output.strip(), + reason="Agent returned unstructured output" + ) + await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + @app.post("/agent") async def agent_endpoint(req: Request): data = await req.json() - action = data.get("action") + action = data.get("action") if action not in ("new_task", "new_message"): raise HTTPException(400, "Unknown action") @@ -137,75 +182,43 @@ async def agent_endpoint(req: Request): task_id = data.get("task_id") user_id = data.get("user_id") + # ── NEW TASK ────────────────────────────────────────── if action == "new_task": - # 1. Manager decides what to do manager_result = await Runner.run(manager_agent, input=user_input) - # ── parse manager output (tiny try/except) ──────────── + raw = manager_result.final_output + print("Manager raw output:", raw) # <-- keep while debugging try: - mgr_json = json.loads(manager_result.final_output) + mgr_json = json.loads(_clean_json(raw)) route_to = mgr_json.get("route_to") - except Exception: - # malformed manager output → ask for clarification and return - payload = build_clarification_payload( - task_id, user_id, "manager", - manager_result.final_output.strip(), - reason="Manager output parsing error" - ) - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + except Exception as e: + print("Manager parse error:", e) + await _send_result(task_id, user_id, "manager", manager_result) return {"ok": True} - # 2. 
Send manager-routing webhook (for transparency) + # (1) always send manager routing decision payload = build_clarification_payload( task_id, user_id, "manager", - manager_result.final_output.strip(), + json.dumps(mgr_json), # send clean JSON back reason="Manager routing decision" ) await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - # 3. Run the downstream agent - if not route_to or route_to not in AGENT_MAP: - # manager needs clarification – nothing more to do + # (2) no valid route → we're done + if route_to not in AGENT_MAP: return {"ok": True} + # (3) run downstream agent and dispatch its result downstream_type = route_to agent = AGENT_MAP[downstream_type] result = await Runner.run(agent, input=user_input) - - elif action == "new_message": - agent_session = data.get("agent_session_id") - downstream_type = agent_session if agent_session in AGENT_MAP else "manager" - agent = AGENT_MAP.get(downstream_type, manager_agent) - result = await Runner.run(agent, input=user_input) - - # ── common dispatch (exactly once per request) ────────── - try: - parsed = json.loads(result.final_output) - is_structured = "output_type" in parsed - except Exception: - parsed = None - is_structured = False - - if getattr(result, "requires_user_input", None): - payload = build_clarification_payload( - task_id, user_id, downstream_type, - result.requires_user_input, - reason="Agent requested clarification" - ) - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - - elif is_structured: - payload = build_structured_payload( - task_id, user_id, downstream_type, parsed - ) - await dispatch_webhook(STRUCTURED_WEBHOOK_URL, payload) - - else: - payload = build_clarification_payload( - task_id, user_id, downstream_type, - result.final_output.strip(), - reason="Agent returned unstructured output" - ) - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - - return {"ok": True} + await _send_result(task_id, user_id, downstream_type, result) + return {"ok": True} + + # ── NEW MESSAGE ─────────────────────────────────────── + agent_session = data.get("agent_session_id") + downstream_type = agent_session if agent_session in AGENT_MAP else "manager" + agent = AGENT_MAP.get(downstream_type, manager_agent) + result = await Runner.run(agent, input=user_input) + await _send_result(task_id, user_id, downstream_type, result) + return {"ok": True"} From c01c13ba69b0ec7bbca092ba038d5b09cfa67055 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 13:26:45 +0900 Subject: [PATCH 090/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index c3591409..fe7d35b1 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -221,4 +221,4 @@ async def agent_endpoint(req: Request): agent = AGENT_MAP.get(downstream_type, manager_agent) result = await Runner.run(agent, input=user_input) await _send_result(task_id, user_id, downstream_type, result) - return {"ok": True"} + return {"ok": True} From f7901bb0936eef19ab502ea9ef266fa86bd078d9 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 13:47:03 +0900 Subject: [PATCH 091/230] Update agent_server.py --- src/agents/agent_server.py | 293 +++++++++++++++---------------------- 1 file changed, 122 insertions(+), 171 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 
fe7d35b1..d68a48c4 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,224 +1,175 @@ -# agents/agent_server.py +# agents/agent_server.py — tool‑call router version +""" +This revision keeps your existing webhook payloads & downstream agent instructions, but replaces the fragile +JSON‑string parsing with **OpenAI Agents SDK tool calls**. The manager literally calls +`route_to_strategy`, `route_to_content`, etc., so the routing decision is always structured. + +Prerequisites +------------- +* **openai‑python ≥ 1.14** (or any release that exposes `.tool_calls`). + Make sure `requirements.txt` (or `pyproject.toml`) pins `openai>=1.14.0`. +* No database changes; Bubble still sends back `agent_session_id` exactly like before. +""" -import os -import sys -import json +from __future__ import annotations +import os, sys, json from datetime import datetime -import asyncio - +from typing import Any +import httpx from dotenv import load_dotenv from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -import httpx +from pydantic import BaseModel -# Load environment variables +# --- Agent SDK imports -------------------------------------------------------- load_dotenv() - sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) - from agents import Agent, Runner -from agents.util._types import MaybeAwaitable -from .agent_onboarding import router as onboarding_router -from .agent_profilebuilder import router as profilebuilder_router - -# ─────────────────────────────────────────────────────────── -# Agents -# ─────────────────────────────────────────────────────────── -manager_agent = Agent( - name="Manager", - instructions=""" -You are an intelligent router for user requests. -Decide the intent: strategy, content, repurpose, feedback. -Never wrap your JSON in ``` fences or any extra text. -Respond with **only the JSON**. -If unclear, ask a clarification. Otherwise respond strictly in JSON: -{ "route_to": "strategy", "reason": "..." } -""" -) - -strategy_agent = Agent( - name="StrategyAgent", - instructions="You create 7-day social media strategies. Respond ONLY in structured JSON.", - tools=[] -) - -content_agent = Agent( - name="ContentAgent", - instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.", - tools=[] -) - -repurpose_agent = Agent( - name="RepurposeAgent", - instructions="You repurpose content across platforms. Respond ONLY in structured JSON.", - tools=[] -) - -feedback_agent = Agent( - name="FeedbackAgent", - instructions="You critique content and suggest edits. Respond ONLY in structured JSON.", - tools=[] -) - -AGENT_MAP = { - "strategy": strategy_agent, - "content": content_agent, - "repurpose": repurpose_agent, - "feedback": feedback_agent, -} -STRUCTURED_WEBHOOK_URL = os.getenv("BUBBLE_STRUCTURED_URL") -CLARIFICATION_WEBHOOK_URL = os.getenv("BUBBLE_CHAT_URL") +# ----------------------------------------------------------------------------- +# 1. 
Helper payload builders (unchanged) +# ----------------------------------------------------------------------------- -# ─────────────────────────────────────────────────────────── -# FastAPI Setup -# ─────────────────────────────────────────────────────────── -app = FastAPI() -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - -app.include_router(onboarding_router) -app.include_router(profilebuilder_router) - -# ─────────────────────────────────────────────────────────── -# Helpers -# ─────────────────────────────────────────────────────────── +def _now() -> str: return datetime.utcnow().isoformat() -def build_clarification_payload(task_id, user_id, agent_type, message_text, reason="Agent requested clarification"): +def build_clarification_payload(task_id: str, user_id: str, agent_type: str, message_text: str, reason: str): return { "task_id": task_id, "user_id": user_id, "agent_type": agent_type, - "message": { "type": "text", "content": message_text }, - "metadata": { "reason": reason }, - "created_at": datetime.utcnow().isoformat() + "message": {"type": "text", "content": message_text}, + "metadata": {"reason": reason}, + "created_at": _now(), } -def build_structured_payload(task_id, user_id, agent_type, structured_obj): +def build_structured_payload(task_id: str, user_id: str, agent_type: str, obj: dict[str, Any]): return { "task_id": task_id, "user_id": user_id, "agent_type": agent_type, - "message": structured_obj, - "created_at": datetime.utcnow().isoformat() + "message": obj, + "created_at": _now(), } -async def dispatch_webhook(url, payload): +async def dispatch_webhook(url: str, payload: dict): async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===") - print(f"Webhook URL: {url}") - print(json.dumps(payload, indent=2)) - response = await client.post(url, json=payload) - print(f"Response Status: {response.status_code}") - print(f"Response Body: {response.text}") + print("=== Webhook Dispatch ===\n" + json.dumps(payload, indent=2)) + await client.post(url, json=payload) print("========================") -# ─────────────────────────────────────────────────────────── -# Main Endpoint -# ─────────────────────────────────────────────────────────── - -def _clean_json(text: str) -> str: - """Strip ``` fences / leading text so json.loads can succeed.""" - text = text.strip() - if text.startswith("```"): - # take the first fenced block - parts = text.split("```") - if len(parts) >= 3: - text = parts[1] - # remove hints like ```json - if text.startswith("json"): - text = text[4:].strip() - return text.strip() - -async def _send_result(task_id, user_id, agent_type, result): - """Dispatch result from any agent.""" - # 1. agent asks clarification +CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # clarification webhooks +STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # structured‑output webhooks + +# ----------------------------------------------------------------------------- +# 2. 
Agent definitions (instructions untouched) +# ----------------------------------------------------------------------------- +class RouteCall(BaseModel): + reason: str + +TOOLS = [ + {"name": "route_to_strategy", "description": "Send task to StrategyAgent", "parameters": RouteCall.schema()}, + {"name": "route_to_content", "description": "Send task to ContentAgent", "parameters": RouteCall.schema()}, + {"name": "route_to_repurpose", "description": "Send task to RepurposeAgent", "parameters": RouteCall.schema()}, + {"name": "route_to_feedback", "description": "Send task to FeedbackAgent", "parameters": RouteCall.schema()}, +] + +manager_agent = Agent( + name="Manager", + instructions=( + "You are an intelligent router for user requests.\n" + "First decide if you need clarification. If so, set requires_user_input.\n" + "Otherwise, call exactly ONE of the route_to_* tools with a reason." + ), + tools=TOOLS, +) + +strategy_agent = Agent("StrategyAgent", instructions="You create 7‑day social media strategies. Respond ONLY in structured JSON.") +content_agent = Agent("ContentAgent", instructions="You write brand‑aligned social posts. Respond ONLY in structured JSON.") +repurpose_agent = Agent("RepurposeAgent", instructions="You repurpose content across platforms. Respond ONLY in structured JSON.") +feedback_agent = Agent("FeedbackAgent", instructions="You critique content and suggest edits. Respond ONLY in structured JSON.") + +AGENT_MAP = { + "strategy": strategy_agent, + "content": content_agent, + "repurpose": repurpose_agent, + "feedback": feedback_agent, +} + +# ----------------------------------------------------------------------------- +# 3. Common dispatcher for any agent result +# ----------------------------------------------------------------------------- +async def _dispatch_result(task_id: str, user_id: str, agent_key: str, result): + # A) agent asks a question ----------------------------------------------- if getattr(result, "requires_user_input", None): - payload = build_clarification_payload( - task_id, user_id, agent_type, - result.requires_user_input, - reason="Agent requested clarification" + await dispatch_webhook( + CHAT_URL, + build_clarification_payload(task_id, user_id, agent_key, result.requires_user_input, "Agent requested clarification"), ) - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) return - # 2. try to parse structured output + # B) structured JSON ------------------------------------------------------ try: parsed = json.loads(result.final_output) if "output_type" in parsed: - payload = build_structured_payload( - task_id, user_id, agent_type, parsed + await dispatch_webhook( + STRUCT_URL, + build_structured_payload(task_id, user_id, agent_key, parsed), ) - await dispatch_webhook(STRUCTURED_WEBHOOK_URL, payload) return except Exception: - parsed = None # fallthrough to unstructured + pass - # 3. unstructured → clarification webhook - payload = build_clarification_payload( - task_id, user_id, agent_type, - result.final_output.strip(), - reason="Agent returned unstructured output" + # C) fallback clarification ---------------------------------------------- + await dispatch_webhook( + CHAT_URL, + build_clarification_payload(task_id, user_id, agent_key, result.final_output.strip(), "Agent returned unstructured output"), ) - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) + +# ----------------------------------------------------------------------------- +# 4. 
FastAPI setup +# ----------------------------------------------------------------------------- +app = FastAPI() +app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"],) @app.post("/agent") -async def agent_endpoint(req: Request): +async def main_endpoint(req: Request): data = await req.json() - action = data.get("action") if action not in ("new_task", "new_message"): raise HTTPException(400, "Unknown action") - user_input = data.get("user_prompt") or data.get("message") - if not user_input: - raise HTTPException(422, "Missing 'user_prompt' or 'message'") - - task_id = data.get("task_id") - user_id = data.get("user_id") - - # ── NEW TASK ────────────────────────────────────────── - if action == "new_task": - manager_result = await Runner.run(manager_agent, input=user_input) - - raw = manager_result.final_output - print("Manager raw output:", raw) # <-- keep while debugging - try: - mgr_json = json.loads(_clean_json(raw)) - route_to = mgr_json.get("route_to") - except Exception as e: - print("Manager parse error:", e) - await _send_result(task_id, user_id, "manager", manager_result) - return {"ok": True} + task_id = data["task_id"] + user_id = data["user_id"] + user_text = data.get("user_prompt") or data.get("message") + if not user_text: + raise HTTPException(422, "Missing user_prompt or message") + + # Determine which agent should run --------------------------------------- + agent_key = "manager" if action == "new_task" else data.get("agent_session_id", "manager") + agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP.get(agent_key, manager_agent) - # (1) always send manager routing decision - payload = build_clarification_payload( - task_id, user_id, "manager", - json.dumps(mgr_json), # send clean JSON back - reason="Manager routing decision" + result = await Runner.run(agent_obj, input=user_text) + + # Special handling for Manager tool calls -------------------------------- + if agent_key == "manager" and result.tool_calls: + tool_call = result.tool_calls[0] + route_to = tool_call["name"].removeprefix("route_to_") + reason = tool_call["arguments"]["reason"] + + # ① Send manager routing decision webhook + await dispatch_webhook( + CHAT_URL, + build_clarification_payload(task_id, user_id, "manager", json.dumps({"route_to": route_to, "reason": reason}), "Manager routing decision"), ) - await dispatch_webhook(CLARIFICATION_WEBHOOK_URL, payload) - # (2) no valid route → we're done - if route_to not in AGENT_MAP: + # ② Run downstream agent immediately + downstream = AGENT_MAP.get(route_to) + if downstream is None: return {"ok": True} - - # (3) run downstream agent and dispatch its result - downstream_type = route_to - agent = AGENT_MAP[downstream_type] - result = await Runner.run(agent, input=user_input) - await _send_result(task_id, user_id, downstream_type, result) + downstream_result = await Runner.run(downstream, input=user_text) + await _dispatch_result(task_id, user_id, route_to, downstream_result) return {"ok": True} - # ── NEW MESSAGE ─────────────────────────────────────── - agent_session = data.get("agent_session_id") - downstream_type = agent_session if agent_session in AGENT_MAP else "manager" - agent = AGENT_MAP.get(downstream_type, manager_agent) - result = await Runner.run(agent, input=user_input) - await _send_result(task_id, user_id, downstream_type, result) + # Manager asked clarification OR we are in downstream flow --------------- + await _dispatch_result(task_id, user_id, agent_key, result) 
return {"ok": True} From 1a933b072527cfe6046d0d0a7f08750a2f5e86ce Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 13:51:15 +0900 Subject: [PATCH 092/230] Update agent_server.py use proper Tool objects for manager routing --- src/agents/agent_server.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index d68a48c4..d51c2215 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -66,11 +66,13 @@ async def dispatch_webhook(url: str, payload: dict): class RouteCall(BaseModel): reason: str +from agents import Tool # <- make sure this exists in your SDK version + TOOLS = [ - {"name": "route_to_strategy", "description": "Send task to StrategyAgent", "parameters": RouteCall.schema()}, - {"name": "route_to_content", "description": "Send task to ContentAgent", "parameters": RouteCall.schema()}, - {"name": "route_to_repurpose", "description": "Send task to RepurposeAgent", "parameters": RouteCall.schema()}, - {"name": "route_to_feedback", "description": "Send task to FeedbackAgent", "parameters": RouteCall.schema()}, + Tool(name="route_to_strategy", description="Send task to StrategyAgent", parameters=RouteCall.schema()), + Tool(name="route_to_content", description="Send task to ContentAgent", parameters=RouteCall.schema()), + Tool(name="route_to_repurpose", description="Send task to RepurposeAgent", parameters=RouteCall.schema()), + Tool(name="route_to_feedback", description="Send task to FeedbackAgent", parameters=RouteCall.schema()), ] manager_agent = Agent( From 24beaddea0cc6447e090900a593ad288481b1ced Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 13:55:26 +0900 Subject: [PATCH 093/230] Update agent_server.py --- src/agents/agent_server.py | 48 ++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index d51c2215..38226036 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -69,10 +69,50 @@ class RouteCall(BaseModel): from agents import Tool # <- make sure this exists in your SDK version TOOLS = [ - Tool(name="route_to_strategy", description="Send task to StrategyAgent", parameters=RouteCall.schema()), - Tool(name="route_to_content", description="Send task to ContentAgent", parameters=RouteCall.schema()), - Tool(name="route_to_repurpose", description="Send task to RepurposeAgent", parameters=RouteCall.schema()), - Tool(name="route_to_feedback", description="Send task to FeedbackAgent", parameters=RouteCall.schema()), + { + "name": "route_to_strategy", + "description": "Send task to StrategyAgent", + "parameters": { + "type": "object", + "properties": { + "reason": {"type": "string"} + }, + "required": ["reason"], + }, + }, + { + "name": "route_to_content", + "description": "Send task to ContentAgent", + "parameters": { + "type": "object", + "properties": { + "reason": {"type": "string"} + }, + "required": ["reason"], + }, + }, + { + "name": "route_to_repurpose", + "description": "Send task to RepurposeAgent", + "parameters": { + "type": "object", + "properties": { + "reason": {"type": "string"} + }, + "required": ["reason"], + }, + }, + { + "name": "route_to_feedback", + "description": "Send task to FeedbackAgent", + "parameters": { + "type": "object", + "properties": { + "reason": {"type": "string"} + }, + "required": 
["reason"], + }, + }, ] manager_agent = Agent( From e0a9ec06ecdef4b1860cc646adb5a7fc26cfba2e Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 14:00:09 +0900 Subject: [PATCH 094/230] Update agent_server.py --- src/agents/agent_server.py | 56 ++++++++------------------------------ 1 file changed, 11 insertions(+), 45 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 38226036..87bfb065 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -66,53 +66,19 @@ async def dispatch_webhook(url: str, payload: dict): class RouteCall(BaseModel): reason: str -from agents import Tool # <- make sure this exists in your SDK version +from agents import Tool # Tool dataclass from Agents SDK + +_JSON_PARAM = { + "type": "object", + "properties": {"reason": {"type": "string"}}, + "required": ["reason"], +} TOOLS = [ - { - "name": "route_to_strategy", - "description": "Send task to StrategyAgent", - "parameters": { - "type": "object", - "properties": { - "reason": {"type": "string"} - }, - "required": ["reason"], - }, - }, - { - "name": "route_to_content", - "description": "Send task to ContentAgent", - "parameters": { - "type": "object", - "properties": { - "reason": {"type": "string"} - }, - "required": ["reason"], - }, - }, - { - "name": "route_to_repurpose", - "description": "Send task to RepurposeAgent", - "parameters": { - "type": "object", - "properties": { - "reason": {"type": "string"} - }, - "required": ["reason"], - }, - }, - { - "name": "route_to_feedback", - "description": "Send task to FeedbackAgent", - "parameters": { - "type": "object", - "properties": { - "reason": {"type": "string"} - }, - "required": ["reason"], - }, - }, + Tool(name="route_to_strategy", description="Send task to StrategyAgent", parameters=_JSON_PARAM), + Tool(name="route_to_content", description="Send task to ContentAgent", parameters=_JSON_PARAM), + Tool(name="route_to_repurpose", description="Send task to RepurposeAgent", parameters=_JSON_PARAM), + Tool(name="route_to_feedback", description="Send task to FeedbackAgent", parameters=_JSON_PARAM), ] manager_agent = Agent( From d4215d5e815adbeabd630b53cb4428ecbaf75324 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 14:08:04 +0900 Subject: [PATCH 095/230] Update agent_server.py --- src/agents/agent_server.py | 39 ++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 87bfb065..22d5af17 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -60,35 +60,38 @@ async def dispatch_webhook(url: str, payload: dict): CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # clarification webhooks STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # structured‑output webhooks -# ----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- # 2. 
Agent definitions (instructions untouched) # ----------------------------------------------------------------------------- -class RouteCall(BaseModel): - reason: str - -from agents import Tool # Tool dataclass from Agents SDK +from types import SimpleNamespace -_JSON_PARAM = { - "type": "object", - "properties": {"reason": {"type": "string"}}, - "required": ["reason"], -} +def make_tool(name: str, desc: str): + schema = { + "type": "object", + "properties": {"reason": {"type": "string"}}, + "required": ["reason"], + } + obj = SimpleNamespace( + name=name, + description=desc, + parameters=schema, + ) + obj.as_dict = {"name": name, "description": desc, "parameters": schema} + return obj TOOLS = [ - Tool(name="route_to_strategy", description="Send task to StrategyAgent", parameters=_JSON_PARAM), - Tool(name="route_to_content", description="Send task to ContentAgent", parameters=_JSON_PARAM), - Tool(name="route_to_repurpose", description="Send task to RepurposeAgent", parameters=_JSON_PARAM), - Tool(name="route_to_feedback", description="Send task to FeedbackAgent", parameters=_JSON_PARAM), + make_tool("route_to_strategy", "Send task to StrategyAgent"), + make_tool("route_to_content", "Send task to ContentAgent"), + make_tool("route_to_repurpose", "Send task to RepurposeAgent"), + make_tool("route_to_feedback", "Send task to FeedbackAgent"), ] manager_agent = Agent( name="Manager", instructions=( - "You are an intelligent router for user requests.\n" - "First decide if you need clarification. If so, set requires_user_input.\n" - "Otherwise, call exactly ONE of the route_to_* tools with a reason." + "You are an intelligent router … call ONE of the route_to_* tools with a reason." ), - tools=TOOLS, + tools=[t.as_dict for t in TOOLS], # list of dicts for the API ) strategy_agent = Agent("StrategyAgent", instructions="You create 7‑day social media strategies. Respond ONLY in structured JSON.") From 576e8ba34bd477efca46bfb23a6a085ed5b9cf85 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 14:13:41 +0900 Subject: [PATCH 096/230] Update agent_server.py --- src/agents/agent_server.py | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 22d5af17..4e17522e 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -63,21 +63,23 @@ async def dispatch_webhook(url: str, payload: dict): # ----------------------------------------------------------------------------- # 2. 
Agent definitions (instructions untouched) # ----------------------------------------------------------------------------- +# ------------------------------------------------------------------ helpers -- from types import SimpleNamespace -def make_tool(name: str, desc: str): - schema = { - "type": "object", - "properties": {"reason": {"type": "string"}}, - "required": ["reason"], - } - obj = SimpleNamespace( - name=name, - description=desc, - parameters=schema, - ) - obj.as_dict = {"name": name, "description": desc, "parameters": schema} - return obj +class ToolDict(dict): + """Behaves like a dict (for the OpenAI API) and has .name (for Runner).""" + def __init__(self, name: str, desc: str, schema: dict): + super().__init__(name=name, description=desc, parameters=schema) + self.name = name # Runner.run needs this attribute + +_SCHEMA = { + "type": "object", + "properties": {"reason": {"type": "string"}}, + "required": ["reason"], +} + +def make_tool(name: str, desc: str) -> ToolDict: + return ToolDict(name, desc, _SCHEMA) TOOLS = [ make_tool("route_to_strategy", "Send task to StrategyAgent"), @@ -86,6 +88,13 @@ def make_tool(name: str, desc: str): make_tool("route_to_feedback", "Send task to FeedbackAgent"), ] +manager_agent = Agent( + name="Manager", + instructions=( + "You are an intelligent router … call ONE of the route_to_* tools with a reason." + ), + tools=TOOLS, # list of ToolDict objects – satisfies both Runner & API +) manager_agent = Agent( name="Manager", instructions=( From 4e26a25071b7eb42e94536f911a9c8fc74ebc264 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 14:17:45 +0900 Subject: [PATCH 097/230] Update agent_server.py --- src/agents/agent_server.py | 42 +++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 4e17522e..8bb98182 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -62,24 +62,24 @@ async def dispatch_webhook(url: str, payload: dict): # ----------------------------------------------------------------------------- # 2. 
Agent definitions (instructions untouched) -# ----------------------------------------------------------------------------- -# ------------------------------------------------------------------ helpers -- +# -------------------------------------------------------------------- tools -- from types import SimpleNamespace -class ToolDict(dict): - """Behaves like a dict (for the OpenAI API) and has .name (for Runner).""" - def __init__(self, name: str, desc: str, schema: dict): - super().__init__(name=name, description=desc, parameters=schema) - self.name = name # Runner.run needs this attribute - -_SCHEMA = { - "type": "object", - "properties": {"reason": {"type": "string"}}, - "required": ["reason"], -} - -def make_tool(name: str, desc: str) -> ToolDict: - return ToolDict(name, desc, _SCHEMA) +def make_tool(name: str, desc: str): + """ + Returns an object that: + • behaves like a dict (so Agent.serialise works) + • has .name (so Runner.run can list tool names) + """ + schema = { + "type": "object", + "properties": {"reason": {"type": "string"}}, + "required": ["reason"], + } + # dict part (for the OpenAI HTTP payload) + tool_dict = {"name": name, "description": desc, "parameters": schema} + # add attributes so Runner is happy + return SimpleNamespace(**tool_dict) TOOLS = [ make_tool("route_to_strategy", "Send task to StrategyAgent"), @@ -88,6 +88,16 @@ def make_tool(name: str, desc: str) -> ToolDict: make_tool("route_to_feedback", "Send task to FeedbackAgent"), ] +manager_agent = Agent( + name="Manager", + instructions=( + "You are an intelligent router for user requests.\n" + "First decide if you need clarification. If so, set requires_user_input.\n" + "Otherwise, call exactly ONE of the route_to_* tools with a reason." + ), + tools=[dict(t.__dict__) for t in TOOLS], # pure dicts for the API +) + manager_agent = Agent( name="Manager", instructions=( From 57630cd547f64bafed5ebb8c84cecc943b764f25 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 14:22:28 +0900 Subject: [PATCH 098/230] Update agent_server.py --- src/agents/agent_server.py | 199 ++++++++++++++----------------------- 1 file changed, 77 insertions(+), 122 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 8bb98182..382dc35e 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,85 +1,64 @@ -# agents/agent_server.py — tool‑call router version -""" -This revision keeps your existing webhook payloads & downstream agent instructions, but replaces the fragile -JSON‑string parsing with **OpenAI Agents SDK tool calls**. The manager literally calls -`route_to_strategy`, `route_to_content`, etc., so the routing decision is always structured. - -Prerequisites -------------- -* **openai‑python ≥ 1.14** (or any release that exposes `.tool_calls`). - Make sure `requirements.txt` (or `pyproject.toml`) pins `openai>=1.14.0`. -* No database changes; Bubble still sends back `agent_session_id` exactly like before. 
-""" - +# agents/agent_server.py — stable tool-call router from __future__ import annotations -import os, sys, json +import os, sys, json, httpx from datetime import datetime from typing import Any -import httpx +from types import SimpleNamespace + from dotenv import load_dotenv from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -from pydantic import BaseModel -# --- Agent SDK imports -------------------------------------------------------- +# ── OpenAI Agents SDK imports ──────────────────────────────────────────────── load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -from agents import Agent, Runner +from agents import Agent, Runner # <- your existing SDK -# ----------------------------------------------------------------------------- -# 1. Helper payload builders (unchanged) -# ----------------------------------------------------------------------------- +# ---------------------------------------------------------------------------- +# Helper payload builders (unchanged) +# ---------------------------------------------------------------------------- +_now = lambda: datetime.utcnow().isoformat() -def _now() -> str: return datetime.utcnow().isoformat() - -def build_clarification_payload(task_id: str, user_id: str, agent_type: str, message_text: str, reason: str): +def build_clarification_payload(task_id, user_id, agent_type, text, reason): return { - "task_id": task_id, - "user_id": user_id, - "agent_type": agent_type, - "message": {"type": "text", "content": message_text}, + "task_id": task_id, "user_id": user_id, "agent_type": agent_type, + "message": {"type": "text", "content": text}, "metadata": {"reason": reason}, "created_at": _now(), } -def build_structured_payload(task_id: str, user_id: str, agent_type: str, obj: dict[str, Any]): +def build_structured_payload(task_id, user_id, agent_type, obj): return { - "task_id": task_id, - "user_id": user_id, - "agent_type": agent_type, + "task_id": task_id, "user_id": user_id, "agent_type": agent_type, "message": obj, "created_at": _now(), } -async def dispatch_webhook(url: str, payload: dict): - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===\n" + json.dumps(payload, indent=2)) - await client.post(url, json=payload) +async def dispatch(url: str, payload: dict): + async with httpx.AsyncClient() as c: + print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2)) + await c.post(url, json=payload) print("========================") -CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # clarification webhooks -STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # structured‑output webhooks - -# ----------------------------------------------------------------------------- -# 2. 
Agent definitions (instructions untouched) -# -------------------------------------------------------------------- tools -- -from types import SimpleNamespace +CHAT_URL = os.getenv("BUBBLE_CHAT_URL") +STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") -def make_tool(name: str, desc: str): - """ - Returns an object that: - • behaves like a dict (so Agent.serialise works) - • has .name (so Runner.run can list tool names) - """ +# ---------------------------------------------------------------------------- +# Tool wrapper that works with any SDK version +# ---------------------------------------------------------------------------- +def make_tool(name: str, desc: str) -> SimpleNamespace: schema = { "type": "object", "properties": {"reason": {"type": "string"}}, "required": ["reason"], } - # dict part (for the OpenAI HTTP payload) - tool_dict = {"name": name, "description": desc, "parameters": schema} - # add attributes so Runner is happy - return SimpleNamespace(**tool_dict) + # SimpleNamespace gives us attribute access (Runner needs .name) + tool_obj = SimpleNamespace(name=name, + description=desc, + parameters=schema) + # store the dict version for later serialisation + tool_obj._asdict = {"name": name, "description": desc, "parameters": schema} + return tool_obj TOOLS = [ make_tool("route_to_strategy", "Send task to StrategyAgent"), @@ -88,118 +67,94 @@ def make_tool(name: str, desc: str): make_tool("route_to_feedback", "Send task to FeedbackAgent"), ] -manager_agent = Agent( - name="Manager", - instructions=( - "You are an intelligent router for user requests.\n" - "First decide if you need clarification. If so, set requires_user_input.\n" - "Otherwise, call exactly ONE of the route_to_* tools with a reason." - ), - tools=[dict(t.__dict__) for t in TOOLS], # pure dicts for the API -) +# convert to list-of-dicts for the actual API call +TOOLS_FOR_API = [t._asdict for t in TOOLS] +# ---------------------------------------------------------------------------- +# Agents +# ---------------------------------------------------------------------------- manager_agent = Agent( name="Manager", instructions=( - "You are an intelligent router … call ONE of the route_to_* tools with a reason." - ), - tools=TOOLS, # list of ToolDict objects – satisfies both Runner & API -) -manager_agent = Agent( - name="Manager", - instructions=( - "You are an intelligent router … call ONE of the route_to_* tools with a reason." + "You are an intelligent router for user requests.\n" + "If you need more info ask a question (requires_user_input).\n" + "Otherwise call exactly ONE of the route_to_* tools with a reason." ), - tools=[t.as_dict for t in TOOLS], # list of dicts for the API + tools=TOOLS_FOR_API, ) -strategy_agent = Agent("StrategyAgent", instructions="You create 7‑day social media strategies. Respond ONLY in structured JSON.") -content_agent = Agent("ContentAgent", instructions="You write brand‑aligned social posts. Respond ONLY in structured JSON.") +strategy_agent = Agent("StrategyAgent", instructions="You create 7-day social media strategies. Respond ONLY in structured JSON.") +content_agent = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") repurpose_agent = Agent("RepurposeAgent", instructions="You repurpose content across platforms. Respond ONLY in structured JSON.") feedback_agent = Agent("FeedbackAgent", instructions="You critique content and suggest edits. 
Respond ONLY in structured JSON.") AGENT_MAP = { - "strategy": strategy_agent, - "content": content_agent, + "strategy": strategy_agent, + "content": content_agent, "repurpose": repurpose_agent, - "feedback": feedback_agent, + "feedback": feedback_agent, } -# ----------------------------------------------------------------------------- -# 3. Common dispatcher for any agent result -# ----------------------------------------------------------------------------- -async def _dispatch_result(task_id: str, user_id: str, agent_key: str, result): - # A) agent asks a question ----------------------------------------------- +# ---------------------------------------------------------------------------- +# Common dispatcher +# ---------------------------------------------------------------------------- +async def _dispatch(task_id, user_id, agent_key, result): if getattr(result, "requires_user_input", None): - await dispatch_webhook( - CHAT_URL, - build_clarification_payload(task_id, user_id, agent_key, result.requires_user_input, "Agent requested clarification"), - ) + await dispatch(CHAT_URL, build_clarification_payload( + task_id, user_id, agent_key, result.requires_user_input, "Agent requested clarification")) return - - # B) structured JSON ------------------------------------------------------ try: parsed = json.loads(result.final_output) if "output_type" in parsed: - await dispatch_webhook( - STRUCT_URL, - build_structured_payload(task_id, user_id, agent_key, parsed), - ) + await dispatch(STRUCT_URL, build_structured_payload( + task_id, user_id, agent_key, parsed)) return except Exception: pass + await dispatch(CHAT_URL, build_clarification_payload( + task_id, user_id, agent_key, result.final_output.strip(), "Agent returned unstructured output")) - # C) fallback clarification ---------------------------------------------- - await dispatch_webhook( - CHAT_URL, - build_clarification_payload(task_id, user_id, agent_key, result.final_output.strip(), "Agent returned unstructured output"), - ) - -# ----------------------------------------------------------------------------- -# 4. 
FastAPI setup -# ----------------------------------------------------------------------------- +# ---------------------------------------------------------------------------- +# FastAPI +# ---------------------------------------------------------------------------- app = FastAPI() -app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"],) +app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, + allow_methods=["*"], allow_headers=["*"]) @app.post("/agent") -async def main_endpoint(req: Request): +async def endpoint(req: Request): data = await req.json() action = data.get("action") if action not in ("new_task", "new_message"): raise HTTPException(400, "Unknown action") - task_id = data["task_id"] - user_id = data["user_id"] + task_id = data["task_id"]; user_id = data["user_id"] user_text = data.get("user_prompt") or data.get("message") - if not user_text: - raise HTTPException(422, "Missing user_prompt or message") + if not user_text: raise HTTPException(422, "Missing user_prompt or message") - # Determine which agent should run --------------------------------------- agent_key = "manager" if action == "new_task" else data.get("agent_session_id", "manager") agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP.get(agent_key, manager_agent) result = await Runner.run(agent_obj, input=user_text) - # Special handling for Manager tool calls -------------------------------- + # Manager tool-call handling if agent_key == "manager" and result.tool_calls: - tool_call = result.tool_calls[0] - route_to = tool_call["name"].removeprefix("route_to_") - reason = tool_call["arguments"]["reason"] + call = result.tool_calls[0] + route_to = call["name"].removeprefix("route_to_") + reason = call["arguments"]["reason"] - # ① Send manager routing decision webhook - await dispatch_webhook( - CHAT_URL, - build_clarification_payload(task_id, user_id, "manager", json.dumps({"route_to": route_to, "reason": reason}), "Manager routing decision"), - ) + await dispatch(CHAT_URL, build_clarification_payload( + task_id, user_id, "manager", json.dumps({"route_to": route_to, "reason": reason}), + "Manager routing decision")) - # ② Run downstream agent immediately downstream = AGENT_MAP.get(route_to) - if downstream is None: + if not downstream: return {"ok": True} - downstream_result = await Runner.run(downstream, input=user_text) - await _dispatch_result(task_id, user_id, route_to, downstream_result) + + res2 = await Runner.run(downstream, input=user_text) + await _dispatch(task_id, user_id, route_to, res2) return {"ok": True} - # Manager asked clarification OR we are in downstream flow --------------- - await _dispatch_result(task_id, user_id, agent_key, result) + # Manager asked clarification OR downstream flow + await _dispatch(task_id, user_id, agent_key, result) return {"ok": True} From 729dc76c67ce15e9017746ea58f2e8752fb162f1 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 14:28:32 +0900 Subject: [PATCH 099/230] Update agent_server.py --- src/agents/agent_server.py | 46 ++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 27 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 382dc35e..7fa08c63 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -43,36 +43,28 @@ async def dispatch(url: str, payload: dict): CHAT_URL = os.getenv("BUBBLE_CHAT_URL") STRUCT_URL = 
os.getenv("BUBBLE_STRUCTURED_URL") -# ---------------------------------------------------------------------------- -# Tool wrapper that works with any SDK version -# ---------------------------------------------------------------------------- -def make_tool(name: str, desc: str) -> SimpleNamespace: - schema = { - "type": "object", - "properties": {"reason": {"type": "string"}}, - "required": ["reason"], - } - # SimpleNamespace gives us attribute access (Runner needs .name) - tool_obj = SimpleNamespace(name=name, - description=desc, - parameters=schema) - # store the dict version for later serialisation - tool_obj._asdict = {"name": name, "description": desc, "parameters": schema} - return tool_obj +# -------------------------------------------------------------------- tools -- +class ToolDict(dict): + """ + Subclass of dict that also exposes .name so Runner.run can access it. + Works with any SDK version because it's still a plain dict at JSON time. + """ + def __init__(self, name: str, desc: str): + schema = { + "type": "object", + "properties": {"reason": {"type": "string"}}, + "required": ["reason"], + } + super().__init__(name=name, description=desc, parameters=schema) + self.name = name # Runner needs this attribute TOOLS = [ - make_tool("route_to_strategy", "Send task to StrategyAgent"), - make_tool("route_to_content", "Send task to ContentAgent"), - make_tool("route_to_repurpose", "Send task to RepurposeAgent"), - make_tool("route_to_feedback", "Send task to FeedbackAgent"), + ToolDict("route_to_strategy", "Send task to StrategyAgent"), + ToolDict("route_to_content", "Send task to ContentAgent"), + ToolDict("route_to_repurpose", "Send task to RepurposeAgent"), + ToolDict("route_to_feedback", "Send task to FeedbackAgent"), ] -# convert to list-of-dicts for the actual API call -TOOLS_FOR_API = [t._asdict for t in TOOLS] - -# ---------------------------------------------------------------------------- -# Agents -# ---------------------------------------------------------------------------- manager_agent = Agent( name="Manager", instructions=( @@ -80,7 +72,7 @@ def make_tool(name: str, desc: str) -> SimpleNamespace: "If you need more info ask a question (requires_user_input).\n" "Otherwise call exactly ONE of the route_to_* tools with a reason." ), - tools=TOOLS_FOR_API, + tools=TOOLS, # each element is both dict-like and has .name ) strategy_agent = Agent("StrategyAgent", instructions="You create 7-day social media strategies. Respond ONLY in structured JSON.") From 9422d66c09d4270cc0d2db79f0463cb7892c1ab2 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 14:32:35 +0900 Subject: [PATCH 100/230] Update agent_server.py --- src/agents/agent_server.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 7fa08c63..f71c1a72 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -46,17 +46,17 @@ async def dispatch(url: str, payload: dict): # -------------------------------------------------------------------- tools -- class ToolDict(dict): """ - Subclass of dict that also exposes .name so Runner.run can access it. - Works with any SDK version because it's still a plain dict at JSON time. + A dict (for OpenAI API) that ALSO has `.name` (for Runner.run). + Works on every Agents SDK version. 
""" - def __init__(self, name: str, desc: str): + def __init__(self, name: str, description: str): schema = { "type": "object", "properties": {"reason": {"type": "string"}}, "required": ["reason"], } - super().__init__(name=name, description=desc, parameters=schema) - self.name = name # Runner needs this attribute + super().__init__(name=name, description=description, parameters=schema) + self.name = name # <-- Runner looks for this TOOLS = [ ToolDict("route_to_strategy", "Send task to StrategyAgent"), @@ -72,9 +72,10 @@ def __init__(self, name: str, desc: str): "If you need more info ask a question (requires_user_input).\n" "Otherwise call exactly ONE of the route_to_* tools with a reason." ), - tools=TOOLS, # each element is both dict-like and has .name + tools=TOOLS, # ← list contains ToolDict objects ) + strategy_agent = Agent("StrategyAgent", instructions="You create 7-day social media strategies. Respond ONLY in structured JSON.") content_agent = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") repurpose_agent = Agent("RepurposeAgent", instructions="You repurpose content across platforms. Respond ONLY in structured JSON.") From bcb54774a8303e4e7a03d867425dace907b44cea Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 14:36:22 +0900 Subject: [PATCH 101/230] Update agent_server.py --- src/agents/agent_server.py | 165 ++++++++++++++++++++++--------------- 1 file changed, 98 insertions(+), 67 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index f71c1a72..88131b3a 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,54 +1,71 @@ -# agents/agent_server.py — stable tool-call router +# agents/agent_server.py — tool‑call router version +""" +This revision keeps your existing webhook payloads & downstream agent instructions, but replaces the fragile +JSON‑string parsing with **OpenAI Agents SDK tool calls**. The manager literally calls +`route_to_strategy`, `route_to_content`, etc., so the routing decision is always structured. + +Prerequisites +------------- +* **openai‑python ≥ 1.14** (or any release that exposes `.tool_calls`). + Make sure `requirements.txt` (or `pyproject.toml`) pins `openai>=1.14.0`. +* No database changes; Bubble still sends back `agent_session_id` exactly like before. +""" + from __future__ import annotations -import os, sys, json, httpx +import os, sys, json from datetime import datetime from typing import Any -from types import SimpleNamespace - +import httpx from dotenv import load_dotenv from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel -# ── OpenAI Agents SDK imports ──────────────────────────────────────────────── +# --- Agent SDK imports -------------------------------------------------------- load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -from agents import Agent, Runner # <- your existing SDK +from agents import Agent, Runner -# ---------------------------------------------------------------------------- -# Helper payload builders (unchanged) -# ---------------------------------------------------------------------------- -_now = lambda: datetime.utcnow().isoformat() +# ----------------------------------------------------------------------------- +# 1. 
Helper payload builders (unchanged) +# ----------------------------------------------------------------------------- -def build_clarification_payload(task_id, user_id, agent_type, text, reason): +def _now() -> str: return datetime.utcnow().isoformat() + +def build_clarification_payload(task_id: str, user_id: str, agent_type: str, message_text: str, reason: str): return { - "task_id": task_id, "user_id": user_id, "agent_type": agent_type, - "message": {"type": "text", "content": text}, + "task_id": task_id, + "user_id": user_id, + "agent_type": agent_type, + "message": {"type": "text", "content": message_text}, "metadata": {"reason": reason}, "created_at": _now(), } -def build_structured_payload(task_id, user_id, agent_type, obj): +def build_structured_payload(task_id: str, user_id: str, agent_type: str, obj: dict[str, Any]): return { - "task_id": task_id, "user_id": user_id, "agent_type": agent_type, + "task_id": task_id, + "user_id": user_id, + "agent_type": agent_type, "message": obj, "created_at": _now(), } -async def dispatch(url: str, payload: dict): - async with httpx.AsyncClient() as c: - print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2)) - await c.post(url, json=payload) +async def dispatch_webhook(url: str, payload: dict): + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===\n" + json.dumps(payload, indent=2)) + await client.post(url, json=payload) print("========================") -CHAT_URL = os.getenv("BUBBLE_CHAT_URL") -STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") +CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # clarification webhooks +STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # structured‑output webhooks + +# ----------------------------------------------------------------------------- +# 2. Agent & tool definitions (instructions unchanged) +# ----------------------------------------------------------------------------- -# -------------------------------------------------------------------- tools -- class ToolDict(dict): - """ - A dict (for OpenAI API) that ALSO has `.name` (for Runner.run). - Works on every Agents SDK version. - """ + """Dict that also exposes .name so Runner.run works on any SDK version.""" def __init__(self, name: str, description: str): schema = { "type": "object", @@ -56,7 +73,7 @@ def __init__(self, name: str, description: str): "required": ["reason"], } super().__init__(name=name, description=description, parameters=schema) - self.name = name # <-- Runner looks for this + self.name = name TOOLS = [ ToolDict("route_to_strategy", "Send task to StrategyAgent"), @@ -69,85 +86,99 @@ def __init__(self, name: str, description: str): name="Manager", instructions=( "You are an intelligent router for user requests.\n" - "If you need more info ask a question (requires_user_input).\n" - "Otherwise call exactly ONE of the route_to_* tools with a reason." + "First decide if you need clarification. If so, set requires_user_input.\n" + "Otherwise, call exactly ONE of the route_to_* tools with a reason." ), - tools=TOOLS, # ← list contains ToolDict objects + tools=TOOLS, ) - -strategy_agent = Agent("StrategyAgent", instructions="You create 7-day social media strategies. Respond ONLY in structured JSON.") -content_agent = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") +strategy_agent = Agent("StrategyAgent", instructions="You create 7‑day social media strategies. 
Respond ONLY in structured JSON.") +content_agent = Agent("ContentAgent", instructions="You write brand‑aligned social posts. Respond ONLY in structured JSON.") repurpose_agent = Agent("RepurposeAgent", instructions="You repurpose content across platforms. Respond ONLY in structured JSON.") feedback_agent = Agent("FeedbackAgent", instructions="You critique content and suggest edits. Respond ONLY in structured JSON.") AGENT_MAP = { - "strategy": strategy_agent, - "content": content_agent, + "strategy": strategy_agent, + "content": content_agent, "repurpose": repurpose_agent, - "feedback": feedback_agent, + "feedback": feedback_agent, } -# ---------------------------------------------------------------------------- -# Common dispatcher -# ---------------------------------------------------------------------------- -async def _dispatch(task_id, user_id, agent_key, result): +# ----------------------------------------------------------------------------- +# 3. Common dispatcher for any agent result +# ----------------------------------------------------------------------------- +async def _dispatch_result(task_id: str, user_id: str, agent_key: str, result): + # A) agent asks a question ----------------------------------------------- if getattr(result, "requires_user_input", None): - await dispatch(CHAT_URL, build_clarification_payload( - task_id, user_id, agent_key, result.requires_user_input, "Agent requested clarification")) + await dispatch_webhook( + CHAT_URL, + build_clarification_payload(task_id, user_id, agent_key, result.requires_user_input, "Agent requested clarification"), + ) return + + # B) structured JSON ------------------------------------------------------ try: parsed = json.loads(result.final_output) if "output_type" in parsed: - await dispatch(STRUCT_URL, build_structured_payload( - task_id, user_id, agent_key, parsed)) + await dispatch_webhook( + STRUCT_URL, + build_structured_payload(task_id, user_id, agent_key, parsed), + ) return except Exception: pass - await dispatch(CHAT_URL, build_clarification_payload( - task_id, user_id, agent_key, result.final_output.strip(), "Agent returned unstructured output")) -# ---------------------------------------------------------------------------- -# FastAPI -# ---------------------------------------------------------------------------- + # C) fallback clarification ---------------------------------------------- + await dispatch_webhook( + CHAT_URL, + build_clarification_payload(task_id, user_id, agent_key, result.final_output.strip(), "Agent returned unstructured output"), + ) + +# ----------------------------------------------------------------------------- +# 4. 
FastAPI setup +# ----------------------------------------------------------------------------- app = FastAPI() -app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, - allow_methods=["*"], allow_headers=["*"]) +app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"],) @app.post("/agent") -async def endpoint(req: Request): +async def main_endpoint(req: Request): data = await req.json() action = data.get("action") if action not in ("new_task", "new_message"): raise HTTPException(400, "Unknown action") - task_id = data["task_id"]; user_id = data["user_id"] + task_id = data["task_id"] + user_id = data["user_id"] user_text = data.get("user_prompt") or data.get("message") - if not user_text: raise HTTPException(422, "Missing user_prompt or message") + if not user_text: + raise HTTPException(422, "Missing user_prompt or message") + # Determine which agent should run --------------------------------------- agent_key = "manager" if action == "new_task" else data.get("agent_session_id", "manager") agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP.get(agent_key, manager_agent) result = await Runner.run(agent_obj, input=user_text) - # Manager tool-call handling + # Special handling for Manager tool calls -------------------------------- if agent_key == "manager" and result.tool_calls: - call = result.tool_calls[0] - route_to = call["name"].removeprefix("route_to_") - reason = call["arguments"]["reason"] + tool_call = result.tool_calls[0] + route_to = tool_call["name"].removeprefix("route_to_") + reason = tool_call["arguments"]["reason"] - await dispatch(CHAT_URL, build_clarification_payload( - task_id, user_id, "manager", json.dumps({"route_to": route_to, "reason": reason}), - "Manager routing decision")) + # ① Send manager routing decision webhook + await dispatch_webhook( + CHAT_URL, + build_clarification_payload(task_id, user_id, "manager", json.dumps({"route_to": route_to, "reason": reason}), "Manager routing decision"), + ) + # ② Run downstream agent immediately downstream = AGENT_MAP.get(route_to) - if not downstream: + if downstream is None: return {"ok": True} - - res2 = await Runner.run(downstream, input=user_text) - await _dispatch(task_id, user_id, route_to, res2) + downstream_result = await Runner.run(downstream, input=user_text) + await _dispatch_result(task_id, user_id, route_to, downstream_result) return {"ok": True} - # Manager asked clarification OR downstream flow - await _dispatch(task_id, user_id, agent_key, result) + # Manager asked clarification OR we are in downstream flow --------------- + await _dispatch_result(task_id, user_id, agent_key, result) return {"ok": True} From 18cc00ac883362cd7077712f0efc8e7070496b5d Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 19:26:30 +0900 Subject: [PATCH 102/230] Update agent_server.py 04.26 v3 change to handoff logic --- src/agents/agent_server.py | 208 +++++++++++++++---------------------- 1 file changed, 84 insertions(+), 124 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 88131b3a..ea020ee4 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,184 +1,144 @@ -# agents/agent_server.py — tool‑call router version +# agents/agent_server.py — handoff‑based, dual‑webhook version 2025‑04‑26 """ -This revision keeps your existing webhook payloads & downstream agent instructions, but replaces the fragile 
-JSON‑string parsing with **OpenAI Agents SDK tool calls**. The manager literally calls -`route_to_strategy`, `route_to_content`, etc., so the routing decision is always structured. - -Prerequisites -------------- -* **openai‑python ≥ 1.14** (or any release that exposes `.tool_calls`). - Make sure `requirements.txt` (or `pyproject.toml`) pins `openai>=1.14.0`. -* No database changes; Bubble still sends back `agent_session_id` exactly like before. +This file replaces the custom tool‑routing logic with **native SDK handoffs** while +keeping: +• Your five specialist agents with the same instructions. +• Two‑layer webhook scheme (manager routing/clarifications → CHAT_URL; downstream + clarifications or structured JSON → CHAT_URL or STRUCT_URL). +• Exact payload shapes Bubble already consumes. +• No DB; Bubble still controls state via `agent_session_id`. """ from __future__ import annotations -import os, sys, json +import os, sys, json, httpx, asyncio from datetime import datetime from typing import Any -import httpx + from dotenv import load_dotenv from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -from pydantic import BaseModel -# --- Agent SDK imports -------------------------------------------------------- +# ── OpenAI Agents SDK imports ──────────────────────────────────────────────── load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from agents import Agent, Runner +from agents.extensions.handoff_prompt import prompt_with_handoff_instructions -# ----------------------------------------------------------------------------- -# 1. Helper payload builders (unchanged) -# ----------------------------------------------------------------------------- - -def _now() -> str: return datetime.utcnow().isoformat() +# ---------------------------------------------------------------------------- +# Helper payload builders +# ---------------------------------------------------------------------------- +_now = lambda: datetime.utcnow().isoformat() -def build_clarification_payload(task_id: str, user_id: str, agent_type: str, message_text: str, reason: str): +def clarify(task, user, agent, text, reason): return { - "task_id": task_id, - "user_id": user_id, - "agent_type": agent_type, - "message": {"type": "text", "content": message_text}, + "task_id": task, "user_id": user, "agent_type": agent, + "message": {"type": "text", "content": text}, "metadata": {"reason": reason}, "created_at": _now(), } -def build_structured_payload(task_id: str, user_id: str, agent_type: str, obj: dict[str, Any]): +def structured(task, user, agent, obj): return { - "task_id": task_id, - "user_id": user_id, - "agent_type": agent_type, - "message": obj, - "created_at": _now(), + "task_id": task, "user_id": user, "agent_type": agent, + "message": obj, "created_at": _now(), } -async def dispatch_webhook(url: str, payload: dict): - async with httpx.AsyncClient() as client: - print("=== Webhook Dispatch ===\n" + json.dumps(payload, indent=2)) - await client.post(url, json=payload) +async def dispatch(url: str, payload: dict): + async with httpx.AsyncClient() as c: + print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2)) + await c.post(url, json=payload) print("========================") -CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # clarification webhooks -STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # structured‑output webhooks - -# ----------------------------------------------------------------------------- -# 2. 
Agent & tool definitions (instructions unchanged) -# ----------------------------------------------------------------------------- - -class ToolDict(dict): - """Dict that also exposes .name so Runner.run works on any SDK version.""" - def __init__(self, name: str, description: str): - schema = { - "type": "object", - "properties": {"reason": {"type": "string"}}, - "required": ["reason"], - } - super().__init__(name=name, description=description, parameters=schema) - self.name = name - -TOOLS = [ - ToolDict("route_to_strategy", "Send task to StrategyAgent"), - ToolDict("route_to_content", "Send task to ContentAgent"), - ToolDict("route_to_repurpose", "Send task to RepurposeAgent"), - ToolDict("route_to_feedback", "Send task to FeedbackAgent"), -] - -manager_agent = Agent( - name="Manager", - instructions=( - "You are an intelligent router for user requests.\n" - "First decide if you need clarification. If so, set requires_user_input.\n" - "Otherwise, call exactly ONE of the route_to_* tools with a reason." - ), - tools=TOOLS, -) +CHAT_URL = os.getenv("BUBBLE_CHAT_URL") +STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") +# ---------------------------------------------------------------------------- +# Specialist agents (instructions unchanged) +# ---------------------------------------------------------------------------- strategy_agent = Agent("StrategyAgent", instructions="You create 7‑day social media strategies. Respond ONLY in structured JSON.") content_agent = Agent("ContentAgent", instructions="You write brand‑aligned social posts. Respond ONLY in structured JSON.") repurpose_agent = Agent("RepurposeAgent", instructions="You repurpose content across platforms. Respond ONLY in structured JSON.") feedback_agent = Agent("FeedbackAgent", instructions="You critique content and suggest edits. Respond ONLY in structured JSON.") AGENT_MAP = { - "strategy": strategy_agent, - "content": content_agent, + "strategy": strategy_agent, + "content": content_agent, "repurpose": repurpose_agent, - "feedback": feedback_agent, + "feedback": feedback_agent, } -# ----------------------------------------------------------------------------- -# 3. Common dispatcher for any agent result -# ----------------------------------------------------------------------------- -async def _dispatch_result(task_id: str, user_id: str, agent_key: str, result): - # A) agent asks a question ----------------------------------------------- +# ---------------------------------------------------------------------------- +# Manager with handoffs +# ---------------------------------------------------------------------------- +MANAGER_TXT = ( + "You are an intelligent router for user requests.\n" + "If you need clarification, ask a question (requires_user_input).\n" + "Otherwise delegate via a handoff to the correct agent." 
+) + +manager_agent = Agent( + name="Manager", + instructions=prompt_with_handoff_instructions(MANAGER_TXT), + handoffs=list(AGENT_MAP.values()), +) + +# ---------------------------------------------------------------------------- +# Dispatcher for any agent result (unchanged logic) +# ---------------------------------------------------------------------------- +async def _dispatch(task_id: str, user_id: str, agent_key: str, result): if getattr(result, "requires_user_input", None): - await dispatch_webhook( - CHAT_URL, - build_clarification_payload(task_id, user_id, agent_key, result.requires_user_input, "Agent requested clarification"), - ) + await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, + result.requires_user_input, "Agent requested clarification")) return - - # B) structured JSON ------------------------------------------------------ try: parsed = json.loads(result.final_output) if "output_type" in parsed: - await dispatch_webhook( - STRUCT_URL, - build_structured_payload(task_id, user_id, agent_key, parsed), - ) + await dispatch(STRUCT_URL, structured(task_id, user_id, agent_key, parsed)) return except Exception: pass + await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, + result.final_output.strip(), "Agent returned unstructured output")) - # C) fallback clarification ---------------------------------------------- - await dispatch_webhook( - CHAT_URL, - build_clarification_payload(task_id, user_id, agent_key, result.final_output.strip(), "Agent returned unstructured output"), - ) - -# ----------------------------------------------------------------------------- -# 4. FastAPI setup -# ----------------------------------------------------------------------------- +# ---------------------------------------------------------------------------- +# FastAPI setup +# ---------------------------------------------------------------------------- app = FastAPI() -app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"],) +app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, + allow_methods=["*"], allow_headers=["*"]) +# ---------------------------------------------------------------------------- +# Main endpoint +# ---------------------------------------------------------------------------- @app.post("/agent") -async def main_endpoint(req: Request): +async def endpoint(req: Request): data = await req.json() action = data.get("action") if action not in ("new_task", "new_message"): raise HTTPException(400, "Unknown action") - task_id = data["task_id"] - user_id = data["user_id"] + task_id, user_id = data["task_id"], data["user_id"] user_text = data.get("user_prompt") or data.get("message") if not user_text: raise HTTPException(422, "Missing user_prompt or message") - # Determine which agent should run --------------------------------------- + # Determine which agent handles this turn agent_key = "manager" if action == "new_task" else data.get("agent_session_id", "manager") agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP.get(agent_key, manager_agent) - result = await Runner.run(agent_obj, input=user_text) - - # Special handling for Manager tool calls -------------------------------- - if agent_key == "manager" and result.tool_calls: - tool_call = result.tool_calls[0] - route_to = tool_call["name"].removeprefix("route_to_") - reason = tool_call["arguments"]["reason"] - - # ① Send manager routing decision webhook - await dispatch_webhook( - CHAT_URL, - 
build_clarification_payload(task_id, user_id, "manager", json.dumps({"route_to": route_to, "reason": reason}), "Manager routing decision"), - ) - - # ② Run downstream agent immediately - downstream = AGENT_MAP.get(route_to) - if downstream is None: - return {"ok": True} - downstream_result = await Runner.run(downstream, input=user_text) - await _dispatch_result(task_id, user_id, route_to, downstream_result) - return {"ok": True} - - # Manager asked clarification OR we are in downstream flow --------------- - await _dispatch_result(task_id, user_id, agent_key, result) + # Run exactly **one** turn (max_turns=1) so we stay in control of webhooks + result = await Runner.run(agent_obj, input=user_text, max_turns=1) + + # If Manager handed off, Runner returned the *downstream* result but we need + # the routing‑decision webhook first. We can check result.turns[0].role. + if agent_key == "manager" and result.turns and result.turns[0].role == "assistant" and "handoff" in result.turns[0].content: + handoff_info = json.loads(result.turns[0].content) # {'handoff': 'strategy', ...} + route_to = handoff_info.get("handoff") + await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", + json.dumps({"route_to": route_to, "reason": handoff_info.get("reason", "")}), + "Manager routing decision")) + agent_key = route_to # downstream agent for webhook labeling + + # Send downstream agent output / clarifications + await _dispatch(task_id, user_id, agent_key, result) return {"ok": True} From dcf6c093b7de8958f186b692b3c329a87ebc2409 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 19:31:32 +0900 Subject: [PATCH 103/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index ea020ee4..3dff93f6 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -127,7 +127,7 @@ async def endpoint(req: Request): agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP.get(agent_key, manager_agent) # Run exactly **one** turn (max_turns=1) so we stay in control of webhooks - result = await Runner.run(agent_obj, input=user_text, max_turns=1) + result = await Runner.run(agent_obj, input=user_text, max_turns=5) # plenty for manager + 1 downstream # If Manager handed off, Runner returned the *downstream* result but we need # the routing‑decision webhook first. We can check result.turns[0].role. 
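At this point in the series the server exposes a single POST /agent endpoint that accepts either a new_task or a new_message action, runs the Manager (or the specialist named in agent_session_id) through Runner.run, and reports results to Bubble via the BUBBLE_CHAT_URL / BUBBLE_STRUCTURED_URL webhooks rather than in the HTTP response. The sketch below shows the two request shapes a client could send to exercise that flow; it is a minimal illustration, not code from the patches — the base URL, task and user IDs, the prompt text, and the "strategy" session value are placeholder assumptions.

# Hypothetical smoke test for the /agent endpoint as it stands after this patch.
# Assumes the server is running locally (e.g. uvicorn on port 8000) with the
# BUBBLE_CHAT_URL / BUBBLE_STRUCTURED_URL environment variables set, since the
# endpoint itself only returns {"ok": True} and posts real output to those webhooks.
import asyncio
import httpx

BASE_URL = "http://localhost:8000"  # placeholder; adjust for your deployment

async def main():
    async with httpx.AsyncClient() as client:
        # 1) Start a task: the Manager agent either asks a clarifying question
        #    or hands off to a specialist (strategy/content/repurpose/feedback).
        r = await client.post(f"{BASE_URL}/agent", json={
            "action": "new_task",
            "task_id": "task-123",   # placeholder id normally issued by Bubble
            "user_id": "user-456",   # placeholder
            "user_prompt": "Plan a 7-day Instagram campaign for my coffee brand.",
        })
        print(r.json())  # {"ok": True}; the actual agent output arrives via webhook

        # 2) Continue the thread: Bubble echoes back the agent key it stored as
        #    agent_session_id so the follow-up message reaches the same specialist.
        r = await client.post(f"{BASE_URL}/agent", json={
            "action": "new_message",
            "task_id": "task-123",
            "user_id": "user-456",
            "agent_session_id": "strategy",  # assumed value matching AGENT_MAP keys
            "message": "The audience is college students; keep the tone playful.",
        })
        print(r.json())

asyncio.run(main())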
From eac23619c081d645924c2e5e564b93a38cb87c7f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 19:39:09 +0900 Subject: [PATCH 104/230] Update agent_server.py --- src/agents/agent_server.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 3dff93f6..6064d128 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -30,6 +30,8 @@ _now = lambda: datetime.utcnow().isoformat() def clarify(task, user, agent, text, reason): + if agent is None: + agent = "manager" return { "task_id": task, "user_id": user, "agent_type": agent, "message": {"type": "text", "content": text}, @@ -38,6 +40,8 @@ def clarify(task, user, agent, text, reason): } def structured(task, user, agent, obj): + if agent is None: + agent = "manager" return { "task_id": task, "user_id": user, "agent_type": agent, "message": obj, "created_at": _now(), @@ -123,11 +127,13 @@ async def endpoint(req: Request): raise HTTPException(422, "Missing user_prompt or message") # Determine which agent handles this turn - agent_key = "manager" if action == "new_task" else data.get("agent_session_id", "manager") + agent_key = data.get("agent_session_id") if action == "new_message" else "manager" + if agent_key is None: + agent_key = "manager" agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP.get(agent_key, manager_agent) - # Run exactly **one** turn (max_turns=1) so we stay in control of webhooks - result = await Runner.run(agent_obj, input=user_text, max_turns=5) # plenty for manager + 1 downstream + # Allow up to 5 turns so Manager + downstream can complete. + result = await Runner.run(agent_obj, input=user_text, max_turns=10) # If Manager handed off, Runner returned the *downstream* result but we need # the routing‑decision webhook first. We can check result.turns[0].role. From 7a884bab924779acd6f0beb4d73082fa162c2c95 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 19:47:53 +0900 Subject: [PATCH 105/230] Update agent_server.py From 833648b6be44304b798af577bc21fa6f06a99fe4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 19:52:54 +0900 Subject: [PATCH 106/230] Update agent_server.py From 3d98e34c361f0df3bdafbc5f328dd4b44a9f5147 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 20:02:47 +0900 Subject: [PATCH 107/230] Update agent_server.py --- src/agents/agent_server.py | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 6064d128..d010dc5d 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,13 +1,4 @@ -# agents/agent_server.py — handoff‑based, dual‑webhook version 2025‑04‑26 -""" -This file replaces the custom tool‑routing logic with **native SDK handoffs** while -keeping: -• Your five specialist agents with the same instructions. -• Two‑layer webhook scheme (manager routing/clarifications → CHAT_URL; downstream - clarifications or structured JSON → CHAT_URL or STRUCT_URL). -• Exact payload shapes Bubble already consumes. -• No DB; Bubble still controls state via `agent_session_id`. 
-""" +# agents/agent_server.py — handoff-based, dual-webhook from __future__ import annotations import os, sys, json, httpx, asyncio @@ -30,7 +21,7 @@ _now = lambda: datetime.utcnow().isoformat() def clarify(task, user, agent, text, reason): - if agent is None: + if not agent: agent = "manager" return { "task_id": task, "user_id": user, "agent_type": agent, @@ -40,7 +31,7 @@ def clarify(task, user, agent, text, reason): } def structured(task, user, agent, obj): - if agent is None: + if not agent: agent = "manager" return { "task_id": task, "user_id": user, "agent_type": agent, @@ -57,10 +48,10 @@ async def dispatch(url: str, payload: dict): STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # ---------------------------------------------------------------------------- -# Specialist agents (instructions unchanged) +# Specialist agents # ---------------------------------------------------------------------------- -strategy_agent = Agent("StrategyAgent", instructions="You create 7‑day social media strategies. Respond ONLY in structured JSON.") -content_agent = Agent("ContentAgent", instructions="You write brand‑aligned social posts. Respond ONLY in structured JSON.") +strategy_agent = Agent("StrategyAgent", instructions="You create 7-day social media strategies. Respond ONLY in structured JSON.") +content_agent = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") repurpose_agent = Agent("RepurposeAgent", instructions="You repurpose content across platforms. Respond ONLY in structured JSON.") feedback_agent = Agent("FeedbackAgent", instructions="You critique content and suggest edits. Respond ONLY in structured JSON.") @@ -87,7 +78,7 @@ async def dispatch(url: str, payload: dict): ) # ---------------------------------------------------------------------------- -# Dispatcher for any agent result (unchanged logic) +# Dispatcher for any agent result # ---------------------------------------------------------------------------- async def _dispatch(task_id: str, user_id: str, agent_key: str, result): if getattr(result, "requires_user_input", None): @@ -128,15 +119,15 @@ async def endpoint(req: Request): # Determine which agent handles this turn agent_key = data.get("agent_session_id") if action == "new_message" else "manager" - if agent_key is None: + if not agent_key or (agent_key not in AGENT_MAP and agent_key != "manager"): agent_key = "manager" - agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP.get(agent_key, manager_agent) + agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP[agent_key] - # Allow up to 5 turns so Manager + downstream can complete. + # Allow up to 10 turns so Manager + downstream can complete. result = await Runner.run(agent_obj, input=user_text, max_turns=10) - # If Manager handed off, Runner returned the *downstream* result but we need - # the routing‑decision webhook first. We can check result.turns[0].role. + # If Manager handed off, Runner returned the downstream result but we need + # the routing-decision webhook first. 
if agent_key == "manager" and result.turns and result.turns[0].role == "assistant" and "handoff" in result.turns[0].content: handoff_info = json.loads(result.turns[0].content) # {'handoff': 'strategy', ...} route_to = handoff_info.get("handoff") From a39476b62475e5fbf137a6dbaf2d2f7f51c55bb0 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 20:07:43 +0900 Subject: [PATCH 108/230] Update agent_server.py --- src/agents/agent_server.py | 58 ++++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index d010dc5d..55ff5b0d 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -117,25 +117,47 @@ async def endpoint(req: Request): if not user_text: raise HTTPException(422, "Missing user_prompt or message") - # Determine which agent handles this turn - agent_key = data.get("agent_session_id") if action == "new_message" else "manager" - if not agent_key or (agent_key not in AGENT_MAP and agent_key != "manager"): + # 1) NEW TASK → start with manager + if action == "new_task": + mgr_res = await Runner.run(manager_agent, input=user_text, max_turns=1) + + # a) manager asks question + if getattr(mgr_res, "requires_user_input", None): + await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", + mgr_res.requires_user_input, + "Manager requested clarification")) + return {"ok": True} + + # b) manager hands off + try: + handoff_info = json.loads(mgr_res.final_output) + route_to = handoff_info.get("handoff") + except Exception: + route_to = None + + if route_to in AGENT_MAP: + # send routing decision webhook + await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", + json.dumps({"route_to": route_to, + "reason": handoff_info.get("reason", "")}), + "Manager routing decision")) + + # run downstream agent + ds_res = await Runner.run(AGENT_MAP[route_to], input=user_text, max_turns=10) + await _dispatch(task_id, user_id, route_to, ds_res) + return {"ok": True} + + # c) manager returned plain text + await _dispatch(task_id, user_id, "manager", mgr_res) + return {"ok": True} + + # 2) NEW MESSAGE → continue with given agent or manager + agent_key = data.get("agent_session_id") or "manager" + if agent_key not in AGENT_MAP and agent_key != "manager": agent_key = "manager" agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP[agent_key] - # Allow up to 10 turns so Manager + downstream can complete. - result = await Runner.run(agent_obj, input=user_text, max_turns=10) - - # If Manager handed off, Runner returned the downstream result but we need - # the routing-decision webhook first. 
- if agent_key == "manager" and result.turns and result.turns[0].role == "assistant" and "handoff" in result.turns[0].content: - handoff_info = json.loads(result.turns[0].content) # {'handoff': 'strategy', ...} - route_to = handoff_info.get("handoff") - await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", - json.dumps({"route_to": route_to, "reason": handoff_info.get("reason", "")}), - "Manager routing decision")) - agent_key = route_to # downstream agent for webhook labeling - - # Send downstream agent output / clarifications - await _dispatch(task_id, user_id, agent_key, result) + res = await Runner.run(agent_obj, input=user_text, max_turns=10) + await _dispatch(task_id, user_id, agent_key, res) return {"ok": True} + From 3dbbcd14a25259483d0728210fe0107bb014468c Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 20:58:48 +0900 Subject: [PATCH 109/230] Update agent_server.py --- src/agents/agent_server.py | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 55ff5b0d..0534793b 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -38,11 +38,32 @@ def structured(task, user, agent, obj): "message": obj, "created_at": _now(), } -async def dispatch(url: str, payload: dict): - async with httpx.AsyncClient() as c: - print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2)) - await c.post(url, json=payload) - print("========================") +async def _dispatch(task_id: str, user_id: str, agent_key: str, result): + if getattr(result, "requires_user_input", None): + await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, + result.requires_user_input, + "Agent requested clarification")) + return + + # --- NEW robust JSON parse --------------------------------------------- + try: + clean = result.final_output.strip() + if clean.startswith("```"): # strip code fences + parts = clean.split("```", 2) + if len(parts) >= 3: + clean = parts[1].strip() + parsed = json.loads(clean) + if "output_type" in parsed: + await dispatch(STRUCT_URL, + structured(task_id, user_id, agent_key, parsed)) + return + except Exception: + pass + # ----------------------------------------------------------------------- + + await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, + result.final_output.strip(), + "Agent returned unstructured output")) CHAT_URL = os.getenv("BUBBLE_CHAT_URL") STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") @@ -69,6 +90,9 @@ async def dispatch(url: str, payload: dict): "You are an intelligent router for user requests.\n" "If you need clarification, ask a question (requires_user_input).\n" "Otherwise delegate via a handoff to the correct agent." + If you are not asking a question, you MUST emit: {"handoff": "", "reason": "..."} and nothing else. + Never output the plan yourself. + Never wrap the JSON in code fences. 
) manager_agent = Agent( From f13b28302ccfab6806dcda84b0436f4e0437573f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 21:00:34 +0900 Subject: [PATCH 110/230] Update agent_server.py --- src/agents/agent_server.py | 108 +++++++++++-------------------------- 1 file changed, 32 insertions(+), 76 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 0534793b..0c6828c6 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,23 +1,19 @@ # agents/agent_server.py — handoff-based, dual-webhook from __future__ import annotations -import os, sys, json, httpx, asyncio +import os, sys, json, httpx from datetime import datetime -from typing import Any - from dotenv import load_dotenv from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -# ── OpenAI Agents SDK imports ──────────────────────────────────────────────── +# SDK load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from agents import Agent, Runner from agents.extensions.handoff_prompt import prompt_with_handoff_instructions -# ---------------------------------------------------------------------------- -# Helper payload builders -# ---------------------------------------------------------------------------- +# ── helpers ──────────────────────────────────────────────────────────────── _now = lambda: datetime.utcnow().isoformat() def clarify(task, user, agent, text, reason): @@ -38,39 +34,16 @@ def structured(task, user, agent, obj): "message": obj, "created_at": _now(), } -async def _dispatch(task_id: str, user_id: str, agent_key: str, result): - if getattr(result, "requires_user_input", None): - await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, - result.requires_user_input, - "Agent requested clarification")) - return - - # --- NEW robust JSON parse --------------------------------------------- - try: - clean = result.final_output.strip() - if clean.startswith("```"): # strip code fences - parts = clean.split("```", 2) - if len(parts) >= 3: - clean = parts[1].strip() - parsed = json.loads(clean) - if "output_type" in parsed: - await dispatch(STRUCT_URL, - structured(task_id, user_id, agent_key, parsed)) - return - except Exception: - pass - # ----------------------------------------------------------------------- - - await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, - result.final_output.strip(), - "Agent returned unstructured output")) +async def dispatch(url: str, payload: dict): + async with httpx.AsyncClient() as c: + print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2)) + await c.post(url, json=payload) + print("========================") CHAT_URL = os.getenv("BUBBLE_CHAT_URL") STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") -# ---------------------------------------------------------------------------- -# Specialist agents -# ---------------------------------------------------------------------------- +# ── specialist agents ───────────────────────────────────────────────────── strategy_agent = Agent("StrategyAgent", instructions="You create 7-day social media strategies. Respond ONLY in structured JSON.") content_agent = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") repurpose_agent = Agent("RepurposeAgent", instructions="You repurpose content across platforms. 
Respond ONLY in structured JSON.") @@ -83,34 +56,29 @@ async def _dispatch(task_id: str, user_id: str, agent_key: str, result): "feedback": feedback_agent, } -# ---------------------------------------------------------------------------- -# Manager with handoffs -# ---------------------------------------------------------------------------- -MANAGER_TXT = ( - "You are an intelligent router for user requests.\n" - "If you need clarification, ask a question (requires_user_input).\n" - "Otherwise delegate via a handoff to the correct agent." - If you are not asking a question, you MUST emit: {"handoff": "", "reason": "..."} and nothing else. - Never output the plan yourself. - Never wrap the JSON in code fences. -) - -manager_agent = Agent( - name="Manager", - instructions=prompt_with_handoff_instructions(MANAGER_TXT), - handoffs=list(AGENT_MAP.values()), -) - -# ---------------------------------------------------------------------------- -# Dispatcher for any agent result -# ---------------------------------------------------------------------------- +# ── manager with handoffs ───────────────────────────────────────────────── +MANAGER_TXT = prompt_with_handoff_instructions(""" +You are an intelligent router for user requests. +If you need clarification, ask a question (requires_user_input). +Otherwise delegate via a handoff to the correct agent. +If you are not asking a question, you MUST emit: {"handoff": "", "reason": "..."} and nothing else. +Never output the plan yourself. +Never wrap the JSON in code fences. +""") + +manager_agent = Agent("Manager", instructions=MANAGER_TXT, handoffs=list(AGENT_MAP.values())) + +# ── dispatch helper (robust JSON parse) ─────────────────────────────────── async def _dispatch(task_id: str, user_id: str, agent_key: str, result): if getattr(result, "requires_user_input", None): await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, result.requires_user_input, "Agent requested clarification")) return try: - parsed = json.loads(result.final_output) + clean = result.final_output.strip() + if clean.startswith("```"): + clean = clean.split("```", 2)[1].strip() + parsed = json.loads(clean) if "output_type" in parsed: await dispatch(STRUCT_URL, structured(task_id, user_id, agent_key, parsed)) return @@ -119,16 +87,12 @@ async def _dispatch(task_id: str, user_id: str, agent_key: str, result): await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, result.final_output.strip(), "Agent returned unstructured output")) -# ---------------------------------------------------------------------------- -# FastAPI setup -# ---------------------------------------------------------------------------- +# ── FastAPI ─────────────────────────────────────────────────────────────── app = FastAPI() app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"]) -# ---------------------------------------------------------------------------- -# Main endpoint -# ---------------------------------------------------------------------------- +# ── main endpoint ───────────────────────────────────────────────────────── @app.post("/agent") async def endpoint(req: Request): data = await req.json() @@ -141,18 +105,15 @@ async def endpoint(req: Request): if not user_text: raise HTTPException(422, "Missing user_prompt or message") - # 1) NEW TASK → start with manager + # NEW TASK -------------------------------------------------------------- if action == "new_task": mgr_res = await Runner.run(manager_agent, input=user_text, 
max_turns=1) - # a) manager asks question if getattr(mgr_res, "requires_user_input", None): await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", - mgr_res.requires_user_input, - "Manager requested clarification")) + mgr_res.requires_user_input, "Manager requested clarification")) return {"ok": True} - # b) manager hands off try: handoff_info = json.loads(mgr_res.final_output) route_to = handoff_info.get("handoff") @@ -160,22 +121,18 @@ async def endpoint(req: Request): route_to = None if route_to in AGENT_MAP: - # send routing decision webhook await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", json.dumps({"route_to": route_to, "reason": handoff_info.get("reason", "")}), "Manager routing decision")) - - # run downstream agent ds_res = await Runner.run(AGENT_MAP[route_to], input=user_text, max_turns=10) await _dispatch(task_id, user_id, route_to, ds_res) return {"ok": True} - # c) manager returned plain text await _dispatch(task_id, user_id, "manager", mgr_res) return {"ok": True} - # 2) NEW MESSAGE → continue with given agent or manager + # NEW MESSAGE ----------------------------------------------------------- agent_key = data.get("agent_session_id") or "manager" if agent_key not in AGENT_MAP and agent_key != "manager": agent_key = "manager" @@ -183,5 +140,4 @@ async def endpoint(req: Request): res = await Runner.run(agent_obj, input=user_text, max_turns=10) await _dispatch(task_id, user_id, agent_key, res) - return {"ok": True} - + return {"ok": True"} From bf46b8067598a5791e106b7b00b09669418df9e3 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 21:02:38 +0900 Subject: [PATCH 111/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 0c6828c6..a3f075df 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -140,4 +140,4 @@ async def endpoint(req: Request): res = await Runner.run(agent_obj, input=user_text, max_turns=10) await _dispatch(task_id, user_id, agent_key, res) - return {"ok": True"} + return {"ok": True} From fb0a5ebdf8126d9b69b68df23b4525603c88b06b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 21:10:57 +0900 Subject: [PATCH 112/230] Update agent_server.py --- src/agents/agent_server.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index a3f075df..3537528e 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -109,31 +109,41 @@ async def endpoint(req: Request): if action == "new_task": mgr_res = await Runner.run(manager_agent, input=user_text, max_turns=1) + # a) manager asks clarification if getattr(mgr_res, "requires_user_input", None): await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", - mgr_res.requires_user_input, "Manager requested clarification")) + mgr_res.requires_user_input, + "Manager requested clarification")) return {"ok": True} + # b) manager hand-off try: handoff_info = json.loads(mgr_res.final_output) route_to = handoff_info.get("handoff") except Exception: route_to = None + # **normalize key** ("ContentAgent" → "content") + if route_to: + route_to = route_to.lower().removesuffix("agent") + if route_to in AGENT_MAP: + # routing decision webhook await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", json.dumps({"route_to": 
route_to, "reason": handoff_info.get("reason", "")}), "Manager routing decision")) + # downstream run + webhook ds_res = await Runner.run(AGENT_MAP[route_to], input=user_text, max_turns=10) await _dispatch(task_id, user_id, route_to, ds_res) return {"ok": True} + # c) manager returned plain text await _dispatch(task_id, user_id, "manager", mgr_res) return {"ok": True} # NEW MESSAGE ----------------------------------------------------------- - agent_key = data.get("agent_session_id") or "manager" + agent_key = (data.get("agent_session_id") or "manager").lower().removesuffix("agent") if agent_key not in AGENT_MAP and agent_key != "manager": agent_key = "manager" agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP[agent_key] From 2541cc83c1cc1d556ba269878e6cb0540860d2c0 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 21:17:31 +0900 Subject: [PATCH 113/230] Update agent_server.py --- src/agents/agent_server.py | 43 ++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 3537528e..8e6eb2d9 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -7,15 +7,24 @@ from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -# SDK +# ── SDK -------------------------------------------------------------------- load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from agents import Agent, Runner from agents.extensions.handoff_prompt import prompt_with_handoff_instructions -# ── helpers ──────────────────────────────────────────────────────────────── +# ── helpers ---------------------------------------------------------------- _now = lambda: datetime.utcnow().isoformat() +def canon(name: str | None) -> str: + """Return canonical agent key ('ContentAgent' → 'content').""" + if not name: + return "" + name = name.lower().replace(" ", "").replace("_", "") + if name.endswith("agent"): + name = name[:-5] + return name + def clarify(task, user, agent, text, reason): if not agent: agent = "manager" @@ -43,7 +52,7 @@ async def dispatch(url: str, payload: dict): CHAT_URL = os.getenv("BUBBLE_CHAT_URL") STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") -# ── specialist agents ───────────────────────────────────────────────────── +# ── specialist agents ------------------------------------------------------ strategy_agent = Agent("StrategyAgent", instructions="You create 7-day social media strategies. Respond ONLY in structured JSON.") content_agent = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") repurpose_agent = Agent("RepurposeAgent", instructions="You repurpose content across platforms. Respond ONLY in structured JSON.") @@ -56,7 +65,7 @@ async def dispatch(url: str, payload: dict): "feedback": feedback_agent, } -# ── manager with handoffs ───────────────────────────────────────────────── +# ── manager ---------------------------------------------------------------- MANAGER_TXT = prompt_with_handoff_instructions(""" You are an intelligent router for user requests. If you need clarification, ask a question (requires_user_input). 
@@ -68,7 +77,7 @@ async def dispatch(url: str, payload: dict): manager_agent = Agent("Manager", instructions=MANAGER_TXT, handoffs=list(AGENT_MAP.values())) -# ── dispatch helper (robust JSON parse) ─────────────────────────────────── +# ── dispatch helper --------------------------------------------------------- async def _dispatch(task_id: str, user_id: str, agent_key: str, result): if getattr(result, "requires_user_input", None): await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, @@ -87,12 +96,12 @@ async def _dispatch(task_id: str, user_id: str, agent_key: str, result): await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, result.final_output.strip(), "Agent returned unstructured output")) -# ── FastAPI ─────────────────────────────────────────────────────────────── +# ── FastAPI set-up ---------------------------------------------------------- app = FastAPI() app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"]) -# ── main endpoint ───────────────────────────────────────────────────────── +# ── main endpoint ----------------------------------------------------------- @app.post("/agent") async def endpoint(req: Request): data = await req.json() @@ -109,45 +118,39 @@ async def endpoint(req: Request): if action == "new_task": mgr_res = await Runner.run(manager_agent, input=user_text, max_turns=1) - # a) manager asks clarification + # a) clarification if getattr(mgr_res, "requires_user_input", None): await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", mgr_res.requires_user_input, "Manager requested clarification")) return {"ok": True} - # b) manager hand-off + # b) hand-off try: handoff_info = json.loads(mgr_res.final_output) - route_to = handoff_info.get("handoff") + route_to = canon(handoff_info.get("handoff")) except Exception: - route_to = None - - # **normalize key** ("ContentAgent" → "content") - if route_to: - route_to = route_to.lower().removesuffix("agent") + route_to = "" if route_to in AGENT_MAP: - # routing decision webhook await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", json.dumps({"route_to": route_to, "reason": handoff_info.get("reason", "")}), "Manager routing decision")) - # downstream run + webhook ds_res = await Runner.run(AGENT_MAP[route_to], input=user_text, max_turns=10) await _dispatch(task_id, user_id, route_to, ds_res) return {"ok": True} - # c) manager returned plain text + # c) plain text await _dispatch(task_id, user_id, "manager", mgr_res) return {"ok": True} # NEW MESSAGE ----------------------------------------------------------- - agent_key = (data.get("agent_session_id") or "manager").lower().removesuffix("agent") + agent_key = canon(data.get("agent_session_id") or "manager") if agent_key not in AGENT_MAP and agent_key != "manager": agent_key = "manager" agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP[agent_key] res = await Runner.run(agent_obj, input=user_text, max_turns=10) await _dispatch(task_id, user_id, agent_key, res) - return {"ok": True} + return {"ok": True"} From 7f75b0e215dd98b88e8936653a67ed348482b1ca Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 21:19:29 +0900 Subject: [PATCH 114/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 8e6eb2d9..086db240 100644 --- a/src/agents/agent_server.py +++ 
b/src/agents/agent_server.py @@ -153,4 +153,4 @@ async def endpoint(req: Request): res = await Runner.run(agent_obj, input=user_text, max_turns=10) await _dispatch(task_id, user_id, agent_key, res) - return {"ok": True"} + return {"ok": True} From 5ae15169c562c5cde3b52f9fe03e04c86b8d41e9 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 26 Apr 2025 21:37:10 +0900 Subject: [PATCH 115/230] Update agent_server.py 0426. V4 updated single run --- src/agents/agent_server.py | 186 ++++++++++++++----------------------- 1 file changed, 70 insertions(+), 116 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 086db240..9abe03b1 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,5 +1,4 @@ -# agents/agent_server.py — handoff-based, dual-webhook - +# agents/agent_server.py — single webhook with full trace from __future__ import annotations import os, sys, json, httpx from datetime import datetime @@ -7,150 +6,105 @@ from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware -# ── SDK -------------------------------------------------------------------- +# ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from agents import Agent, Runner + +# helper to prepend handoff instructions from agents.extensions.handoff_prompt import prompt_with_handoff_instructions -# ── helpers ---------------------------------------------------------------- -_now = lambda: datetime.utcnow().isoformat() - -def canon(name: str | None) -> str: - """Return canonical agent key ('ContentAgent' → 'content').""" - if not name: - return "" - name = name.lower().replace(" ", "").replace("_", "") - if name.endswith("agent"): - name = name[:-5] - return name - -def clarify(task, user, agent, text, reason): - if not agent: - agent = "manager" - return { - "task_id": task, "user_id": user, "agent_type": agent, - "message": {"type": "text", "content": text}, - "metadata": {"reason": reason}, - "created_at": _now(), - } +# ── common helpers ───────────────────────────────────────────────────────── +CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # one endpoint is enough +_now = lambda: datetime.utcnow().isoformat() -def structured(task, user, agent, obj): - if not agent: - agent = "manager" - return { - "task_id": task, "user_id": user, "agent_type": agent, - "message": obj, "created_at": _now(), - } - -async def dispatch(url: str, payload: dict): +async def send_webhook(payload: dict): async with httpx.AsyncClient() as c: print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2)) - await c.post(url, json=payload) + await c.post(CHAT_URL, json=payload) # always same URL print("========================") -CHAT_URL = os.getenv("BUBBLE_CHAT_URL") -STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") +def build_payload(task_id, user_id, agent_type, message, reason, trace): + return { + "task_id": task_id, + "user_id": user_id, + "agent_type": agent_type, + "message": message, # text | structured JSON + "metadata": {"reason": reason}, + "trace": trace, # full execution chain + "created_at": _now(), + } -# ── specialist agents ------------------------------------------------------ -strategy_agent = Agent("StrategyAgent", instructions="You create 7-day social media strategies. 
Respond ONLY in structured JSON.") -content_agent = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") -repurpose_agent = Agent("RepurposeAgent", instructions="You repurpose content across platforms. Respond ONLY in structured JSON.") -feedback_agent = Agent("FeedbackAgent", instructions="You critique content and suggest edits. Respond ONLY in structured JSON.") +# ── specialist agents (instructions unchanged) ──────────────────────────── +strategy = Agent("StrategyAgent", instructions="You create 7-day social strategies. Respond ONLY in structured JSON.") +content = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") +repurpose = Agent("RepurposeAgent", instructions="You repurpose content. Respond ONLY in structured JSON.") +feedback = Agent("FeedbackAgent", instructions="You critique content. Respond ONLY in structured JSON.") -AGENT_MAP = { - "strategy": strategy_agent, - "content": content_agent, - "repurpose": repurpose_agent, - "feedback": feedback_agent, -} +AGENTS = { "strategy": strategy, "content": content, "repurpose": repurpose, "feedback": feedback } -# ── manager ---------------------------------------------------------------- -MANAGER_TXT = prompt_with_handoff_instructions(""" +# ── Manager with native handoffs ─────────────────────────────────────────── +MANAGER_TXT = """ You are an intelligent router for user requests. If you need clarification, ask a question (requires_user_input). Otherwise delegate via a handoff to the correct agent. -If you are not asking a question, you MUST emit: {"handoff": "", "reason": "..."} and nothing else. -Never output the plan yourself. -Never wrap the JSON in code fences. -""") - -manager_agent = Agent("Manager", instructions=MANAGER_TXT, handoffs=list(AGENT_MAP.values())) - -# ── dispatch helper --------------------------------------------------------- -async def _dispatch(task_id: str, user_id: str, agent_key: str, result): - if getattr(result, "requires_user_input", None): - await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, - result.requires_user_input, "Agent requested clarification")) - return - try: - clean = result.final_output.strip() - if clean.startswith("```"): - clean = clean.split("```", 2)[1].strip() - parsed = json.loads(clean) - if "output_type" in parsed: - await dispatch(STRUCT_URL, structured(task_id, user_id, agent_key, parsed)) - return - except Exception: - pass - await dispatch(CHAT_URL, clarify(task_id, user_id, agent_key, - result.final_output.strip(), "Agent returned unstructured output")) - -# ── FastAPI set-up ---------------------------------------------------------- +When delegating, emit: {"handoff":"","reason":"..."} using one of: strategy, content, repurpose, feedback. +Never output the final plan yourself. +Never wrap JSON in code fences. 
+""" +manager = Agent("Manager", + instructions=prompt_with_handoff_instructions(MANAGER_TXT), + handoffs=list(AGENTS.values())) + +# ── FastAPI boilerplate ──────────────────────────────────────────────────── app = FastAPI() app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"]) -# ── main endpoint ----------------------------------------------------------- +# ── main endpoint ───────────────────────────────────────────────────────── @app.post("/agent") -async def endpoint(req: Request): - data = await req.json() +async def run_agent(req: Request): + data = await req.json() action = data.get("action") if action not in ("new_task", "new_message"): raise HTTPException(400, "Unknown action") task_id, user_id = data["task_id"], data["user_id"] - user_text = data.get("user_prompt") or data.get("message") - if not user_text: + text_in = data.get("user_prompt") or data.get("message") + if not text_in: raise HTTPException(422, "Missing user_prompt or message") - # NEW TASK -------------------------------------------------------------- - if action == "new_task": - mgr_res = await Runner.run(manager_agent, input=user_text, max_turns=1) + # decide which agent continues this thread + session_key = data.get("agent_session_id") or "manager" + current = manager if session_key == "manager" else AGENTS.get(session_key, manager) - # a) clarification - if getattr(mgr_res, "requires_user_input", None): - await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", - mgr_res.requires_user_input, - "Manager requested clarification")) - return {"ok": True} + # let Runner drive full loop (handoffs + tools) until completion / clarification + result = await Runner.run(current, input=text_in, max_turns=12) - # b) hand-off + # message to Bubble + if getattr(result, "requires_user_input", None): + msg = {"type":"text","content": result.requires_user_input} + cause = "Agent requested clarification" + else: + # try parse structured JSON; fallback to plain text try: - handoff_info = json.loads(mgr_res.final_output) - route_to = canon(handoff_info.get("handoff")) + parsed = json.loads(result.final_output) + if "output_type" in parsed: + msg, cause = parsed, "Agent returned structured output" + else: + raise ValueError except Exception: - route_to = "" - - if route_to in AGENT_MAP: - await dispatch(CHAT_URL, clarify(task_id, user_id, "manager", - json.dumps({"route_to": route_to, - "reason": handoff_info.get("reason", "")}), - "Manager routing decision")) - ds_res = await Runner.run(AGENT_MAP[route_to], input=user_text, max_turns=10) - await _dispatch(task_id, user_id, route_to, ds_res) - return {"ok": True} - - # c) plain text - await _dispatch(task_id, user_id, "manager", mgr_res) - return {"ok": True} - - # NEW MESSAGE ----------------------------------------------------------- - agent_key = canon(data.get("agent_session_id") or "manager") - if agent_key not in AGENT_MAP and agent_key != "manager": - agent_key = "manager" - agent_obj = manager_agent if agent_key == "manager" else AGENT_MAP[agent_key] - - res = await Runner.run(agent_obj, input=user_text, max_turns=10) - await _dispatch(task_id, user_id, agent_key, res) + msg = {"type":"text","content": result.final_output.strip()} + cause = "Agent returned unstructured output" + + # build trace (list of dicts with role, content, tool_calls, etc.) 
+    try:
+        trace = result.to_debug_dict()        # new SDK helper
+    except Exception:
+        trace = []
+
+    payload = build_payload(task_id, user_id,
+                            result.agent_type or "manager",
+                            msg, cause, trace)
+    await send_webhook(payload)
     return {"ok": True}

From fed0bdede1f4522007085b92174f36df4178c7fd Mon Sep 17 00:00:00 2001
From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com>
Date: Sat, 26 Apr 2025 21:49:29 +0900
Subject: [PATCH 116/230] agent_server.py update 25.04.26 v4.2 one webhook one output
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/agents/agent_server.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py
index 9abe03b1..23f0cf3d 100644
--- a/src/agents/agent_server.py
+++ b/src/agents/agent_server.py
@@ -24,6 +24,11 @@ async def send_webhook(payload: dict):
         await c.post(CHAT_URL, json=payload)        # always same URL
     print("========================")
 
+# derive the agent that produced the result (SDK-wide safe)
+speaker = getattr(result, "agent", None)        # Agent object or None
+agent_name = speaker.name if speaker else session_key
+agent_key = agent_name.lower().replace("agent", "").strip()
+
 def build_payload(task_id, user_id, agent_type, message, reason, trace):
     return {
         "task_id": task_id,
@@ -104,7 +109,7 @@ async def run_agent(req: Request):
         trace = []
 
     payload = build_payload(task_id, user_id,
-                            result.agent_type or "manager",
+                            agent_key,
                             msg, cause, trace)
     await send_webhook(payload)
     return {"ok": True}

From 30551d14330d4659a44b29df61bb084888f2bab2 Mon Sep 17 00:00:00 2001
From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com>
Date: Sat, 26 Apr 2025 21:55:17 +0900
Subject: [PATCH 117/230] agent_server.py update
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/agents/agent_server.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py
index 23f0cf3d..60e962fd 100644
--- a/src/agents/agent_server.py
+++ b/src/agents/agent_server.py
@@ -24,11 +24,6 @@ async def send_webhook(payload: dict):
         await c.post(CHAT_URL, json=payload)        # always same URL
     print("========================")
 
-# derive the agent that produced the result (SDK-wide safe)
-speaker = getattr(result, "agent", None)        # Agent object or None
-agent_name = speaker.name if speaker else session_key
-agent_key = agent_name.lower().replace("agent", "").strip()
-
 def build_payload(task_id, user_id, agent_type, message, reason, trace):
     return {
         "task_id": task_id,
@@ -85,6 +80,11 @@ async def run_agent(req: Request):
 
     # let Runner drive full loop (handoffs + tools) until completion / clarification
     result = await Runner.run(current, input=text_in, max_turns=12)
+    
+    # derive the agent that produced the result (SDK-wide safe)
+    speaker = getattr(result, "agent", None)        # Agent object or None
+    agent_name = speaker.name if speaker else session_key
+    agent_key = agent_name.lower().replace("agent", "").strip()
 
     # message to Bubble
     if getattr(result, "requires_user_input", None):

From e115c7b798d0f53bb4f2213a83c0ca81ab6b3423 Mon Sep 17 00:00:00 2001
From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com>
Date: Sat, 26 Apr 2025 22:13:08 +0900
Subject: [PATCH 118/230]
 agent_server.py update
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/agents/agent_server.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py
index 60e962fd..86995079 100644
--- a/src/agents/agent_server.py
+++ b/src/agents/agent_server.py
@@ -81,11 +81,11 @@ async def run_agent(req: Request):
     # let Runner drive full loop (handoffs + tools) until completion / clarification
     result = await Runner.run(current, input=text_in, max_turns=12)
-    # derive the agent that produced the result (SDK-wide safe)
-    speaker = getattr(result, "agent", None)        # Agent object or None
-    agent_name = speaker.name if speaker else session_key
-    agent_key = agent_name.lower().replace("agent", "").strip()
-
+    # who produced the final_output / clarification?
+    speaker = getattr(result, "agent", None)        # Agent object or None
+    agent_name = speaker.name if speaker else session_key
+    agent_key = agent_name.lower().replace("agent", "").strip()
+
     # message to Bubble
     if getattr(result, "requires_user_input", None):

From 66739d27f90abb39b5efac7787acd8599197800d Mon Sep 17 00:00:00 2001
From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com>
Date: Sat, 26 Apr 2025 22:26:46 +0900
Subject: [PATCH 119/230] agent_server.py update
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/agents/agent_server.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py
index 86995079..75a059b7 100644
--- a/src/agents/agent_server.py
+++ b/src/agents/agent_server.py
@@ -107,6 +107,8 @@ async def run_agent(req: Request):
         trace = result.to_debug_dict()        # new SDK helper
     except Exception:
         trace = []
+    if not agent_key:
+        agent_key = "manager"
 
     payload = build_payload(task_id, user_id,
                             agent_key,

From a219e7c63f091778d547438377b0e42a01d1f6c8 Mon Sep 17 00:00:00 2001
From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com>
Date: Sat, 26 Apr 2025 22:41:43 +0900
Subject: [PATCH 120/230] agent_server.py update
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/agents/agent_server.py | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py
index 75a059b7..7128181d 100644
--- a/src/agents/agent_server.py
+++ b/src/agents/agent_server.py
@@ -18,6 +18,18 @@
 CHAT_URL = os.getenv("BUBBLE_CHAT_URL")        # one endpoint is enough
 _now = lambda: datetime.utcnow().isoformat()
 
+def canon(name: str | None) -> str:
+    """
+    Normalize any agent name:
+        'ContentAgent'  → 'content'
+        'strategyagent' → 'strategy'
+        'manager'       → 'manager'
+    """
+    if not name:
+        return ""
+    name = name.lower().replace(" ", "").replace("_", "")
+    return name[:-5] if name.endswith("agent") else name
+
 async def send_webhook(payload: dict):
     async with httpx.AsyncClient() as c:
         print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2))
@@ -84,7 +96,9 @@ async def run_agent(req: Request):
     # who produced the final_output / clarification?
speaker = getattr(result, "agent", None) # Agent object or None agent_name = speaker.name if speaker else session_key - agent_key = agent_name.lower().replace("agent", "").strip() + agent_key = canon(agent_name) + if not agent_key: + agent_key = "manager" # message to Bubble if getattr(result, "requires_user_input", None): @@ -107,8 +121,6 @@ async def run_agent(req: Request): trace = result.to_debug_dict() # new SDK helper except Exception: trace = [] - if not agent_key: - agent_key = "manager" payload = build_payload(task_id, user_id, agent_key, From 5d85704e1f869a59003b28a8ddb78d14486aed0b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 11:22:59 +0900 Subject: [PATCH 121/230] Update agent_server.py --- src/agents/agent_server.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 7128181d..278ff2e9 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -18,15 +18,7 @@ CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # one endpoint is enough _now = lambda: datetime.utcnow().isoformat() -def canon(name: str | None) -> str: - """ - Normalize any agent name: - 'ContentAgent' → 'content' - 'strategyagent' → 'strategy' - 'manager' → 'manager' - """ - if not name: - return "" +def canon(name: str) -> str: name = name.lower().replace(" ", "").replace("_", "") return name[:-5] if name.endswith("agent") else name @@ -96,7 +88,7 @@ async def run_agent(req: Request): # who produced the final_output / clarification? speaker = getattr(result, "agent", None) # Agent object or None agent_name = speaker.name if speaker else session_key - agent_key = canon(agent_name) + agent_key = canon(agent_name) if not agent_key: agent_key = "manager" From a220cfe1d15ba3c275dd3c924bb4b2e91d50ce48 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 11:35:57 +0900 Subject: [PATCH 122/230] Update agent_server.py --- src/agents/agent_server.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 278ff2e9..7aed2409 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -47,6 +47,15 @@ def build_payload(task_id, user_id, agent_type, message, reason, trace): AGENTS = { "strategy": strategy, "content": content, "repurpose": repurpose, "feedback": feedback } +# canonical name for every Agent object we own +AGENT_TO_KEY = { + manager: "manager", + strategy: "strategy", + content: "content", + repurpose: "repurpose", + feedback: "feedback", +} + # ── Manager with native handoffs ─────────────────────────────────────────── MANAGER_TXT = """ You are an intelligent router for user requests. @@ -88,7 +97,7 @@ async def run_agent(req: Request): # who produced the final_output / clarification? 
speaker = getattr(result, "agent", None) # Agent object or None agent_name = speaker.name if speaker else session_key - agent_key = canon(agent_name) + agent_key = AGENT_TO_KEY.get(speaker, "manager") if not agent_key: agent_key = "manager" From ad65b717bbdf097e143a65760841e66f18b83a92 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 12:06:20 +0900 Subject: [PATCH 123/230] Update agent_server.py --- src/agents/agent_server.py | 197 ++++++++++++++++++------------------- 1 file changed, 98 insertions(+), 99 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 7aed2409..91545301 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,130 +1,129 @@ -# agents/agent_server.py — single webhook with full trace +# agents/agent_server.py + from __future__ import annotations import os, sys, json, httpx from datetime import datetime from dotenv import load_dotenv -from fastapi import FastAPI, Request, HTTPException +from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware # ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from agents import Agent, Runner - -# helper to prepend handoff instructions from agents.extensions.handoff_prompt import prompt_with_handoff_instructions -# ── common helpers ───────────────────────────────────────────────────────── -CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # one endpoint is enough -_now = lambda: datetime.utcnow().isoformat() +# ── Specialist agents ────────────────────────────────────────────────────── +strategy = Agent("StrategyAgent", instructions="You create 7-day social strategies. Respond ONLY in structured JSON.") +content = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") +repurpose = Agent("RepurposeAgent", instructions="You repurpose content. Respond ONLY in structured JSON.") +feedback = Agent("FeedbackAgent", instructions="You critique content. Respond ONLY in structured JSON.") -def canon(name: str) -> str: - name = name.lower().replace(" ", "").replace("_", "") - return name[:-5] if name.endswith("agent") else name +AGENTS = { + "strategy": strategy, + "content": content, + "repurpose": repurpose, + "feedback": feedback, +} -async def send_webhook(payload: dict): - async with httpx.AsyncClient() as c: - print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2)) - await c.post(CHAT_URL, json=payload) # always same URL - print("========================") +# ── Manager agent ────────────────────────────────────────────────────────── +MANAGER_TXT = """ +You are the Manager. Look at the user's request and either: + 1) return JSON: { "handoff_to": "", + "clarify": "…optional follow-up question…", + "payload": { /* any override or trimmed inputs */ } } + 2) or return plain-text if you need general clarification. 
+""" +manager = Agent( + "Manager", + instructions=prompt_with_handoff_instructions(MANAGER_TXT), + handoffs=list(AGENTS.values()), +) +# ── Mappings ──────────────────────────────────────────────────────────────── +ALL_AGENTS = {"manager": manager, **AGENTS} +AGENT_TO_KEY = {agent: key for key, agent in ALL_AGENTS.items()} + +# ── Helpers ──────────────────────────────────────────────────────────────── def build_payload(task_id, user_id, agent_type, message, reason, trace): return { "task_id": task_id, "user_id": user_id, "agent_type": agent_type, - "message": message, # text | structured JSON - "metadata": {"reason": reason}, - "trace": trace, # full execution chain - "created_at": _now(), + "message": {"type": message.get("type"), "content": message.get("content")}, + "metadata": {"reason": reason}, + "trace": trace, + "created_at": datetime.utcnow().isoformat(), } -# ── specialist agents (instructions unchanged) ──────────────────────────── -strategy = Agent("StrategyAgent", instructions="You create 7-day social strategies. Respond ONLY in structured JSON.") -content = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") -repurpose = Agent("RepurposeAgent", instructions="You repurpose content. Respond ONLY in structured JSON.") -feedback = Agent("FeedbackAgent", instructions="You critique content. Respond ONLY in structured JSON.") - -AGENTS = { "strategy": strategy, "content": content, "repurpose": repurpose, "feedback": feedback } - -# canonical name for every Agent object we own -AGENT_TO_KEY = { - manager: "manager", - strategy: "strategy", - content: "content", - repurpose: "repurpose", - feedback: "feedback", -} - -# ── Manager with native handoffs ─────────────────────────────────────────── -MANAGER_TXT = """ -You are an intelligent router for user requests. -If you need clarification, ask a question (requires_user_input). -Otherwise delegate via a handoff to the correct agent. -When delegating, emit: {"handoff":"","reason":"..."} using one of: strategy, content, repurpose, feedback. -Never output the final plan yourself. -Never wrap JSON in code fences. 
-""" -manager = Agent("Manager", - instructions=prompt_with_handoff_instructions(MANAGER_TXT), - handoffs=list(AGENTS.values())) +def flatten_payload(p: dict) -> dict: + """ + Flatten one level of nested dicts so Bubble sees: + task_id, user_id, agent_type, + message_type, message_content, + metadata_reason, created_at + """ + return { + "task_id": p["task_id"], + "user_id": p["user_id"], + "agent_type": p["agent_type"], + "message_type": p["message"]["type"], + "message_content": p["message"]["content"], + "metadata_reason": p["metadata"].get("reason", ""), + "created_at": p["created_at"], + } -# ── FastAPI boilerplate ──────────────────────────────────────────────────── +# ── FastAPI app ──────────────────────────────────────────────────────────── app = FastAPI() app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"]) -# ── main endpoint ───────────────────────────────────────────────────────── @app.post("/agent") async def run_agent(req: Request): - data = await req.json() - action = data.get("action") - if action not in ("new_task", "new_message"): - raise HTTPException(400, "Unknown action") - - task_id, user_id = data["task_id"], data["user_id"] - text_in = data.get("user_prompt") or data.get("message") - if not text_in: - raise HTTPException(422, "Missing user_prompt or message") - - # decide which agent continues this thread - session_key = data.get("agent_session_id") or "manager" - current = manager if session_key == "manager" else AGENTS.get(session_key, manager) - - # let Runner drive full loop (handoffs + tools) until completion / clarification - result = await Runner.run(current, input=text_in, max_turns=12) - - # who produced the final_output / clarification? - speaker = getattr(result, "agent", None) # Agent object or None - agent_name = speaker.name if speaker else session_key - agent_key = AGENT_TO_KEY.get(speaker, "manager") - if not agent_key: - agent_key = "manager" - - # message to Bubble - if getattr(result, "requires_user_input", None): - msg = {"type":"text","content": result.requires_user_input} - cause = "Agent requested clarification" - else: - # try parse structured JSON; fallback to plain text + data = await req.json() + incoming = data.get("agent_type", "manager") + agent = ALL_AGENTS[incoming] + + # 1) Run whichever agent + result = await Runner(...).run(agent, data["prompt"], …) + raw = result.final_output.strip() + trace = result.to_debug_dict() + reason = result.metadata.get("reason", "") + + # Build a flat payload and send + async def send_flat(key, msg, why): + p = build_payload(data["task_id"], data["user_id"], key, {"type": "text", "content": msg}, why, trace) + await send_webhook(flatten_payload(p)) + + # 2) If it’s the manager, try to unpack a handoff envelope + if incoming == "manager": try: - parsed = json.loads(result.final_output) - if "output_type" in parsed: - msg, cause = parsed, "Agent returned structured output" - else: - raise ValueError - except Exception: - msg = {"type":"text","content": result.final_output.strip()} - cause = "Agent returned unstructured output" - - # build trace (list of dicts with role, content, tool_calls, etc.) 
- try: - trace = result.to_debug_dict() # new SDK helper - except Exception: - trace = [] - - payload = build_payload(task_id, user_id, - agent_key, - msg, cause, trace) - await send_webhook(payload) + env = json.loads(raw) + clarify = env.get("clarify", "") + target = env["handoff_to"] + payload = env.get("payload", data) + + # (a) Manager’s “please clarify” or routing message + await send_flat("manager", clarify, "handoff") + + # (b) Immediately call the specialist + if target in AGENTS: + spec_res = await Runner(...).run(AGENTS[target], payload["prompt"], …) + spec_raw = spec_res.final_output.strip() + spec_tr = spec_res.to_debug_dict() + spec_rs = spec_res.metadata.get("reason", "") + p2 = build_payload(data["task_id"], data["user_id"], target, + {"type":"text","content": spec_raw}, + spec_rs, spec_tr) + await send_webhook(flatten_payload(p2)) + + return {"ok": True} + + except (json.JSONDecodeError, KeyError): + # Not a JSON envelope → pure manager clarification + await send_flat("manager", raw, reason) + return {"ok": True} + + # 3) Else: a specialist’s direct run → one webhook only + await send_flat(incoming, raw, reason) return {"ok": True} From e00e78fa34682835a358ddfdba9bfa77c400622b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 12:13:06 +0900 Subject: [PATCH 124/230] Update agent_server.py --- src/agents/agent_server.py | 119 ++++++++++++++++++++++++------------- 1 file changed, 77 insertions(+), 42 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 91545301..93ed739b 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,10 +1,13 @@ -# agents/agent_server.py +# agents/agent_server.py — single webhook with full trace from __future__ import annotations -import os, sys, json, httpx +import os +import sys +import json +import httpx from datetime import datetime from dotenv import load_dotenv -from fastapi import FastAPI, Request +from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware # ── SDK setup ─────────────────────────────────────────────────────────────── @@ -13,6 +16,16 @@ from agents import Agent, Runner from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +# ── Environment variable for Bubble webhook URL +CHAT_URL = os.getenv("BUBBLE_CHAT_URL") + +# ── send_webhook helper ───────────────────────────────────────────────────── +async def send_webhook(payload: dict): + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2)) + await client.post(CHAT_URL, json=payload) + print("========================") + # ── Specialist agents ────────────────────────────────────────────────────── strategy = Agent("StrategyAgent", instructions="You create 7-day social strategies. Respond ONLY in structured JSON.") content = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") @@ -29,9 +42,11 @@ # ── Manager agent ────────────────────────────────────────────────────────── MANAGER_TXT = """ You are the Manager. 
Look at the user's request and either: - 1) return JSON: { "handoff_to": "", - "clarify": "…optional follow-up question…", - "payload": { /* any override or trimmed inputs */ } } + 1) return JSON: { + "handoff_to": "", + "clarify": "...optional follow-up question…", + "payload": { /* any override or trimmed inputs */ } + } 2) or return plain-text if you need general clarification. """ manager = Agent( @@ -44,11 +59,11 @@ ALL_AGENTS = {"manager": manager, **AGENTS} AGENT_TO_KEY = {agent: key for key, agent in ALL_AGENTS.items()} -# ── Helpers ──────────────────────────────────────────────────────────────── +# ── Payload builders ───────────────────────────────────────────────────────── def build_payload(task_id, user_id, agent_type, message, reason, trace): return { - "task_id": task_id, - "user_id": user_id, + "task_id": task_id, + "user_id": user_id, "agent_type": agent_type, "message": {"type": message.get("type"), "content": message.get("content")}, "metadata": {"reason": reason}, @@ -58,10 +73,8 @@ def build_payload(task_id, user_id, agent_type, message, reason, trace): def flatten_payload(p: dict) -> dict: """ - Flatten one level of nested dicts so Bubble sees: - task_id, user_id, agent_type, - message_type, message_content, - metadata_reason, created_at + Take one level of nested message & metadata fields + and promote to top-level keys for Bubble. """ return { "task_id": p["task_id"], @@ -75,55 +88,77 @@ def flatten_payload(p: dict) -> dict: # ── FastAPI app ──────────────────────────────────────────────────────────── app = FastAPI() -app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, - allow_methods=["*"], allow_headers=["*"]) +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) @app.post("/agent") async def run_agent(req: Request): - data = await req.json() - incoming = data.get("agent_type", "manager") - agent = ALL_AGENTS[incoming] + data = await req.json() + incoming = data.get("agent_type", "manager") + agent = ALL_AGENTS.get(incoming, manager) - # 1) Run whichever agent - result = await Runner(...).run(agent, data["prompt"], …) + # ensure we have a prompt + prompt = data.get("prompt") + if not prompt: + raise HTTPException(422, "Missing 'prompt' field") + + # 1) run the selected agent + result = await Runner.run(agent, input=prompt, max_turns=12) raw = result.final_output.strip() trace = result.to_debug_dict() reason = result.metadata.get("reason", "") - # Build a flat payload and send - async def send_flat(key, msg, why): - p = build_payload(data["task_id"], data["user_id"], key, {"type": "text", "content": msg}, why, trace) - await send_webhook(flatten_payload(p)) - - # 2) If it’s the manager, try to unpack a handoff envelope + async def send_flat(key: str, msg: str, why: str): + payload = build_payload( + data["task_id"], + data["user_id"], + key, + {"type": "text", "content": msg}, + why, + trace, + ) + await send_webhook(flatten_payload(payload)) + + # 2) Manager path: try JSON envelope if incoming == "manager": try: - env = json.loads(raw) - clarify = env.get("clarify", "") - target = env["handoff_to"] - payload = env.get("payload", data) + env = json.loads(raw) + clarify = env.get("clarify", "") + target = env["handoff_to"] + payload = env.get("payload", data) - # (a) Manager’s “please clarify” or routing message + # 2a) manager’s clarification or routing message await send_flat("manager", clarify, "handoff") - # (b) Immediately call the specialist + # 2b) 
immediately run specialist if target in AGENTS: - spec_res = await Runner(...).run(AGENTS[target], payload["prompt"], …) - spec_raw = spec_res.final_output.strip() - spec_tr = spec_res.to_debug_dict() - spec_rs = spec_res.metadata.get("reason", "") - p2 = build_payload(data["task_id"], data["user_id"], target, - {"type":"text","content": spec_raw}, - spec_rs, spec_tr) - await send_webhook(flatten_payload(p2)) + spec_prompt = payload.get("prompt", prompt) + spec_res = await Runner.run(AGENTS[target], input=spec_prompt, max_turns=12) + spec_raw = spec_res.final_output.strip() + spec_trace = spec_res.to_debug_dict() + spec_reason = spec_res.metadata.get("reason", "") + spec_payload = build_payload( + data["task_id"], + data["user_id"], + target, + {"type": "text", "content": spec_raw}, + spec_reason, + spec_trace, + ) + await send_webhook(flatten_payload(spec_payload)) return {"ok": True} except (json.JSONDecodeError, KeyError): - # Not a JSON envelope → pure manager clarification + # pure manager clarification await send_flat("manager", raw, reason) return {"ok": True} - # 3) Else: a specialist’s direct run → one webhook only + # 3) Specialist path: single webhook await send_flat(incoming, raw, reason) return {"ok": True} From 1f483aa1c35c1378581886cadcd0263288953678 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 12:18:38 +0900 Subject: [PATCH 125/230] Update agent_server.py --- src/agents/agent_server.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 93ed739b..ff0ba670 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -55,9 +55,8 @@ async def send_webhook(payload: dict): handoffs=list(AGENTS.values()), ) -# ── Mappings ──────────────────────────────────────────────────────────────── -ALL_AGENTS = {"manager": manager, **AGENTS} -AGENT_TO_KEY = {agent: key for key, agent in ALL_AGENTS.items()} +# ── All agents map ─────────────────────────────────────────────────────────── +ALL_AGENTS = {"manager": manager, **AGENTS} # ── Payload builders ───────────────────────────────────────────────────────── def build_payload(task_id, user_id, agent_type, message, reason, trace): From f1b9b64c16fbbeacfe06f3c707eee270548d1ea2 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 12:24:36 +0900 Subject: [PATCH 126/230] Update agent_server.py --- src/agents/agent_server.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index ff0ba670..5b575462 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -101,8 +101,12 @@ async def run_agent(req: Request): incoming = data.get("agent_type", "manager") agent = ALL_AGENTS.get(incoming, manager) - # ensure we have a prompt - prompt = data.get("prompt") + # ensure we have a prompt (accept new or legacy fields) + prompt = ( + data.get("prompt") + or data.get("user_prompt") + or data.get("message") + ) if not prompt: raise HTTPException(422, "Missing 'prompt' field") From 71c108255de6eaab39f8910c8fabe0ff0615629c Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 12:30:29 +0900 Subject: [PATCH 127/230] Update agent_server.py --- src/agents/agent_server.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git 
a/src/agents/agent_server.py b/src/agents/agent_server.py index 5b575462..761a856f 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -113,7 +113,11 @@ async def run_agent(req: Request): # 1) run the selected agent result = await Runner.run(agent, input=prompt, max_turns=12) raw = result.final_output.strip() - trace = result.to_debug_dict() + # handle trace existence in older/newer SDKs + try: + trace = result.to_debug_dict() + except AttributeError: + trace = [] reason = result.metadata.get("reason", "") async def send_flat(key: str, msg: str, why: str): @@ -138,12 +142,15 @@ async def send_flat(key: str, msg: str, why: str): # 2a) manager’s clarification or routing message await send_flat("manager", clarify, "handoff") - # 2b) immediately run specialist + # 2b) immediately run specialist if valid if target in AGENTS: spec_prompt = payload.get("prompt", prompt) spec_res = await Runner.run(AGENTS[target], input=spec_prompt, max_turns=12) spec_raw = spec_res.final_output.strip() - spec_trace = spec_res.to_debug_dict() + try: + spec_trace = spec_res.to_debug_dict() + except AttributeError: + spec_trace = [] spec_reason = spec_res.metadata.get("reason", "") spec_payload = build_payload( data["task_id"], From 696d6b0be255844f915a656bf27e1b329b708a43 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 12:34:58 +0900 Subject: [PATCH 128/230] Update agent_server.py --- src/agents/agent_server.py | 45 ++++++++++++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 761a856f..34ddbe13 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -112,14 +112,27 @@ async def run_agent(req: Request): # 1) run the selected agent result = await Runner.run(agent, input=prompt, max_turns=12) - raw = result.final_output.strip() - # handle trace existence in older/newer SDKs + + # compute raw output and reason + if hasattr(result, "requires_user_input") and result.requires_user_input: + raw = result.requires_user_input + reason = "Agent requested clarification" + else: + raw = result.final_output.strip() + # detect structured JSON vs plain text + try: + json.loads(raw) + reason = "Agent returned structured JSON" + except json.JSONDecodeError: + reason = "Agent returned unstructured output" + + # safe trace try: trace = result.to_debug_dict() - except AttributeError: + except Exception: trace = [] - reason = result.metadata.get("reason", "") + # helper to send a flattened webhook async def send_flat(key: str, msg: str, why: str): payload = build_payload( data["task_id"], @@ -139,19 +152,33 @@ async def send_flat(key: str, msg: str, why: str): target = env["handoff_to"] payload = env.get("payload", data) - # 2a) manager’s clarification or routing message + # manager’s clarification or routing question await send_flat("manager", clarify, "handoff") - # 2b) immediately run specialist if valid + # immediately run specialist if valid if target in AGENTS: spec_prompt = payload.get("prompt", prompt) spec_res = await Runner.run(AGENTS[target], input=spec_prompt, max_turns=12) - spec_raw = spec_res.final_output.strip() + + # spec raw & reason + if hasattr(spec_res, "requires_user_input") and spec_res.requires_user_input: + spec_raw = spec_res.requires_user_input + spec_reason = "Agent requested clarification" + else: + spec_raw = spec_res.final_output.strip() + try: + json.loads(spec_raw) + spec_reason = "Agent 
returned structured JSON" + except json.JSONDecodeError: + spec_reason = "Agent returned unstructured output" + + # spec trace try: spec_trace = spec_res.to_debug_dict() - except AttributeError: + except Exception: spec_trace = [] - spec_reason = spec_res.metadata.get("reason", "") + + # send specialist webhook spec_payload = build_payload( data["task_id"], data["user_id"], From 94b2124790259d082fecde358e43c5c3eb2fdd46 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 12:49:18 +0900 Subject: [PATCH 129/230] Update agent_server.py --- src/agents/agent_server.py | 53 ++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 31 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 34ddbe13..882d4fe2 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -147,50 +147,41 @@ async def send_flat(key: str, msg: str, why: str): # 2) Manager path: try JSON envelope if incoming == "manager": try: - env = json.loads(raw) + # strip markdown fences if present + clean = raw.strip() + if clean.startswith("```"): + parts = clean.splitlines() + json_str = "\n".join(parts[1:-1]) + else: + json_str = clean + + env = json.loads(json_str) clarify = env.get("clarify", "") target = env["handoff_to"] payload = env.get("payload", data) - # manager’s clarification or routing question + # manager’s clarify webhook await send_flat("manager", clarify, "handoff") - # immediately run specialist if valid + # then immediately invoke the specialist if target in AGENTS: spec_prompt = payload.get("prompt", prompt) - spec_res = await Runner.run(AGENTS[target], input=spec_prompt, max_turns=12) - - # spec raw & reason - if hasattr(spec_res, "requires_user_input") and spec_res.requires_user_input: - spec_raw = spec_res.requires_user_input - spec_reason = "Agent requested clarification" - else: - spec_raw = spec_res.final_output.strip() - try: - json.loads(spec_raw) - spec_reason = "Agent returned structured JSON" - except json.JSONDecodeError: - spec_reason = "Agent returned unstructured output" - - # spec trace - try: - spec_trace = spec_res.to_debug_dict() - except Exception: - spec_trace = [] - - # send specialist webhook - spec_payload = build_payload( - data["task_id"], - data["user_id"], - target, - {"type": "text", "content": spec_raw}, - spec_reason, - spec_trace, + spec_res = await Runner.run( + AGENTS[target], + input=spec_prompt, + max_turns=12 ) + # … build spec_raw, spec_reason, spec_trace … await send_webhook(flatten_payload(spec_payload)) return {"ok": True} + except (json.JSONDecodeError, KeyError): + # fallback: pure manager clarification + await send_flat("manager", raw, reason) + return {"ok": True} + + except (json.JSONDecodeError, KeyError): # pure manager clarification await send_flat("manager", raw, reason) From bd8a9e23ef5a435d9e4fb6c27990041cd5e49937 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 13:17:42 +0900 Subject: [PATCH 130/230] Update agent_server.py handoff logic upd --- src/agents/agent_server.py | 210 +++++++++++++++++-------------------- 1 file changed, 96 insertions(+), 114 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 882d4fe2..7446b3b3 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,19 +1,21 @@ -# agents/agent_server.py — single webhook with full trace +# agents/agent_server.py — deterministic handoffs via SDK 
`handoff()` from __future__ import annotations import os import sys import json +import re import httpx from datetime import datetime from dotenv import load_dotenv from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel # ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -from agents import Agent, Runner +from agents import Agent, Runner, handoff from agents.extensions.handoff_prompt import prompt_with_handoff_instructions # ── Environment variable for Bubble webhook URL @@ -27,35 +29,67 @@ async def send_webhook(payload: dict): print("========================") # ── Specialist agents ────────────────────────────────────────────────────── -strategy = Agent("StrategyAgent", instructions="You create 7-day social strategies. Respond ONLY in structured JSON.") -content = Agent("ContentAgent", instructions="You write brand-aligned social posts. Respond ONLY in structured JSON.") -repurpose = Agent("RepurposeAgent", instructions="You repurpose content. Respond ONLY in structured JSON.") -feedback = Agent("FeedbackAgent", instructions="You critique content. Respond ONLY in structured JSON.") - -AGENTS = { - "strategy": strategy, - "content": content, - "repurpose": repurpose, - "feedback": feedback, -} +strategy = Agent( + name="strategy", + instructions="You create 7-day social strategies. Respond ONLY in structured JSON." +) +content = Agent( + name="content", + instructions="You write brand-aligned social posts. Respond ONLY in structured JSON." +) +repurpose = Agent( + name="repurpose", + instructions="You repurpose content. Respond ONLY in structured JSON." +) +feedback = Agent( + name="feedback", + instructions="You critique content. Respond ONLY in structured JSON." +) + +AGENTS = {"strategy": strategy, "content": content, + "repurpose": repurpose, "feedback": feedback} + +# ── Pydantic model for Manager handoff payload ──────────────────────────── +class HandoffData(BaseModel): + clarify: str + prompt: str # ── Manager agent ────────────────────────────────────────────────────────── MANAGER_TXT = """ -You are the Manager. Look at the user's request and either: - 1) return JSON: { - "handoff_to": "", - "clarify": "...optional follow-up question…", - "payload": { /* any override or trimmed inputs */ } - } - 2) or return plain-text if you need general clarification. +You are the Manager. ALWAYS call exactly one `transfer_to_` handoff tool, +with arguments matching the HandoffData schema: + + { + "clarify": "", + "prompt": "" + } + +Do NOT output any other text or JSON. The SDK will enforce validity. 
""" + +async def on_handoff(ctx, input_data: HandoffData): + # Send manager clarification webhook + payload = build_payload( + task_id=ctx.input['task_id'], + user_id=ctx.input['user_id'], + agent_type='manager', + message={'type':'text','content': input_data.clarify}, + reason='handoff', + trace=ctx.trace if hasattr(ctx, 'trace') else [] + ) + await send_webhook(flatten_payload(payload)) + manager = Agent( - "Manager", + name="manager", instructions=prompt_with_handoff_instructions(MANAGER_TXT), - handoffs=list(AGENTS.values()), + handoffs=[ + handoff(agent=strategy, on_handoff=on_handoff, input_type=HandoffData), + handoff(agent=content, on_handoff=on_handoff, input_type=HandoffData), + handoff(agent=repurpose, on_handoff=on_handoff, input_type=HandoffData), + handoff(agent=feedback, on_handoff=on_handoff, input_type=HandoffData), + ] ) -# ── All agents map ─────────────────────────────────────────────────────────── ALL_AGENTS = {"manager": manager, **AGENTS} # ── Payload builders ───────────────────────────────────────────────────────── @@ -72,8 +106,7 @@ def build_payload(task_id, user_id, agent_type, message, reason, trace): def flatten_payload(p: dict) -> dict: """ - Take one level of nested message & metadata fields - and promote to top-level keys for Bubble. + Flatten one level of nested message/metadata for Bubble. """ return { "task_id": p["task_id"], @@ -89,104 +122,53 @@ def flatten_payload(p: dict) -> dict: app = FastAPI() app.add_middleware( CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], + allow_origins=["*"], allow_credentials=True, + allow_methods=["*"], allow_headers=["*"], ) @app.post("/agent") async def run_agent(req: Request): - data = await req.json() - incoming = data.get("agent_type", "manager") - agent = ALL_AGENTS.get(incoming, manager) - - # ensure we have a prompt (accept new or legacy fields) + data = await req.json() + # normalize prompt prompt = ( - data.get("prompt") - or data.get("user_prompt") - or data.get("message") + data.get("prompt") or data.get("user_prompt") or data.get("message") ) if not prompt: raise HTTPException(422, "Missing 'prompt' field") - # 1) run the selected agent - result = await Runner.run(agent, input=prompt, max_turns=12) - - # compute raw output and reason - if hasattr(result, "requires_user_input") and result.requires_user_input: - raw = result.requires_user_input - reason = "Agent requested clarification" - else: - raw = result.final_output.strip() - # detect structured JSON vs plain text - try: - json.loads(raw) - reason = "Agent returned structured JSON" - except json.JSONDecodeError: - reason = "Agent returned unstructured output" + # mandatory IDs + task_id = data.get("task_id") + user_id = data.get("user_id") + if not task_id or not user_id: + raise HTTPException(422, "Missing 'task_id' or 'user_id'") + + # 1) Always invoke the manager (it will hand off internally) + result = await Runner.run( + manager, + input={"task_id": task_id, "user_id": user_id, "prompt": prompt}, + max_turns=12, + ) - # safe trace + # 2) Final output comes from the last agent in the chain + raw = result.final_output.strip() + # determine reason try: - trace = result.to_debug_dict() - except Exception: - trace = [] - - # helper to send a flattened webhook - async def send_flat(key: str, msg: str, why: str): - payload = build_payload( - data["task_id"], - data["user_id"], - key, - {"type": "text", "content": msg}, - why, - trace, - ) - await send_webhook(flatten_payload(payload)) - - # 2) 
Manager path: try JSON envelope - if incoming == "manager": - try: - # strip markdown fences if present - clean = raw.strip() - if clean.startswith("```"): - parts = clean.splitlines() - json_str = "\n".join(parts[1:-1]) - else: - json_str = clean - - env = json.loads(json_str) - clarify = env.get("clarify", "") - target = env["handoff_to"] - payload = env.get("payload", data) - - # manager’s clarify webhook - await send_flat("manager", clarify, "handoff") - - # then immediately invoke the specialist - if target in AGENTS: - spec_prompt = payload.get("prompt", prompt) - spec_res = await Runner.run( - AGENTS[target], - input=spec_prompt, - max_turns=12 - ) - # … build spec_raw, spec_reason, spec_trace … - await send_webhook(flatten_payload(spec_payload)) - - return {"ok": True} - - except (json.JSONDecodeError, KeyError): - # fallback: pure manager clarification - await send_flat("manager", raw, reason) - return {"ok": True} - - - except (json.JSONDecodeError, KeyError): - # pure manager clarification - await send_flat("manager", raw, reason) - return {"ok": True} - - # 3) Specialist path: single webhook - await send_flat(incoming, raw, reason) + json.loads(raw) + reason = "Agent returned structured JSON" + except json.JSONDecodeError: + reason = "Agent returned unstructured output" + # safe trace + trace = getattr(result, 'to_debug_dict', lambda: [])() + + # 3) Send the final specialist webhook + out_payload = build_payload( + task_id=task_id, + user_id=user_id, + agent_type=result.agent_name, + message={"type":"text","content": raw}, + reason=reason, + trace=trace + ) + await send_webhook(flatten_payload(out_payload)) + return {"ok": True} From 68361c561bbdc88e1367439fe8b87f91793f36f8 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 13:24:36 +0900 Subject: [PATCH 131/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 7446b3b3..0d2c0929 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -145,7 +145,7 @@ async def run_agent(req: Request): # 1) Always invoke the manager (it will hand off internally) result = await Runner.run( manager, - input={"task_id": task_id, "user_id": user_id, "prompt": prompt}, + input=prompt, max_turns=12, ) From 8e1567e61e7425d97a1356cad7a07c5742c4a3bb Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 13:31:11 +0900 Subject: [PATCH 132/230] Update agent_server.py --- src/agents/agent_server.py | 44 ++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 0d2c0929..ca83589e 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -4,7 +4,6 @@ import os import sys import json -import re import httpx from datetime import datetime from dotenv import load_dotenv @@ -15,7 +14,7 @@ # ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -from agents import Agent, Runner, handoff +from agents import Agent, Runner, handoff, RunContextWrapper from agents.extensions.handoff_prompt import prompt_with_handoff_instructions # ── Environment variable for Bubble webhook URL @@ -56,26 +55,32 @@ class HandoffData(BaseModel): # ── Manager agent 
────────────────────────────────────────────────────────── MANAGER_TXT = """ -You are the Manager. ALWAYS call exactly one `transfer_to_` handoff tool, -with arguments matching the HandoffData schema: - - { - "clarify": "", - "prompt": "" - } - -Do NOT output any other text or JSON. The SDK will enforce validity. +You are the Manager. When routing, you MUST call exactly one of these tools: + • transfer_to_strategy + • transfer_to_content + • transfer_to_repurpose + • transfer_to_feedback + +Each call must pass a JSON object matching this schema (HandoffData): +{ + "clarify": "", + "prompt": "" +} + +Do NOT output any other JSON or wrap in Markdown. The SDK will handle the rest. """ -async def on_handoff(ctx, input_data: HandoffData): +async def on_handoff(ctx: RunContextWrapper[HandoffData], input_data: HandoffData): # Send manager clarification webhook + task_id = ctx.context['task_id'] + user_id = ctx.context['user_id'] payload = build_payload( - task_id=ctx.input['task_id'], - user_id=ctx.input['user_id'], + task_id=task_id, + user_id=user_id, agent_type='manager', message={'type':'text','content': input_data.clarify}, reason='handoff', - trace=ctx.trace if hasattr(ctx, 'trace') else [] + trace=ctx.usage.to_debug_dict() if hasattr(ctx.usage, 'to_debug_dict') else [] ) await send_webhook(flatten_payload(payload)) @@ -142,23 +147,24 @@ async def run_agent(req: Request): if not task_id or not user_id: raise HTTPException(422, "Missing 'task_id' or 'user_id'") - # 1) Always invoke the manager (it will hand off internally) + # 1) Always invoke the manager; pass context for on_handoff result = await Runner.run( manager, input=prompt, + context={"task_id": task_id, "user_id": user_id}, max_turns=12, ) # 2) Final output comes from the last agent in the chain raw = result.final_output.strip() - # determine reason try: json.loads(raw) reason = "Agent returned structured JSON" except json.JSONDecodeError: reason = "Agent returned unstructured output" - # safe trace - trace = getattr(result, 'to_debug_dict', lambda: [])() + trace = [] + if hasattr(result, 'to_debug_dict'): + trace = result.to_debug_dict() # 3) Send the final specialist webhook out_payload = build_payload( From e4136bda467539bab16429b5738812ee766fa697 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 13:40:07 +0900 Subject: [PATCH 133/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index ca83589e..48f54045 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -170,7 +170,7 @@ async def run_agent(req: Request): out_payload = build_payload( task_id=task_id, user_id=user_id, - agent_type=result.agent_name, + agent_type=(result.agent.name if hasattr(result, 'agent') else 'unknown'), message={"type":"text","content": raw}, reason=reason, trace=trace From 62e61f12663100947ad19a5d66586f03890004ee Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 13:48:21 +0900 Subject: [PATCH 134/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 48f54045..c37f5054 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -170,7 +170,7 @@ async def run_agent(req: Request): out_payload = build_payload( task_id=task_id, 
user_id=user_id, - agent_type=(result.agent.name if hasattr(result, 'agent') else 'unknown'), + agent_type=(result.agent.name if hasattr(result, 'agent') and result.agent else 'manager'), 'agent') else 'unknown'), message={"type":"text","content": raw}, reason=reason, trace=trace From 6f5b5257cff52af3a29be6d99d08bb318e3d3bb6 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 13:50:23 +0900 Subject: [PATCH 135/230] Update agent_server.py --- src/agents/agent_server.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index c37f5054..2bc606d1 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -170,7 +170,11 @@ async def run_agent(req: Request): out_payload = build_payload( task_id=task_id, user_id=user_id, - agent_type=(result.agent.name if hasattr(result, 'agent') and result.agent else 'manager'), 'agent') else 'unknown'), + agent_type = ( + result.agent.name + if hasattr(result, "agent") and result.agent + else "manager" + ) message={"type":"text","content": raw}, reason=reason, trace=trace From 4f4f378cd3b2f721993a33c5df12be8dfdbdf34e Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 13:54:06 +0900 Subject: [PATCH 136/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 2bc606d1..6164bd88 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -174,7 +174,7 @@ async def run_agent(req: Request): result.agent.name if hasattr(result, "agent") and result.agent else "manager" - ) + ), message={"type":"text","content": raw}, reason=reason, trace=trace From bc3ca7821186a4e1cdb510a05b8209e40145c0b3 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 13:59:54 +0900 Subject: [PATCH 137/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 6164bd88..87c37db4 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -77,7 +77,7 @@ async def on_handoff(ctx: RunContextWrapper[HandoffData], input_data: HandoffDat payload = build_payload( task_id=task_id, user_id=user_id, - agent_type='manager', + agent_type=(result.agent.name if hasattr(result, 'agent') and result.agent else 'manager'), message={'type':'text','content': input_data.clarify}, reason='handoff', trace=ctx.usage.to_debug_dict() if hasattr(ctx.usage, 'to_debug_dict') else [] From bc3e7b9633e8f57e13e6885a9a06d88caea59ed9 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 14:05:03 +0900 Subject: [PATCH 138/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 87c37db4..cf9e8558 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -77,7 +77,7 @@ async def on_handoff(ctx: RunContextWrapper[HandoffData], input_data: HandoffDat payload = build_payload( task_id=task_id, user_id=user_id, - agent_type=(result.agent.name if hasattr(result, 'agent') and result.agent else 'manager'), + agent_type="manager", 
message={'type':'text','content': input_data.clarify}, reason='handoff', trace=ctx.usage.to_debug_dict() if hasattr(ctx.usage, 'to_debug_dict') else [] From c4816162c1599d9293f2d1e03391c4161798b97f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 14:11:51 +0900 Subject: [PATCH 139/230] Update agent_server.py --- src/agents/agent_server.py | 48 ++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index cf9e8558..9536e587 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -1,4 +1,4 @@ -# agents/agent_server.py — deterministic handoffs via SDK `handoff()` +# agents/agent_server.py — deterministic handoffs via SDK `handoff()` with robust error handling from __future__ import annotations import os @@ -147,34 +147,46 @@ async def run_agent(req: Request): if not task_id or not user_id: raise HTTPException(422, "Missing 'task_id' or 'user_id'") - # 1) Always invoke the manager; pass context for on_handoff - result = await Runner.run( - manager, - input=prompt, - context={"task_id": task_id, "user_id": user_id}, - max_turns=12, - ) + # 1) Run Manager with error catch for handoff parsing issues + try: + result = await Runner.run( + manager, + input=prompt, + context={"task_id": task_id, "user_id": user_id}, + max_turns=12, + ) + except json.JSONDecodeError as e: + # Handoff JSON malformed: send fallback clarification + fallback = build_payload( + task_id=task_id, + user_id=user_id, + agent_type="manager", + message={"type":"text","content": + "Sorry, I couldn’t process your request—could you rephrase?"}, + reason="handoff_parse_error", + trace=[] + ) + await send_webhook(flatten_payload(fallback)) + return {"ok": True} # 2) Final output comes from the last agent in the chain - raw = result.final_output.strip() + raw = result.final_output.strip() + print(f"Raw LLM output: {raw}") try: json.loads(raw) reason = "Agent returned structured JSON" - except json.JSONDecodeError: + except Exception: reason = "Agent returned unstructured output" - trace = [] - if hasattr(result, 'to_debug_dict'): - trace = result.to_debug_dict() + trace = result.to_debug_dict() if hasattr(result, 'to_debug_dict') else [] # 3) Send the final specialist webhook + final_type = (result.agent.name + if hasattr(result, "agent") and result.agent + else "manager") out_payload = build_payload( task_id=task_id, user_id=user_id, - agent_type = ( - result.agent.name - if hasattr(result, "agent") and result.agent - else "manager" - ), + agent_type=final_type, message={"type":"text","content": raw}, reason=reason, trace=trace From f2c01288d3dcef82e8471d38d5897461875a54c6 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sun, 27 Apr 2025 23:46:42 +0900 Subject: [PATCH 140/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 214 +++++++++++++++++------------ 1 file changed, 127 insertions(+), 87 deletions(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index 8bb6b28c..fdfd97c3 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -1,100 +1,140 @@ -import sys -import os -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) +""" +Conversational Profile Builder router +• Asks tailored questions until all required keys are filled +• Emits structured_profile JSON +""" 
-from fastapi import APIRouter, Request -from agents import Agent, Runner -from agents.tool import WebSearchTool +from __future__ import annotations +import os, sys, json, httpx from datetime import datetime -import json -import httpx +from dotenv import load_dotenv +from fastapi import APIRouter, Request, HTTPException +from fastapi.middleware.cors import CORSMiddleware # for local tests +# ── SDK -------------------------------------------------------------------- +load_dotenv() +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) +from agents import Agent, Runner + +# ── router ----------------------------------------------------------------- router = APIRouter() -# Predefined Bubble webhook URL -WEBHOOK_URL = "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profilebuilder_return" +# ── webhooks --------------------------------------------------------------- +CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # questions / clarifications +STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # final profile JSON -# ProfileBuilder agent skeleton; tools will be set per-request for dynamic locale/fallback -profile_builder_agent = Agent( - name="ProfileBuilderAgent", - instructions=""" -You are a profile builder assistant with web search capability. - -You will receive a set of key-value inputs (e.g., profile_uuid, handle URL, etc.). -Your job: -1. Use the provided fields (including fallback follower count if given). -2. If a locale is provided, use it to tailor the web search tool's user_location. -3. Perform web searches and reasoning to determine follower_count, posting_style, industry, engagement_rate, and any notable public context. -4. Summarize this into JSON as follows: -{ +# ── helper ----------------------------------------------------------------- +def _now() -> str: + from datetime import datetime + return datetime.utcnow().isoformat() + +async def _dispatch(url: str, payload: dict): + async with httpx.AsyncClient() as c: + print("=== PB Webhook ===\n", json.dumps(payload, indent=2)) + await c.post(url, json=payload) + print("==================") + +def clarify(task, user, text, reason="Agent requested clarification"): + return { + "task_id": task, + "user_id": user, + "agent_type": "profilebuilder", + "message": {"type": "text", "content": text}, + "metadata": {"reason": reason}, + "created_at": _now(), + } + +def structured(task, user, obj): + return { + "task_id": task, + "user_id": user, + "agent_type": "profilebuilder", + "message": obj, + "created_at": _now(), + } + +# ── ProfileBuilderAgent ---------------------------------------------------- +REQUIRED_KEYS = [ + "primary_SNSchannel", "profile_type", "core_topic", "sub_angle", + "primary_objective", "content_strength", "time_budget_weekly", + "inspiration_accounts", "provided_follower_count", "locale", + "motivation_note" +] + +PB_PROMPT = f""" +You are ProfileBuilderAgent. + +Goal: collect each of these keys once: {", ".join(REQUIRED_KEYS)}. + +Rules: +1. Ask ONE question at a time, tailored to previous answers. +2. After each user reply, decide which required key is still missing and + ask the next most relevant question. +3. Reflect back occasionally so the user feels understood. +4. When ALL keys are collected, respond ONLY with: + +{{ "output_type": "structured_profile", "contains_image": false, - "details": { - "profile_uuid": "...", - "summary": "Concise profile summary...", - "prompt_snippet": { "tone": "...", "goal": "...", "platform": "..." 
}, - "follower_count": 12345, - "posting_style": "...", - "industry": "...", - "engagement_rate": "...", - "additional_context": "..." - } -} -Only return JSON with exactly these fields—no markdown or commentary. -""", - tools=[] + "details": {{ ...all keys filled ... }} +}} + +5. If you still need information, respond ONLY with: +{{ "requires_user_input": "your next question" }} + +Do NOT wrap responses in markdown fences. +""" + +profile_builder_agent = Agent( + name="ProfileBuilderAgent", + instructions=PB_PROMPT, + tools=[], ) +# ── API endpoint ----------------------------------------------------------- @router.post("/profilebuilder") -async def build_profile(request: Request): - data = await request.json() - # Extract core identifiers and optional fallbacks - profile_uuid = data.pop("profile_uuid", None) - provided_fc = data.pop("provided_follower_count", None) - locale_text = data.pop("locale", None) - - # Build tool list dynamically based on locale - user_loc = {"type": "approximate", "region": locale_text} if locale_text else None - tools = [WebSearchTool(user_location=user_loc, search_context_size="low")] - profile_builder_agent.tools = tools - - # Flatten remaining inputs into prompt lines - prompt_lines = [] - for key, val in data.items(): - if val not in (None, "", "null"): - prompt_lines.append(f"{key}: {val}") - if provided_fc is not None: - prompt_lines.append(f"Provided follower count: {provided_fc}") - - # Construct the agent prompt - agent_input = f"Profile UUID: {profile_uuid}\n" + "\n".join(prompt_lines) - - # Invoke the agent - result = await Runner.run(profile_builder_agent, input=agent_input) - - # Clean markdown fences - output = result.final_output.strip() - if output.startswith("```") and output.endswith("```"): - output = output.split("\n", 1)[-1].rsplit("\n", 1)[0] - - # Parse agent JSON response +async def profile_builder_endpoint(req: Request): + """ + Expects: + { + "action": "new_task" | "new_message", + "task_id": "...", + "user_id": "...", + "user_prompt": "...", # for new_task + "message": "...", # for new_message + "agent_session_id": "profilebuilder" # for new_message + } + """ + data = await req.json() + action = data.get("action") + if action not in ("new_task", "new_message"): + raise HTTPException(400, "Unknown action") + + task_id, user_id = data["task_id"], data["user_id"] + user_text = data.get("user_prompt") or data.get("message") + if not user_text: + raise HTTPException(422, "Missing user_prompt or message") + + # run the agent + result = await Runner.run(profile_builder_agent, input=user_text, max_turns=1) + + # clarification? + if getattr(result, "requires_user_input", None): + await _dispatch(CHAT_URL, + clarify(task_id, user_id, result.requires_user_input)) + return {"ok": True} + + # final structured? 
try: - parsed = json.loads(output) - details = parsed.get("details", {}) + parsed = json.loads(result.final_output.strip()) + if parsed.get("output_type") == "structured_profile": + await _dispatch(STRUCT_URL, structured(task_id, user_id, parsed)) + return {"ok": True} except Exception: - details = {} - - # Build profile_data payload dynamically - profile_data = {"profile_uuid": profile_uuid} - for k, v in details.items(): - profile_data[k] = v - profile_data["created_at"] = datetime.utcnow().isoformat() - - # Post to Bubble webhook - async with httpx.AsyncClient() as client: - try: - await client.post(WEBHOOK_URL, json=profile_data) - except Exception as e: - profile_data["webhook_error"] = str(e) - - return profile_data + pass + + # fallback: return raw text as chat + await _dispatch(CHAT_URL, + clarify(task_id, user_id, result.final_output.strip(), + reason="Agent returned unstructured output")) + return {"ok": True"} From 17ddada08b50d2d5042cbce7a50b97c17ce69fcd Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 11:58:38 +0900 Subject: [PATCH 141/230] Update webhook.py --- src/agents/util/webhook.py | 69 +++++++++++++++++++++++++++++--------- 1 file changed, 53 insertions(+), 16 deletions(-) diff --git a/src/agents/util/webhook.py b/src/agents/util/webhook.py index 825800d5..4b12244b 100644 --- a/src/agents/util/webhook.py +++ b/src/agents/util/webhook.py @@ -1,16 +1,53 @@ -import os, asyncio, logging, httpx - -STRUCTURED_URL = os.getenv("BUBBLE_STRUCTURED_URL") -CLARIFICATION_URL = os.getenv("BUBBLE_CHAT_URL") - -async def post_webhook(url: str, data: dict, retries: int = 3): - for i in range(retries): - try: - async with httpx.AsyncClient(timeout=10) as client: - r = await client.post(url, json=data) - r.raise_for_status() - return - except Exception as e: - if i == retries - 1: - logging.error("Webhook failed %s: %s", url, e) - await asyncio.sleep(2 ** i) +"""utils/webhook.py +A single, reusable helper for posting JSON payloads to Bubble‑workflow URLs. + +Usage in your FastAPI code: + + from utils.webhook import send_webhook + + url = TASK_URL_MAP[task_type] # looked up from env‑vars + await send_webhook(url, flattened_payload) + +You keep *all* Bubble‑specific routing logic (task_type → URL) in your +FastAPI service, while this helper focuses solely on safe, idempotent +HTTP posting and basic allow‑list protection. +""" +from __future__ import annotations + +import os +import json +import httpx +from typing import Any, Mapping + +# ----------------------------------------------------------------------------- +# Configuration +# ----------------------------------------------------------------------------- +# Only allow POSTs to URLs that start with this root (prevents exfiltration +# if someone accidentally passes a malicious URL in the incoming payload). +ALLOWED_ROOT = os.getenv("BUBBLE_DOMAIN_ROOT", "https://rgtnow.com") + +# Optional default timeout (seconds) for outbound webhook calls. +HTTP_TIMEOUT = float(os.getenv("WEBHOOK_TIMEOUT", "10")) + +# ----------------------------------------------------------------------------- +# Public helper +# ----------------------------------------------------------------------------- +async def send_webhook(target_url: str, payload: Mapping[str, Any]) -> None: + """POST *payload* as JSON to *target_url*. + + Raises: + ValueError: if *target_url* is outside the allowed Bubble domain root. + httpx.HTTPStatusError: if Bubble responds with an error status code. 
+ """ + if not target_url.startswith(ALLOWED_ROOT): + raise ValueError( + f"Refusing to POST to {target_url!r} — must begin with {ALLOWED_ROOT!r}" + ) + + async with httpx.AsyncClient(timeout=HTTP_TIMEOUT) as client: + print("=== Webhook Dispatch →", target_url, "===\n", + json.dumps(payload, indent=2, default=str)) + resp = await client.post(target_url, json=payload) + resp.raise_for_status() # bubble up 4xx/5xx to caller for logging + # We ignore / return the response body so the caller may log it if needed. + return None From a27f35f43c4c7f390fddd3d7720b6dc43b6ef787 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 14:30:14 +0900 Subject: [PATCH 142/230] Update agent_profilebuilder.py --- src/agents/agent_profilebuilder.py | 169 ++++++++--------------------- 1 file changed, 45 insertions(+), 124 deletions(-) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/agent_profilebuilder.py index fdfd97c3..475a894e 100644 --- a/src/agents/agent_profilebuilder.py +++ b/src/agents/agent_profilebuilder.py @@ -1,140 +1,61 @@ -""" -Conversational Profile Builder router -• Asks tailored questions until all required keys are filled -• Emits structured_profile JSON -""" +# src/agents/profilebuilder.py -from __future__ import annotations -import os, sys, json, httpx -from datetime import datetime -from dotenv import load_dotenv from fastapi import APIRouter, Request, HTTPException -from fastapi.middleware.cors import CORSMiddleware # for local tests - -# ── SDK -------------------------------------------------------------------- -load_dotenv() -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -from agents import Agent, Runner +from agents.utils.webhook import send_webhook +import os +import json +from datetime import datetime -# ── router ----------------------------------------------------------------- router = APIRouter() -# ── webhooks --------------------------------------------------------------- -CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # questions / clarifications -STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # final profile JSON - -# ── helper ----------------------------------------------------------------- -def _now() -> str: - from datetime import datetime - return datetime.utcnow().isoformat() - -async def _dispatch(url: str, payload: dict): - async with httpx.AsyncClient() as c: - print("=== PB Webhook ===\n", json.dumps(payload, indent=2)) - await c.post(url, json=payload) - print("==================") - -def clarify(task, user, text, reason="Agent requested clarification"): - return { - "task_id": task, - "user_id": user, - "agent_type": "profilebuilder", - "message": {"type": "text", "content": text}, - "metadata": {"reason": reason}, - "created_at": _now(), - } - -def structured(task, user, obj): - return { - "task_id": task, - "user_id": user, - "agent_type": "profilebuilder", - "message": obj, - "created_at": _now(), - } - -# ── ProfileBuilderAgent ---------------------------------------------------- -REQUIRED_KEYS = [ - "primary_SNSchannel", "profile_type", "core_topic", "sub_angle", - "primary_objective", "content_strength", "time_budget_weekly", - "inspiration_accounts", "provided_follower_count", "locale", - "motivation_note" -] - -PB_PROMPT = f""" -You are ProfileBuilderAgent. - -Goal: collect each of these keys once: {", ".join(REQUIRED_KEYS)}. - -Rules: -1. Ask ONE question at a time, tailored to previous answers. -2. 
After each user reply, decide which required key is still missing and - ask the next most relevant question. -3. Reflect back occasionally so the user feels understood. -4. When ALL keys are collected, respond ONLY with: - -{{ - "output_type": "structured_profile", - "contains_image": false, - "details": {{ ...all keys filled ... }} -}} +# ENV var — Bubble webhook URL for profile save notifications +PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") -5. If you still need information, respond ONLY with: -{{ "requires_user_input": "your next question" }} +# (Optional) timeout for slow webhook sendings +WEBHOOK_TIMEOUT_SECONDS = float(os.getenv("WEBHOOK_TIMEOUT", "10")) -Do NOT wrap responses in markdown fences. -""" -profile_builder_agent = Agent( - name="ProfileBuilderAgent", - instructions=PB_PROMPT, - tools=[], -) - -# ── API endpoint ----------------------------------------------------------- @router.post("/profilebuilder") -async def profile_builder_endpoint(req: Request): +async def profilebuilder_handler(req: Request): """ - Expects: - { - "action": "new_task" | "new_message", - "task_id": "...", - "user_id": "...", - "user_prompt": "...", # for new_task - "message": "...", # for new_message - "agent_session_id": "profilebuilder" # for new_message - } + Handle incoming POST requests to build or update a user profile. + Expects fields: task_id, user_id, profile (dict) """ data = await req.json() - action = data.get("action") - if action not in ("new_task", "new_message"): - raise HTTPException(400, "Unknown action") - task_id, user_id = data["task_id"], data["user_id"] - user_text = data.get("user_prompt") or data.get("message") - if not user_text: - raise HTTPException(422, "Missing user_prompt or message") - - # run the agent - result = await Runner.run(profile_builder_agent, input=user_text, max_turns=1) + # Basic field checks + task_id = data.get("task_id") + user_id = data.get("user_id") + profile = data.get("profile") + + if not task_id or not user_id: + raise HTTPException(422, "Missing required field: task_id or user_id") + + if not profile: + raise HTTPException(422, "Missing required field: profile object") + + # [TODO]: Save the profile blob to Bubble Data API if needed + # await upsert_profile(user_id, profile) + + # Build outgoing webhook payload + payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", # custom agent_type you define + "message": { + "type": "text", + "content": "Profile saved successfully." + }, + "metadata": { + "reason": "profile_saved" + }, + "created_at": datetime.utcnow().isoformat(), + } - # clarification? - if getattr(result, "requires_user_input", None): - await _dispatch(CHAT_URL, - clarify(task_id, user_id, result.requires_user_input)) - return {"ok": True} + # Fire webhook to Bubble + if not PROFILE_WEBHOOK_URL: + raise RuntimeError("Missing PROFILE_WEBHOOK_URL environment variable") - # final structured? 
- try: - parsed = json.loads(result.final_output.strip()) - if parsed.get("output_type") == "structured_profile": - await _dispatch(STRUCT_URL, structured(task_id, user_id, parsed)) - return {"ok": True} - except Exception: - pass + await send_webhook(PROFILE_WEBHOOK_URL, payload) - # fallback: return raw text as chat - await _dispatch(CHAT_URL, - clarify(task_id, user_id, result.final_output.strip(), - reason="Agent returned unstructured output")) - return {"ok": True"} + return {"ok": True, "message": "Profile processed and webhook sent."} From 1a27f21478fc93b4908cc1c57401d2a0abcdbe7b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 14:33:16 +0900 Subject: [PATCH 143/230] Update agent_server.py --- src/agents/agent_server.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 9536e587..0a501d4a 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -10,6 +10,7 @@ from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel +from agents.profilebuilder import router as profilebuilder_router # ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() @@ -130,6 +131,8 @@ def flatten_payload(p: dict) -> dict: allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) +# ── Mount your profilebuilder routes ─────────────────────────────────────── +app.include_router(profilebuilder_router) @app.post("/agent") async def run_agent(req: Request): From f7b456fe9809c2be15efbcb2306f6883ae970ccf Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 05:45:34 +0000 Subject: [PATCH 144/230] rename profilebuilder.py --- src/agents/{agent_profilebuilder.py => profilebuilder.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/agents/{agent_profilebuilder.py => profilebuilder.py} (100%) diff --git a/src/agents/agent_profilebuilder.py b/src/agents/profilebuilder.py similarity index 100% rename from src/agents/agent_profilebuilder.py rename to src/agents/profilebuilder.py From a68f2947fc14f0a391f992a101b05705b6652e60 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 14:49:40 +0900 Subject: [PATCH 145/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index 475a894e..11a4f910 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -1,7 +1,7 @@ # src/agents/profilebuilder.py from fastapi import APIRouter, Request, HTTPException -from agents.utils.webhook import send_webhook +from agents.util.webhook import send_webhook import os import json from datetime import datetime From 4ccbb6ccec73961f27ffd16192e37498623882b0 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 15:16:32 +0900 Subject: [PATCH 146/230] Update webhook.py --- src/agents/util/webhook.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/agents/util/webhook.py b/src/agents/util/webhook.py index 4b12244b..72e36954 100644 --- a/src/agents/util/webhook.py +++ b/src/agents/util/webhook.py @@ -3,7 +3,7 @@ Usage in your FastAPI code: - from utils.webhook import 
send_webhook + from agents.utils.webhook import send_webhook url = TASK_URL_MAP[task_type] # looked up from env‑vars await send_webhook(url, flattened_payload) @@ -22,9 +22,8 @@ # ----------------------------------------------------------------------------- # Configuration # ----------------------------------------------------------------------------- -# Only allow POSTs to URLs that start with this root (prevents exfiltration -# if someone accidentally passes a malicious URL in the incoming payload). -ALLOWED_ROOT = os.getenv("BUBBLE_DOMAIN_ROOT", "https://rgtnow.com") +# Only allow POSTs to URLs that start with one of these roots (prevents exfiltration) +ALLOWED_ROOTS = os.getenv("BUBBLE_DOMAIN_ROOTS", "https://rgtnow.com").split(",") # Optional default timeout (seconds) for outbound webhook calls. HTTP_TIMEOUT = float(os.getenv("WEBHOOK_TIMEOUT", "10")) @@ -36,18 +35,17 @@ async def send_webhook(target_url: str, payload: Mapping[str, Any]) -> None: """POST *payload* as JSON to *target_url*. Raises: - ValueError: if *target_url* is outside the allowed Bubble domain root. + ValueError: if *target_url* is outside the allowed Bubble domain roots. httpx.HTTPStatusError: if Bubble responds with an error status code. """ - if not target_url.startswith(ALLOWED_ROOT): + if not any(target_url.startswith(root.strip()) for root in ALLOWED_ROOTS): raise ValueError( - f"Refusing to POST to {target_url!r} — must begin with {ALLOWED_ROOT!r}" + f"Refusing to POST to {target_url!r} — must begin with one of {ALLOWED_ROOTS!r}" ) async with httpx.AsyncClient(timeout=HTTP_TIMEOUT) as client: print("=== Webhook Dispatch →", target_url, "===\n", json.dumps(payload, indent=2, default=str)) resp = await client.post(target_url, json=payload) - resp.raise_for_status() # bubble up 4xx/5xx to caller for logging - # We ignore / return the response body so the caller may log it if needed. + resp.raise_for_status() return None From 032c0a86d06b7a0b59968cc9916feab097a7220a Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 15:45:42 +0900 Subject: [PATCH 147/230] Create profilebuilder_agent.py --- src/agents/profilebuilder_agent.py | 35 ++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 src/agents/profilebuilder_agent.py diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py new file mode 100644 index 00000000..9dc0dd79 --- /dev/null +++ b/src/agents/profilebuilder_agent.py @@ -0,0 +1,35 @@ +# src/agents/profilebuilder_agent.py + +from agents import Agent + +profilebuilder_agent = Agent( + name="profilebuilder", + instructions=""" +You are the ProfileBuilder Agent. + +Your job is to guide the user through building a creator profile. +You must collect the following fields: + +- Niche or main topic +- Target audience +- Tone (e.g., friendly, professional) +- Platform focus (Instagram, TikTok, YouTube, etc.) +- Personal goals (what they want to achieve) + +Rules: +- If the user does not provide enough detail, ask friendly, simple follow-up questions. +- Keep your language encouraging and easy to understand. +- After you gather enough information, respond with a FINAL structured JSON like: + +{ + "niche": "Fitness and Wellness", + "target_audience": "Young professionals", + "tone": "Energetic and supportive", + "platforms": ["Instagram", "TikTok"], + "goals": ["Grow to 100K followers", "Launch an online course"] +} + +ONLY output final JSON when you are confident all fields are collected. 
+Otherwise, continue the conversation by asking for missing details. +""" +) From 081d9f8f1985327312e07ba4f5f210c43bba6669 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 15:46:24 +0900 Subject: [PATCH 148/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index 11a4f910..ece60c0d 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -1,6 +1,7 @@ # src/agents/profilebuilder.py from fastapi import APIRouter, Request, HTTPException +from agents.profilebuilder_agent import profilebuilder_agent from agents.util.webhook import send_webhook import os import json From 3edd044de57f623947f2eabb5229eaef2875683a Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 20:13:06 +0900 Subject: [PATCH 149/230] Update profilebuilder_agent.py --- src/agents/profilebuilder_agent.py | 159 ++++++++++++++++++++++++----- 1 file changed, 132 insertions(+), 27 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 9dc0dd79..fdfd97c3 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,35 +1,140 @@ -# src/agents/profilebuilder_agent.py +""" +Conversational Profile Builder router +• Asks tailored questions until all required keys are filled +• Emits structured_profile JSON +""" + +from __future__ import annotations +import os, sys, json, httpx +from datetime import datetime +from dotenv import load_dotenv +from fastapi import APIRouter, Request, HTTPException +from fastapi.middleware.cors import CORSMiddleware # for local tests + +# ── SDK -------------------------------------------------------------------- +load_dotenv() +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) +from agents import Agent, Runner + +# ── router ----------------------------------------------------------------- +router = APIRouter() -from agents import Agent +# ── webhooks --------------------------------------------------------------- +CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # questions / clarifications +STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # final profile JSON -profilebuilder_agent = Agent( - name="profilebuilder", - instructions=""" -You are the ProfileBuilder Agent. +# ── helper ----------------------------------------------------------------- +def _now() -> str: + from datetime import datetime + return datetime.utcnow().isoformat() -Your job is to guide the user through building a creator profile. -You must collect the following fields: +async def _dispatch(url: str, payload: dict): + async with httpx.AsyncClient() as c: + print("=== PB Webhook ===\n", json.dumps(payload, indent=2)) + await c.post(url, json=payload) + print("==================") -- Niche or main topic -- Target audience -- Tone (e.g., friendly, professional) -- Platform focus (Instagram, TikTok, YouTube, etc.) 
-- Personal goals (what they want to achieve) +def clarify(task, user, text, reason="Agent requested clarification"): + return { + "task_id": task, + "user_id": user, + "agent_type": "profilebuilder", + "message": {"type": "text", "content": text}, + "metadata": {"reason": reason}, + "created_at": _now(), + } + +def structured(task, user, obj): + return { + "task_id": task, + "user_id": user, + "agent_type": "profilebuilder", + "message": obj, + "created_at": _now(), + } + +# ── ProfileBuilderAgent ---------------------------------------------------- +REQUIRED_KEYS = [ + "primary_SNSchannel", "profile_type", "core_topic", "sub_angle", + "primary_objective", "content_strength", "time_budget_weekly", + "inspiration_accounts", "provided_follower_count", "locale", + "motivation_note" +] + +PB_PROMPT = f""" +You are ProfileBuilderAgent. + +Goal: collect each of these keys once: {", ".join(REQUIRED_KEYS)}. Rules: -- If the user does not provide enough detail, ask friendly, simple follow-up questions. -- Keep your language encouraging and easy to understand. -- After you gather enough information, respond with a FINAL structured JSON like: - -{ - "niche": "Fitness and Wellness", - "target_audience": "Young professionals", - "tone": "Energetic and supportive", - "platforms": ["Instagram", "TikTok"], - "goals": ["Grow to 100K followers", "Launch an online course"] -} - -ONLY output final JSON when you are confident all fields are collected. -Otherwise, continue the conversation by asking for missing details. +1. Ask ONE question at a time, tailored to previous answers. +2. After each user reply, decide which required key is still missing and + ask the next most relevant question. +3. Reflect back occasionally so the user feels understood. +4. When ALL keys are collected, respond ONLY with: + +{{ + "output_type": "structured_profile", + "contains_image": false, + "details": {{ ...all keys filled ... }} +}} + +5. If you still need information, respond ONLY with: +{{ "requires_user_input": "your next question" }} + +Do NOT wrap responses in markdown fences. """ + +profile_builder_agent = Agent( + name="ProfileBuilderAgent", + instructions=PB_PROMPT, + tools=[], ) + +# ── API endpoint ----------------------------------------------------------- +@router.post("/profilebuilder") +async def profile_builder_endpoint(req: Request): + """ + Expects: + { + "action": "new_task" | "new_message", + "task_id": "...", + "user_id": "...", + "user_prompt": "...", # for new_task + "message": "...", # for new_message + "agent_session_id": "profilebuilder" # for new_message + } + """ + data = await req.json() + action = data.get("action") + if action not in ("new_task", "new_message"): + raise HTTPException(400, "Unknown action") + + task_id, user_id = data["task_id"], data["user_id"] + user_text = data.get("user_prompt") or data.get("message") + if not user_text: + raise HTTPException(422, "Missing user_prompt or message") + + # run the agent + result = await Runner.run(profile_builder_agent, input=user_text, max_turns=1) + + # clarification? + if getattr(result, "requires_user_input", None): + await _dispatch(CHAT_URL, + clarify(task_id, user_id, result.requires_user_input)) + return {"ok": True} + + # final structured? 
+ try: + parsed = json.loads(result.final_output.strip()) + if parsed.get("output_type") == "structured_profile": + await _dispatch(STRUCT_URL, structured(task_id, user_id, parsed)) + return {"ok": True} + except Exception: + pass + + # fallback: return raw text as chat + await _dispatch(CHAT_URL, + clarify(task_id, user_id, result.final_output.strip(), + reason="Agent returned unstructured output")) + return {"ok": True"} From 7aad7fc04f261c24c3569cae2e1cd96aee3f5cfe Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 20:17:24 +0900 Subject: [PATCH 150/230] Update profilebuilder_agent.py --- src/agents/profilebuilder_agent.py | 158 ++++------------------------- 1 file changed, 22 insertions(+), 136 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index fdfd97c3..d7655d46 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,140 +1,26 @@ -""" -Conversational Profile Builder router -• Asks tailored questions until all required keys are filled -• Emits structured_profile JSON -""" - -from __future__ import annotations -import os, sys, json, httpx -from datetime import datetime -from dotenv import load_dotenv -from fastapi import APIRouter, Request, HTTPException -from fastapi.middleware.cors import CORSMiddleware # for local tests - -# ── SDK -------------------------------------------------------------------- -load_dotenv() -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -from agents import Agent, Runner - -# ── router ----------------------------------------------------------------- -router = APIRouter() - -# ── webhooks --------------------------------------------------------------- -CHAT_URL = os.getenv("BUBBLE_CHAT_URL") # questions / clarifications -STRUCT_URL = os.getenv("BUBBLE_STRUCTURED_URL") # final profile JSON - -# ── helper ----------------------------------------------------------------- -def _now() -> str: - from datetime import datetime - return datetime.utcnow().isoformat() - -async def _dispatch(url: str, payload: dict): - async with httpx.AsyncClient() as c: - print("=== PB Webhook ===\n", json.dumps(payload, indent=2)) - await c.post(url, json=payload) - print("==================") - -def clarify(task, user, text, reason="Agent requested clarification"): - return { - "task_id": task, - "user_id": user, - "agent_type": "profilebuilder", - "message": {"type": "text", "content": text}, - "metadata": {"reason": reason}, - "created_at": _now(), - } - -def structured(task, user, obj): - return { - "task_id": task, - "user_id": user, - "agent_type": "profilebuilder", - "message": obj, - "created_at": _now(), - } - -# ── ProfileBuilderAgent ---------------------------------------------------- -REQUIRED_KEYS = [ - "primary_SNSchannel", "profile_type", "core_topic", "sub_angle", - "primary_objective", "content_strength", "time_budget_weekly", - "inspiration_accounts", "provided_follower_count", "locale", - "motivation_note" -] - -PB_PROMPT = f""" -You are ProfileBuilderAgent. - -Goal: collect each of these keys once: {", ".join(REQUIRED_KEYS)}. +profilebuilder_agent = Agent( + name="profilebuilder", + instructions=""" +You are the ProfileBuilder Agent. + +Your job is to guide the user step-by-step through building a creator profile. 
+You must collect the following fields, one at a time: + +- Niche or main topic +- Target audience +- Personal tone (e.g., friendly, professional) +- Platform focus (Instagram, TikTok, YouTube, etc.) +- Personal goals (specific achievements) +- Motivations (deeper personal why behind creating content) +- Inspirations (other creators or brands they admire) Rules: -1. Ask ONE question at a time, tailored to previous answers. -2. After each user reply, decide which required key is still missing and - ask the next most relevant question. -3. Reflect back occasionally so the user feels understood. -4. When ALL keys are collected, respond ONLY with: - -{{ - "output_type": "structured_profile", - "contains_image": false, - "details": {{ ...all keys filled ... }} -}} - -5. If you still need information, respond ONLY with: -{{ "requires_user_input": "your next question" }} - -Do NOT wrap responses in markdown fences. +- After the user answers a question, immediately output a simple JSON object with ONLY that field. +- Example: { "niche": "Fitness and Wellness" } +- Do NOT wait until all fields are complete to output. +- Continue asking questions until all fields are reasonably collected. +- DO NOT output the final complete profile JSON yourself. Let the user confirm manually later. +- Keep your language friendly, supportive, and easy to understand. +- Be patient. If the user gives unclear answers, ask simple clarifying questions. """ - -profile_builder_agent = Agent( - name="ProfileBuilderAgent", - instructions=PB_PROMPT, - tools=[], ) - -# ── API endpoint ----------------------------------------------------------- -@router.post("/profilebuilder") -async def profile_builder_endpoint(req: Request): - """ - Expects: - { - "action": "new_task" | "new_message", - "task_id": "...", - "user_id": "...", - "user_prompt": "...", # for new_task - "message": "...", # for new_message - "agent_session_id": "profilebuilder" # for new_message - } - """ - data = await req.json() - action = data.get("action") - if action not in ("new_task", "new_message"): - raise HTTPException(400, "Unknown action") - - task_id, user_id = data["task_id"], data["user_id"] - user_text = data.get("user_prompt") or data.get("message") - if not user_text: - raise HTTPException(422, "Missing user_prompt or message") - - # run the agent - result = await Runner.run(profile_builder_agent, input=user_text, max_turns=1) - - # clarification? - if getattr(result, "requires_user_input", None): - await _dispatch(CHAT_URL, - clarify(task_id, user_id, result.requires_user_input)) - return {"ok": True} - - # final structured? 
- try: - parsed = json.loads(result.final_output.strip()) - if parsed.get("output_type") == "structured_profile": - await _dispatch(STRUCT_URL, structured(task_id, user_id, parsed)) - return {"ok": True} - except Exception: - pass - - # fallback: return raw text as chat - await _dispatch(CHAT_URL, - clarify(task_id, user_id, result.final_output.strip(), - reason="Agent returned unstructured output")) - return {"ok": True"} From 87516c8e337cbfaed7c5f0ced9ffa6a476437d67 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 20:18:11 +0900 Subject: [PATCH 151/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 57 +++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 27 deletions(-) diff --git a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index ece60c0d..f048a77e 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -1,62 +1,65 @@ # src/agents/profilebuilder.py -from fastapi import APIRouter, Request, HTTPException from agents.profilebuilder_agent import profilebuilder_agent -from agents.util.webhook import send_webhook +from agents.utils.webhook import send_webhook +from fastapi import APIRouter, Request, HTTPException import os import json from datetime import datetime router = APIRouter() -# ENV var — Bubble webhook URL for profile save notifications PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") -# (Optional) timeout for slow webhook sendings -WEBHOOK_TIMEOUT_SECONDS = float(os.getenv("WEBHOOK_TIMEOUT", "10")) - - @router.post("/profilebuilder") async def profilebuilder_handler(req: Request): - """ - Handle incoming POST requests to build or update a user profile. - Expects fields: task_id, user_id, profile (dict) - """ data = await req.json() - # Basic field checks task_id = data.get("task_id") user_id = data.get("user_id") - profile = data.get("profile") + prompt = ( + data.get("prompt") or data.get("user_prompt") or data.get("message") + ) + + if not (task_id and user_id and prompt): + raise HTTPException(422, "Missing task_id, user_id, or prompt") - if not task_id or not user_id: - raise HTTPException(422, "Missing required field: task_id or user_id") + result = await Runner.run( + profilebuilder_agent, + input=prompt, + context={"task_id": task_id, "user_id": user_id}, + max_turns=3, + ) - if not profile: - raise HTTPException(422, "Missing required field: profile object") + raw = result.final_output.strip() - # [TODO]: Save the profile blob to Bubble Data API if needed - # await upsert_profile(user_id, profile) + # Try to parse single field JSON + try: + field_update = json.loads(raw) + if not isinstance(field_update, dict) or len(field_update) != 1: + raise ValueError("Must output a single-field JSON object") + reason = "profile_partial" + except (json.JSONDecodeError, ValueError) as e: + raise HTTPException(500, f"Agent output invalid: {e}") - # Build outgoing webhook payload + # Build payload to send to Bubble payload = { "task_id": task_id, "user_id": user_id, - "agent_type": "profilebuilder", # custom agent_type you define + "agent_type": "profilebuilder", "message": { - "type": "text", - "content": "Profile saved successfully." 
+ "type": "profile_partial", + "content": field_update }, "metadata": { - "reason": "profile_saved" + "reason": reason }, "created_at": datetime.utcnow().isoformat(), } - # Fire webhook to Bubble if not PROFILE_WEBHOOK_URL: - raise RuntimeError("Missing PROFILE_WEBHOOK_URL environment variable") + raise RuntimeError("Missing PROFILE_WEBHOOK_URL") await send_webhook(PROFILE_WEBHOOK_URL, payload) - return {"ok": True, "message": "Profile processed and webhook sent."} + return {"ok": True} From 62ca34a0572d641f5b6a82355b00dcaaa1799f50 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 20:23:13 +0900 Subject: [PATCH 152/230] Update profilebuilder_agent.py --- src/agents/profilebuilder_agent.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index d7655d46..8f1e79d9 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,3 +1,7 @@ +# src/agents/profilebuilder_agent.py + +from agents import Agent + profilebuilder_agent = Agent( name="profilebuilder", instructions=""" From 832d35864b2a8637da9ded432764cb3f6437974c Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 20:26:54 +0900 Subject: [PATCH 153/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index f048a77e..2026a7d2 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -1,7 +1,7 @@ # src/agents/profilebuilder.py from agents.profilebuilder_agent import profilebuilder_agent -from agents.utils.webhook import send_webhook +from agents.util.webhook import send_webhook from fastapi import APIRouter, Request, HTTPException import os import json From 6644726d6f14744b84c17b30830505fdff6fa97c Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Mon, 28 Apr 2025 22:31:59 +0900 Subject: [PATCH 154/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 47 ++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index 2026a7d2..a4239254 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -9,7 +9,9 @@ router = APIRouter() +# URLs pulled from environment variables PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") +CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") # This is the chat bubble webhook you already have @router.post("/profilebuilder") async def profilebuilder_handler(req: Request): @@ -24,6 +26,7 @@ async def profilebuilder_handler(req: Request): if not (task_id and user_id and prompt): raise HTTPException(422, "Missing task_id, user_id, or prompt") + # 1. Run ProfileBuilder agent result = await Runner.run( profilebuilder_agent, input=prompt, @@ -31,35 +34,49 @@ async def profilebuilder_handler(req: Request): max_turns=3, ) - raw = result.final_output.strip() + raw_output = result.final_output.strip() - # Try to parse single field JSON + # 2. 
Parse partial profile field from agent output try: - field_update = json.loads(raw) + field_update = json.loads(raw_output) if not isinstance(field_update, dict) or len(field_update) != 1: - raise ValueError("Must output a single-field JSON object") + raise ValueError("Agent must output a single-field JSON object") reason = "profile_partial" except (json.JSONDecodeError, ValueError) as e: raise HTTPException(500, f"Agent output invalid: {e}") - # Build payload to send to Bubble - payload = { + # 3. Send webhook to update Profile fields + profile_payload = { "task_id": task_id, "user_id": user_id, "agent_type": "profilebuilder", - "message": { - "type": "profile_partial", - "content": field_update - }, - "metadata": { - "reason": reason - }, + "message_type": "profile_partial", + "message_content": field_update, + "metadata_reason": reason, "created_at": datetime.utcnow().isoformat(), } - if not PROFILE_WEBHOOK_URL: raise RuntimeError("Missing PROFILE_WEBHOOK_URL") - await send_webhook(PROFILE_WEBHOOK_URL, payload) + await send_webhook(PROFILE_WEBHOOK_URL, profile_payload) + + # 4. Send webhook to update Chat (agent's next question) + # Get the next outgoing prompt (already included in agent output after profile field is collected) + next_prompt = result.next_prompt if hasattr(result, "next_prompt") else None + + if next_prompt: + chat_payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "text", + "message_content": next_prompt, + "metadata_reason": "follow_up", + "created_at": datetime.utcnow().isoformat(), + } + if not CHAT_WEBHOOK_URL: + raise RuntimeError("Missing CLARIFICATION_WEBHOOK_URL") + + await send_webhook(CHAT_WEBHOOK_URL, chat_payload) return {"ok": True} From bb65d0d4c7cd9c7ab96dfc14f778d8069e4aa5e0 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 29 Apr 2025 14:37:10 +0900 Subject: [PATCH 155/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index a4239254..3356478a 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -2,6 +2,7 @@ from agents.profilebuilder_agent import profilebuilder_agent from agents.util.webhook import send_webhook +from agents import Runner from fastapi import APIRouter, Request, HTTPException import os import json From c384c6d4c6f9fef15c30c009e91dd9769341d0a0 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 29 Apr 2025 14:41:25 +0900 Subject: [PATCH 156/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index 3356478a..172f2bc2 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -2,7 +2,7 @@ from agents.profilebuilder_agent import profilebuilder_agent from agents.util.webhook import send_webhook -from agents import Runner +from agents.run import Runner from fastapi import APIRouter, Request, HTTPException import os import json From ba2334fa1c49e7df2824c59815a900cc4457eb61 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 29 Apr 2025 14:51:04 +0900 Subject: [PATCH 157/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git 
a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index 172f2bc2..486636f5 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -2,7 +2,7 @@ from agents.profilebuilder_agent import profilebuilder_agent from agents.util.webhook import send_webhook -from agents.run import Runner +from agents.run import Runner # <-- Correct import! from fastapi import APIRouter, Request, HTTPException import os import json @@ -10,9 +10,8 @@ router = APIRouter() -# URLs pulled from environment variables PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") -CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") # This is the chat bubble webhook you already have +CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") @router.post("/profilebuilder") async def profilebuilder_handler(req: Request): @@ -37,11 +36,16 @@ async def profilebuilder_handler(req: Request): raw_output = result.final_output.strip() - # 2. Parse partial profile field from agent output + # 2. Defensive parsing: agent must return JSON try: + if not raw_output or not raw_output.startswith("{"): + raise ValueError("Agent output is not JSON or is empty") + field_update = json.loads(raw_output) + if not isinstance(field_update, dict) or len(field_update) != 1: raise ValueError("Agent must output a single-field JSON object") + reason = "profile_partial" except (json.JSONDecodeError, ValueError) as e: raise HTTPException(500, f"Agent output invalid: {e}") @@ -56,13 +60,13 @@ async def profilebuilder_handler(req: Request): "metadata_reason": reason, "created_at": datetime.utcnow().isoformat(), } + if not PROFILE_WEBHOOK_URL: raise RuntimeError("Missing PROFILE_WEBHOOK_URL") await send_webhook(PROFILE_WEBHOOK_URL, profile_payload) - # 4. Send webhook to update Chat (agent's next question) - # Get the next outgoing prompt (already included in agent output after profile field is collected) + # 4. Send webhook to Chat (agent's next question) next_prompt = result.next_prompt if hasattr(result, "next_prompt") else None if next_prompt: From b60cf16f06494a76837feba07fac4621348a1dac Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 29 Apr 2025 14:57:48 +0900 Subject: [PATCH 158/230] Update profilebuilder_agent.py --- src/agents/profilebuilder_agent.py | 71 ++++++++++++++++++++++-------- 1 file changed, 52 insertions(+), 19 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 8f1e79d9..1381c832 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -7,24 +7,57 @@ instructions=""" You are the ProfileBuilder Agent. -Your job is to guide the user step-by-step through building a creator profile. -You must collect the following fields, one at a time: - -- Niche or main topic -- Target audience -- Personal tone (e.g., friendly, professional) -- Platform focus (Instagram, TikTok, YouTube, etc.) -- Personal goals (specific achievements) -- Motivations (deeper personal why behind creating content) -- Inspirations (other creators or brands they admire) - -Rules: -- After the user answers a question, immediately output a simple JSON object with ONLY that field. -- Example: { "niche": "Fitness and Wellness" } -- Do NOT wait until all fields are complete to output. -- Continue asking questions until all fields are reasonably collected. -- DO NOT output the final complete profile JSON yourself. Let the user confirm manually later. 
-- Keep your language friendly, supportive, and easy to understand. -- Be patient. If the user gives unclear answers, ask simple clarifying questions. +Your role is to guide the user step-by-step through building a creator profile by asking friendly, simple questions and recording structured answers. + +You must help the user fill out these profile fields: + + +Field Name Description +niche Their niche or main topic +target_audience Who they want to reach +personal_tone Desired tone (e.g., friendly, professional) +platform_focus Primary platforms (Instagram, TikTok, YouTube, etc.) +personal_goals Specific achievements or milestones they aim for +motivations Deeper personal reasons for creating content +inspirations Other creators or brands they admire +🔥 Critical Behavior Rules +Every reply must be a valid JSON object — no freeform text allowed. +The JSON must contain only one field at a time. +NEVER mix text and JSON or wrap outputs in Markdown. +DO NOT output a final complete profile. Let the user review and confirm later. +✅ Examples of correct outputs: + +{ "niche": "Fitness and Wellness" } +{ "platform_focus": ["Instagram", "TikTok"] } +✅ Example if the answer is unclear: + +{ "clarification_prompt": "Could you be more specific about your target audience?" } +💬 Tone and Communication Style +Stay friendly, supportive, and patient. +Use simple, easy-to-understand language. +Act like a friendly coach, not a strict form filler. +Encourage the user after each answer: "Awesome!", "Great!", "Thanks for sharing!", etc. +🔄 Conversation Flow Rules +After each user reply: +Immediately output the collected field (in JSON). +Immediately ask the next logical question (also in JSON, using clarification_prompt). +If a user gives a vague or incomplete answer: +Output a clarification_prompt asking for more details. +Stay positive and encouraging while clarifying. +After all fields are reasonably collected: +Stop asking new questions. +Allow the user to review and finalize the profile manually (handled by the system). +⚠️ Important Compliance +If you ever fail to output JSON, it will cause system errors. +Always prioritize JSON correctness above all. +✅ Remember: You are not trying to rush — you are trying to make the user feel understood and supported. + +🧠 Example Conversation Flow + +User says Agent (you) reply +"I want to create fitness content" { "niche": "Fitness and Wellness" } +(Immediately after) { "clarification_prompt": "Awesome! Who is your ideal audience?" } +"Young professionals" { "target_audience": "Young professionals" } +(Immediately after) { "clarification_prompt": "Great! What tone would you like your brand to have?" } """ ) From b25bc58a0b0f934d31d8249ace2c3ddbb497c47b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 29 Apr 2025 15:08:21 +0900 Subject: [PATCH 159/230] Update profilebuilder_agent.py --- src/agents/profilebuilder_agent.py | 110 ++++++++++++++++++----------- 1 file changed, 69 insertions(+), 41 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 1381c832..dfc2b08e 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -7,57 +7,85 @@ instructions=""" You are the ProfileBuilder Agent. -Your role is to guide the user step-by-step through building a creator profile by asking friendly, simple questions and recording structured answers. 
+Your role is to guide the user step-by-step through building a creator profile by asking supportive, friendly questions, while strictly outputting structured JSON at all times. You must help the user fill out these profile fields: Field Name Description -niche Their niche or main topic +niche Their main content topic target_audience Who they want to reach -personal_tone Desired tone (e.g., friendly, professional) -platform_focus Primary platforms (Instagram, TikTok, YouTube, etc.) -personal_goals Specific achievements or milestones they aim for -motivations Deeper personal reasons for creating content +personal_tone Desired voice/style (e.g., friendly, professional) +platform_focus Primary platforms (e.g., Instagram, TikTok, YouTube) +personal_goals Specific achievements or aspirations +motivations Deeper personal reasons behind creating inspirations Other creators or brands they admire -🔥 Critical Behavior Rules -Every reply must be a valid JSON object — no freeform text allowed. -The JSON must contain only one field at a time. -NEVER mix text and JSON or wrap outputs in Markdown. -DO NOT output a final complete profile. Let the user review and confirm later. -✅ Examples of correct outputs: +🛠 Critical Technical Rules +Every reply must be a valid JSON object. +One field per JSON output only. +No freeform text, no Markdown, no mixed outputs. +No complete final profiles — one field at a time only. +✅ Correct JSON Output Examples: { "niche": "Fitness and Wellness" } { "platform_focus": ["Instagram", "TikTok"] } -✅ Example if the answer is unclear: - -{ "clarification_prompt": "Could you be more specific about your target audience?" } -💬 Tone and Communication Style -Stay friendly, supportive, and patient. -Use simple, easy-to-understand language. -Act like a friendly coach, not a strict form filler. -Encourage the user after each answer: "Awesome!", "Great!", "Thanks for sharing!", etc. +✅ Clarification Example: + +{ "clarification_prompt": "Could you describe your audience a little more specifically?" } +🎨 Tone and Communication Style +Be friendly, supportive, and patient like a mentor. +Use easy-to-understand, warm language. +Celebrate when the user answers ("Awesome!", "Great!", "Thanks for sharing!") +Gently clarify if an answer is unclear — no shaming. +Stay positive even if the user is vague or uncertain. 🔄 Conversation Flow Rules -After each user reply: -Immediately output the collected field (in JSON). -Immediately ask the next logical question (also in JSON, using clarification_prompt). -If a user gives a vague or incomplete answer: -Output a clarification_prompt asking for more details. -Stay positive and encouraging while clarifying. -After all fields are reasonably collected: -Stop asking new questions. -Allow the user to review and finalize the profile manually (handled by the system). -⚠️ Important Compliance -If you ever fail to output JSON, it will cause system errors. -Always prioritize JSON correctness above all. -✅ Remember: You are not trying to rush — you are trying to make the user feel understood and supported. - -🧠 Example Conversation Flow - -User says Agent (you) reply -"I want to create fitness content" { "niche": "Fitness and Wellness" } -(Immediately after) { "clarification_prompt": "Awesome! Who is your ideal audience?" } -"Young professionals" { "target_audience": "Young professionals" } -(Immediately after) { "clarification_prompt": "Great! What tone would you like your brand to have?" } +Start by asking about niche. 
+After collecting each field, immediately follow up with the next suggested question. +If a user answer is vague, output a clarification_prompt to politely refine. +Continue until at least 6 out of 7 fields are filled. +Once 6 fields are collected, stop asking new questions — allow user to review and finalize manually. +📋 Suggested Initial Question Sequence +Always follow this structured flow unless the user redirects: + + +Order Field Clarification Prompt Example +1 niche "Awesome! What's your main niche or the topic you want to focus on?" +2 target_audience "Great! Who are you trying to reach with your content?" +3 personal_tone "Perfect. How would you like your brand's voice to sound? (Friendly, professional, witty?)" +4 platform_focus "Which platforms are you most excited to create content for? (Instagram, TikTok, YouTube?)" +5 personal_goals "What are some personal goals you'd love to achieve through your content?" +6 motivations "What's your deeper motivation or 'why' behind becoming a creator?" +7 inspirations "Are there any creators or brands you really admire?" +✅ Always output the field collected first. +✅ Then immediately output the next question using clarification_prompt JSON. + +🧠 Personalization Hooks (Optional Soft Touches) +If the user's niche or target_audience gives hints (e.g., fitness, education, entertainment), +you can slightly adjust your next prompt tone. +Examples: + +If niche is fitness: +"Great! Fitness content is super inspiring. Who would you love to motivate?" +If niche is education: +"Teaching is powerful! Who's your dream audience to help?" +✅ Always stay JSON-correct even if you personalize. + +📋 Example Correct Full Sequence + +Event Agent JSON Output +User: "I want to create fitness content" { "niche": "Fitness and Wellness" } +Then { "clarification_prompt": "Awesome! Who is your ideal audience?" } +User: "Young professionals" { "target_audience": "Young professionals" } +Then { "clarification_prompt": "Perfect! What tone do you want your brand to have?" } +etc. keep going in sequence +⚡ Special Conditions +If a user says "that's enough" or similar: +Politely end the conversation and thank them. +If agent detects 6+ fields filled: +Stop asking automatically. +✅ Example final closing: + +{ "clarification_prompt": "Amazing work! You can now review and finalize your profile. ✨" } + """ ) From 395102546c54ee40f5d353b6695a57fe118ce961 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 2 May 2025 17:04:42 +0900 Subject: [PATCH 160/230] Update agent_server add profileagent.py --- src/agents/agent_server.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 0a501d4a..3b0e064c 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -11,6 +11,9 @@ from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel from agents.profilebuilder import router as profilebuilder_router +from openai.tools.websearch import WebSearchTool +from agents.tool import MarkdownBlock # Assumes this helper renders markdown properly + # ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() @@ -45,9 +48,24 @@ async def send_webhook(payload: dict): name="feedback", instructions="You critique content. Respond ONLY in structured JSON." 
) +profile_analyzer = Agent( + name="profile_analyzer", + instructions=""" +You are an expert in analyzing aspiring influencer profiles. + +Your goal is to deeply understand a user's motivations, niche, audience, and goals based on their collected profile data. Then, generate a highly personalized report that: +- Recognizes their unique strengths and values +- Suggests viable directions based on their niche and goals +- Offers caution or tradeoff considerations +- Is written in clear, supportive, actionable tone + +Use WebSearchTool if needed to briefly validate niche demand or market trends. Output a single MarkdownBlock with the report. Do NOT output JSON or code. Respond only with a single full markdown block. +""", + tools=[WebSearchTool()], +) AGENTS = {"strategy": strategy, "content": content, - "repurpose": repurpose, "feedback": feedback} + "repurpose": repurpose, "feedback": feedback,"profile_analyzer": profile_analyzer,} # ── Pydantic model for Manager handoff payload ──────────────────────────── class HandoffData(BaseModel): From 9551e23e2d93d7c6cb55098c7098d61d06a0c7d3 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 2 May 2025 17:14:46 +0900 Subject: [PATCH 161/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 3b0e064c..6be79b65 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -11,7 +11,7 @@ from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel from agents.profilebuilder import router as profilebuilder_router -from openai.tools.websearch import WebSearchTool +from .tool import WebSearchTool from agents.tool import MarkdownBlock # Assumes this helper renders markdown properly From dd7470c9115e1da5116ecd6650a4449518d38ec8 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 2 May 2025 17:18:18 +0900 Subject: [PATCH 162/230] Update agent_server.py --- src/agents/agent_server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 6be79b65..31805e31 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -12,7 +12,6 @@ from pydantic import BaseModel from agents.profilebuilder import router as profilebuilder_router from .tool import WebSearchTool -from agents.tool import MarkdownBlock # Assumes this helper renders markdown properly # ── SDK setup ─────────────────────────────────────────────────────────────── From fea5db99cf00e8f54ba9fb0c347dad22fe695054 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 3 May 2025 06:38:22 +0900 Subject: [PATCH 163/230] Update agent_server.py --- src/agents/agent_server.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 31805e31..81252faa 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -10,7 +10,7 @@ from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel -from agents.profilebuilder import router as profilebuilder_router +from agents.profilebuilder_agent import profilebuilder_agent from .tool import WebSearchTool @@ -63,8 +63,14 @@ async def send_webhook(payload: dict): tools=[WebSearchTool()], ) -AGENTS = {"strategy": 
strategy, "content": content, - "repurpose": repurpose, "feedback": feedback,"profile_analyzer": profile_analyzer,} +AGENTS = { + "strategy": strategy, + "content": content, + "repurpose": repurpose, + "feedback": feedback, + "profile_analyzer": profile_analyzer, + "profilebuilder": profilebuilder_agent +} # ── Pydantic model for Manager handoff payload ──────────────────────────── class HandoffData(BaseModel): From 13ea6013864590e993860103c38f91cf2de3aea9 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 3 May 2025 06:41:49 +0900 Subject: [PATCH 164/230] Update agent_server.py --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 81252faa..2e8ce93b 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -155,7 +155,7 @@ def flatten_payload(p: dict) -> dict: allow_methods=["*"], allow_headers=["*"], ) # ── Mount your profilebuilder routes ─────────────────────────────────────── -app.include_router(profilebuilder_router) +app.include_router(profilebuilder_agent) @app.post("/agent") async def run_agent(req: Request): From 82076817a5749ad4b0dc60510bb91e425f96416f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 3 May 2025 06:45:34 +0900 Subject: [PATCH 165/230] Update agent_server.py --- src/agents/agent_server.py | 46 +++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 2e8ce93b..a6d9e684 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -13,7 +13,6 @@ from agents.profilebuilder_agent import profilebuilder_agent from .tool import WebSearchTool - # ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) @@ -220,3 +219,48 @@ async def run_agent(req: Request): await send_webhook(flatten_payload(out_payload)) return {"ok": True} + +@app.post("/agent_direct") +async def run_agent_direct(req: Request): + data = await req.json() + + agent_type = data.get("agent_type") + agent = AGENTS.get(agent_type) + if not agent: + raise HTTPException(422, f"Unknown agent_type: {agent_type}") + + task_id = data.get("task_id") + user_id = data.get("user_id") + if not task_id or not user_id: + raise HTTPException(422, "Missing 'task_id' or 'user_id'") + + prompt = data.get("prompt") or data.get("message") or "" + context = { + "task_id": task_id, + "user_id": user_id, + "profile_data": data.get("profile_data") + } + + result = await Runner.run(agent, input=prompt, context=context, max_turns=12) + raw = result.final_output.strip() + + try: + content = json.loads(raw) + reason = "Agent returned structured JSON" + msg = {"type": "structured", "content": content} + except json.JSONDecodeError: + reason = "Agent returned unstructured output" + msg = {"type": "text", "content": raw} + + trace = result.to_debug_dict() if hasattr(result, "to_debug_dict") else [] + + payload = build_payload( + task_id=task_id, + user_id=user_id, + agent_type=agent.name, + message=msg, + reason=reason, + trace=trace + ) + await send_webhook(flatten_payload(payload)) + return {"ok": True} From 97f429d9879900b1c05dd1bbf60ecfae3918e633 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Sat, 3 May 2025 06:56:37 
+0900 Subject: [PATCH 166/230] Update agent_server.py --- src/agents/agent_server.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index a6d9e684..36d17889 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -153,8 +153,6 @@ def flatten_payload(p: dict) -> dict: allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) -# ── Mount your profilebuilder routes ─────────────────────────────────────── -app.include_router(profilebuilder_agent) @app.post("/agent") async def run_agent(req: Request): From f919dbf693c175cedceee17256d40c414e8bae50 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 6 May 2025 13:23:29 +0900 Subject: [PATCH 167/230] Update agent_server.py adding profilebuilder_router and app.include --- src/agents/agent_server.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 36d17889..c33c1292 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -11,6 +11,8 @@ from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel from agents.profilebuilder_agent import profilebuilder_agent +from agents.profilebuilder import router as profilebuilder_router + from .tool import WebSearchTool # ── SDK setup ─────────────────────────────────────────────────────────────── @@ -153,6 +155,7 @@ def flatten_payload(p: dict) -> dict: allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) +app.include_router(profilebuilder_router) @app.post("/agent") async def run_agent(req: Request): From 11087158dbce1bef2a2dbb250c0a53f3372288cb Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 6 May 2025 19:18:34 +0900 Subject: [PATCH 168/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 46 ++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index 486636f5..38a70fc1 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -2,7 +2,7 @@ from agents.profilebuilder_agent import profilebuilder_agent from agents.util.webhook import send_webhook -from agents.run import Runner # <-- Correct import! +from agents.run import Runner from fastapi import APIRouter, Request, HTTPException import os import json @@ -19,14 +19,12 @@ async def profilebuilder_handler(req: Request): task_id = data.get("task_id") user_id = data.get("user_id") - prompt = ( - data.get("prompt") or data.get("user_prompt") or data.get("message") - ) + prompt = data.get("prompt") or data.get("user_prompt") or data.get("message") if not (task_id and user_id and prompt): raise HTTPException(422, "Missing task_id, user_id, or prompt") - # 1. Run ProfileBuilder agent + # 1. Run the ProfileBuilder agent result = await Runner.run( profilebuilder_agent, input=prompt, @@ -36,37 +34,38 @@ async def profilebuilder_handler(req: Request): raw_output = result.final_output.strip() - # 2. Defensive parsing: agent must return JSON + # 2. 
Parse JSON output from agent try: if not raw_output or not raw_output.startswith("{"): - raise ValueError("Agent output is not JSON or is empty") + raise ValueError("Agent output is not valid JSON or is empty") field_update = json.loads(raw_output) if not isinstance(field_update, dict) or len(field_update) != 1: raise ValueError("Agent must output a single-field JSON object") - + reason = "profile_partial" except (json.JSONDecodeError, ValueError) as e: raise HTTPException(500, f"Agent output invalid: {e}") - # 3. Send webhook to update Profile fields - profile_payload = { - "task_id": task_id, - "user_id": user_id, - "agent_type": "profilebuilder", - "message_type": "profile_partial", - "message_content": field_update, - "metadata_reason": reason, - "created_at": datetime.utcnow().isoformat(), - } + # 3. Webhook to Profile DB only if not a clarification prompt + if "clarification_prompt" not in field_update: + profile_payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "profile_partial", + "message_content": field_update, + "metadata_reason": reason, + "created_at": datetime.utcnow().isoformat(), + } - if not PROFILE_WEBHOOK_URL: - raise RuntimeError("Missing PROFILE_WEBHOOK_URL") + if not PROFILE_WEBHOOK_URL: + raise RuntimeError("Missing PROFILE_WEBHOOK_URL") - await send_webhook(PROFILE_WEBHOOK_URL, profile_payload) + await send_webhook(PROFILE_WEBHOOK_URL, profile_payload) - # 4. Send webhook to Chat (agent's next question) + # 4. Webhook to Chat UI for the next clarification prompt next_prompt = result.next_prompt if hasattr(result, "next_prompt") else None if next_prompt: @@ -79,9 +78,10 @@ async def profilebuilder_handler(req: Request): "metadata_reason": "follow_up", "created_at": datetime.utcnow().isoformat(), } + if not CHAT_WEBHOOK_URL: raise RuntimeError("Missing CLARIFICATION_WEBHOOK_URL") await send_webhook(CHAT_WEBHOOK_URL, chat_payload) - return {"ok": True} + return {"ok": True"} From b17ea8d749438e9f382ec89fbef7083f8d806a84 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 6 May 2025 19:22:20 +0900 Subject: [PATCH 169/230] Update profilebuilder.py --- src/agents/profilebuilder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/profilebuilder.py b/src/agents/profilebuilder.py index 38a70fc1..8166e71e 100644 --- a/src/agents/profilebuilder.py +++ b/src/agents/profilebuilder.py @@ -84,4 +84,4 @@ async def profilebuilder_handler(req: Request): await send_webhook(CHAT_WEBHOOK_URL, chat_payload) - return {"ok": True"} + return {"ok": True} From b6eda67a4941b9d39ce1cd3462ad74b4c1c36858 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 6 May 2025 19:23:31 +0900 Subject: [PATCH 170/230] Update profilebuilder_agent.py --- src/agents/profilebuilder_agent.py | 125 ++++++++++++++--------------- 1 file changed, 61 insertions(+), 64 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index dfc2b08e..3718aebb 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,7 +1,5 @@ # src/agents/profilebuilder_agent.py -from agents import Agent - profilebuilder_agent = Agent( name="profilebuilder", instructions=""" @@ -11,81 +9,80 @@ You must help the user fill out these profile fields: +Field Name | Description +---------------------|--------------------------------------------------- +niche | Their main content 
topic +target_audience | Who they want to reach +personal_tone | Desired voice/style (e.g., friendly, professional) +platform_focus | Primary platforms (e.g., Instagram, TikTok, YouTube) +personal_goals | Specific achievements or aspirations +motivations | Deeper personal reasons behind creating +inspirations | Other creators or brands they admire -Field Name Description -niche Their main content topic -target_audience Who they want to reach -personal_tone Desired voice/style (e.g., friendly, professional) -platform_focus Primary platforms (e.g., Instagram, TikTok, YouTube) -personal_goals Specific achievements or aspirations -motivations Deeper personal reasons behind creating -inspirations Other creators or brands they admire 🛠 Critical Technical Rules -Every reply must be a valid JSON object. -One field per JSON output only. -No freeform text, no Markdown, no mixed outputs. -No complete final profiles — one field at a time only. -✅ Correct JSON Output Examples: - +- Every reply must be a **valid JSON object**. +- One field per JSON output only. +- No freeform text, no Markdown, no mixed outputs. +- Never include multiple fields in one JSON response. +- Never return a final complete profile. +- ✅ Always output the collected field first in a JSON object. +- ✅ Then, in a **separate** reply, output the next question using `clarification_prompt` JSON. + +✅ Output Examples: { "niche": "Fitness and Wellness" } { "platform_focus": ["Instagram", "TikTok"] } -✅ Clarification Example: +✅ Clarification Prompt Example: { "clarification_prompt": "Could you describe your audience a little more specifically?" } -🎨 Tone and Communication Style -Be friendly, supportive, and patient like a mentor. -Use easy-to-understand, warm language. -Celebrate when the user answers ("Awesome!", "Great!", "Thanks for sharing!") -Gently clarify if an answer is unclear — no shaming. -Stay positive even if the user is vague or uncertain. -🔄 Conversation Flow Rules -Start by asking about niche. -After collecting each field, immediately follow up with the next suggested question. -If a user answer is vague, output a clarification_prompt to politely refine. -Continue until at least 6 out of 7 fields are filled. -Once 6 fields are collected, stop asking new questions — allow user to review and finalize manually. -📋 Suggested Initial Question Sequence -Always follow this structured flow unless the user redirects: +⚠️ Fallback Rule (Critical): +If you ever feel unsure how to respond, return: +{ "clarification_prompt": "Could you clarify that a bit more?" } -Order Field Clarification Prompt Example -1 niche "Awesome! What's your main niche or the topic you want to focus on?" -2 target_audience "Great! Who are you trying to reach with your content?" -3 personal_tone "Perfect. How would you like your brand's voice to sound? (Friendly, professional, witty?)" -4 platform_focus "Which platforms are you most excited to create content for? (Instagram, TikTok, YouTube?)" -5 personal_goals "What are some personal goals you'd love to achieve through your content?" -6 motivations "What's your deeper motivation or 'why' behind becoming a creator?" -7 inspirations "Are there any creators or brands you really admire?" -✅ Always output the field collected first. -✅ Then immediately output the next question using clarification_prompt JSON. +🎨 Tone and Communication Style +- Be friendly, supportive, and patient like a mentor. +- Use easy-to-understand, warm language. 
+- Celebrate when the user answers ("Awesome!", "Great!", "Thanks for sharing!") +- Gently clarify if an answer is unclear — no shaming. +- Stay positive even if the user is vague or uncertain. + +🔄 Conversation Flow Rules +- Always begin by asking about the user's **niche**. +- After collecting each field, immediately follow up with the next question in the order listed. +- If a user's answer is vague, return only a `clarification_prompt` to refine. +- Continue until at least 6 out of 7 fields are filled. +- Once at least 6 fields are collected, stop asking new questions. +- Instead, send: + { "clarification_prompt": "Amazing work! You can now review and finalize your profile. ✨" } + +📋 Suggested Initial Question Sequence: +Order | Field | Clarification Prompt +------|-------------------|----------------------------------------------- +1 | niche | "Awesome! What's your main niche or the topic you want to focus on?" +2 | target_audience | "Great! Who are you trying to reach with your content?" +3 | personal_tone | "Perfect. How would you like your brand's voice to sound?" +4 | platform_focus | "Which platforms are you most excited to create content for?" +5 | personal_goals | "What are some personal goals you'd love to achieve through your content?" +6 | motivations | "What's your deeper motivation or 'why' behind becoming a creator?" +7 | inspirations | "Are there any creators or brands you really admire?" 🧠 Personalization Hooks (Optional Soft Touches) -If the user's niche or target_audience gives hints (e.g., fitness, education, entertainment), -you can slightly adjust your next prompt tone. -Examples: +If the user's niche or audience gives hints, adjust your tone slightly: +- Fitness niche → "Fitness content is super inspiring. Who would you love to motivate?" +- Education niche → "Teaching is powerful! Who’s your dream audience to help?" -If niche is fitness: -"Great! Fitness content is super inspiring. Who would you love to motivate?" -If niche is education: -"Teaching is powerful! Who's your dream audience to help?" -✅ Always stay JSON-correct even if you personalize. +✅ Always output in valid JSON format. Never mix structured and unstructured responses. -📋 Example Correct Full Sequence +📋 Example Full Sequence: +Event | Agent Output +--------------|-------------------------------------------------------- +User says: "I want to do fitness content" → { "niche": "Fitness and Wellness" } +Then → { "clarification_prompt": "Awesome! Who is your ideal audience?" } +User says: "Young professionals" → { "target_audience": "Young professionals" } +Then → { "clarification_prompt": "Great! What tone would you like to use?" } -Event Agent JSON Output -User: "I want to create fitness content" { "niche": "Fitness and Wellness" } -Then { "clarification_prompt": "Awesome! Who is your ideal audience?" } -User: "Young professionals" { "target_audience": "Young professionals" } -Then { "clarification_prompt": "Perfect! What tone do you want your brand to have?" } -etc. keep going in sequence ⚡ Special Conditions -If a user says "that's enough" or similar: -Politely end the conversation and thank them. -If agent detects 6+ fields filled: -Stop asking automatically. -✅ Example final closing: - -{ "clarification_prompt": "Amazing work! You can now review and finalize your profile. ✨" } - +- If a user says "that's enough" or similar: + → politely end the conversation with encouragement. 
""" ) From 805fe1282e387160a54ab47ebdef8dea54124bed Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Tue, 6 May 2025 19:25:47 +0900 Subject: [PATCH 171/230] Update profilebuilder_agent.py --- src/agents/profilebuilder_agent.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 3718aebb..8c6d81ec 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,5 +1,7 @@ # src/agents/profilebuilder_agent.py +from agents import Agent + profilebuilder_agent = Agent( name="profilebuilder", instructions=""" From 0b7dffb62b51785f93d564800207e41cbd9bc5a2 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 11:24:26 +0900 Subject: [PATCH 172/230] Create agent_outputs.py --- src/agents/agent_outputs.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 src/agents/agent_outputs.py diff --git a/src/agents/agent_outputs.py b/src/agents/agent_outputs.py new file mode 100644 index 00000000..ce3e2b3f --- /dev/null +++ b/src/agents/agent_outputs.py @@ -0,0 +1,19 @@ +# agents/agent_outputs.py +""" +Shared, strictly‑typed output schemas for Agents SDK. +Phase α only covers ProfileBuilder; add more as you refactor other agents. +""" + +from typing import List, Union +from pydantic import BaseModel + + +class ProfileFieldOut(BaseModel): + """One field‑value pair collected by the profile‑builder agent.""" + field_name: str + value: Union[str, List[str]] + + +class ClarificationOut(BaseModel): + """Prompt asking the user to supply missing info.""" + prompt: str From fb49b0bc17abb9fe9a3c96e9760b63b1d941df10 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 03:16:09 +0000 Subject: [PATCH 173/230] output --- src/agents/agent_output.py | 20 ++++++++++++++++++++ src/agents/agent_outputs.py | 19 ------------------- 2 files changed, 20 insertions(+), 19 deletions(-) delete mode 100644 src/agents/agent_outputs.py diff --git a/src/agents/agent_output.py b/src/agents/agent_output.py index 3262c57d..6a6ea643 100644 --- a/src/agents/agent_output.py +++ b/src/agents/agent_output.py @@ -142,3 +142,23 @@ def _type_to_str(t: type[Any]) -> str: return f"{origin.__name__}[{args_str}]" else: return str(t) + +# ───────────────────────────────────────────────────────────── +# Additional output schemas (Phase α) +# ───────────────────────────────────────────────────────────── +from typing import List, Union +from pydantic import BaseModel + +class ProfileFieldOut(BaseModel): + """One field‑value pair collected by Profile‑builder.""" + field_name: str + value: Union[str, List[str]] + +class ClarificationOut(BaseModel): + """Prompt asking the user for missing info.""" + prompt: str + +__all__ += [ + "ProfileFieldOut", + "ClarificationOut", +] diff --git a/src/agents/agent_outputs.py b/src/agents/agent_outputs.py deleted file mode 100644 index ce3e2b3f..00000000 --- a/src/agents/agent_outputs.py +++ /dev/null @@ -1,19 +0,0 @@ -# agents/agent_outputs.py -""" -Shared, strictly‑typed output schemas for Agents SDK. -Phase α only covers ProfileBuilder; add more as you refactor other agents. 
-""" - -from typing import List, Union -from pydantic import BaseModel - - -class ProfileFieldOut(BaseModel): - """One field‑value pair collected by the profile‑builder agent.""" - field_name: str - value: Union[str, List[str]] - - -class ClarificationOut(BaseModel): - """Prompt asking the user to supply missing info.""" - prompt: str From b2f65e38c8e32ce36bdec6190db6525be5cf0248 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 03:23:34 +0000 Subject: [PATCH 174/230] output file upd --- src/agents/agent_output.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/agents/agent_output.py b/src/agents/agent_output.py index 6a6ea643..0b104d7c 100644 --- a/src/agents/agent_output.py +++ b/src/agents/agent_output.py @@ -158,7 +158,14 @@ class ClarificationOut(BaseModel): """Prompt asking the user for missing info.""" prompt: str -__all__ += [ +# Ensure __all__ exists, then extend it +try: + __all__ +except NameError: + __all__ = [] + +__all__.extend([ "ProfileFieldOut", "ClarificationOut", -] +]) + From 4ea762b98ea0ade4c0a0a9a8fc5aca01126af274 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 05:38:18 +0000 Subject: [PATCH 175/230] updated profilebuilder_agent with new tools and guardrail logic --- src/agents/profilebuilder_agent.py | 105 +++++------------------------ 1 file changed, 17 insertions(+), 88 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 8c6d81ec..1ab34ec1 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,90 +1,19 @@ -# src/agents/profilebuilder_agent.py - -from agents import Agent - -profilebuilder_agent = Agent( - name="profilebuilder", - instructions=""" -You are the ProfileBuilder Agent. - -Your role is to guide the user step-by-step through building a creator profile by asking supportive, friendly questions, while strictly outputting structured JSON at all times. - -You must help the user fill out these profile fields: - -Field Name | Description ----------------------|--------------------------------------------------- -niche | Their main content topic -target_audience | Who they want to reach -personal_tone | Desired voice/style (e.g., friendly, professional) -platform_focus | Primary platforms (e.g., Instagram, TikTok, YouTube) -personal_goals | Specific achievements or aspirations -motivations | Deeper personal reasons behind creating -inspirations | Other creators or brands they admire - -🛠 Critical Technical Rules -- Every reply must be a **valid JSON object**. -- One field per JSON output only. -- No freeform text, no Markdown, no mixed outputs. -- Never include multiple fields in one JSON response. -- Never return a final complete profile. -- ✅ Always output the collected field first in a JSON object. -- ✅ Then, in a **separate** reply, output the next question using `clarification_prompt` JSON. - -✅ Output Examples: -{ "niche": "Fitness and Wellness" } -{ "platform_focus": ["Instagram", "TikTok"] } - -✅ Clarification Prompt Example: -{ "clarification_prompt": "Could you describe your audience a little more specifically?" } - -⚠️ Fallback Rule (Critical): -If you ever feel unsure how to respond, return: -{ "clarification_prompt": "Could you clarify that a bit more?" } - -🎨 Tone and Communication Style -- Be friendly, supportive, and patient like a mentor. -- Use easy-to-understand, warm language. 
-- Celebrate when the user answers ("Awesome!", "Great!", "Thanks for sharing!") -- Gently clarify if an answer is unclear — no shaming. -- Stay positive even if the user is vague or uncertain. - -🔄 Conversation Flow Rules -- Always begin by asking about the user's **niche**. -- After collecting each field, immediately follow up with the next question in the order listed. -- If a user's answer is vague, return only a `clarification_prompt` to refine. -- Continue until at least 6 out of 7 fields are filled. -- Once at least 6 fields are collected, stop asking new questions. -- Instead, send: - { "clarification_prompt": "Amazing work! You can now review and finalize your profile. ✨" } - -📋 Suggested Initial Question Sequence: -Order | Field | Clarification Prompt -------|-------------------|----------------------------------------------- -1 | niche | "Awesome! What's your main niche or the topic you want to focus on?" -2 | target_audience | "Great! Who are you trying to reach with your content?" -3 | personal_tone | "Perfect. How would you like your brand's voice to sound?" -4 | platform_focus | "Which platforms are you most excited to create content for?" -5 | personal_goals | "What are some personal goals you'd love to achieve through your content?" -6 | motivations | "What's your deeper motivation or 'why' behind becoming a creator?" -7 | inspirations | "Are there any creators or brands you really admire?" - -🧠 Personalization Hooks (Optional Soft Touches) -If the user's niche or audience gives hints, adjust your tone slightly: -- Fitness niche → "Fitness content is super inspiring. Who would you love to motivate?" -- Education niche → "Teaching is powerful! Who’s your dream audience to help?" - -✅ Always output in valid JSON format. Never mix structured and unstructured responses. +from openai_agents import Agent, output_guardrail, GuardrailFunctionOutput +from .agent_output import ProfileFieldOut, ClarificationOut + +profile_builder = Agent( + name="Profile‑builder", + instructions=( + "Collect ONE profile field at a time from the user.\n" + "Return ONLY a JSON object matching ProfileFieldOut OR ClarificationOut." + ), + output_type=ProfileFieldOut, + alternate_output_types=[ClarificationOut], +) -📋 Example Full Sequence: -Event | Agent Output ---------------|-------------------------------------------------------- -User says: "I want to do fitness content" → { "niche": "Fitness and Wellness" } -Then → { "clarification_prompt": "Awesome! Who is your ideal audience?" } -User says: "Young professionals" → { "target_audience": "Young professionals" } -Then → { "clarification_prompt": "Great! What tone would you like to use?" } +@output_guardrail +async def schema_guardrail(ctx, agent, llm_output): + # If JSON parsed into one of the declared types, we're good. + return GuardrailFunctionOutput("schema_ok", tripwire_triggered=False) -⚡ Special Conditions -- If a user says "that's enough" or similar: - → politely end the conversation with encouragement. 
-""" -) +profile_builder.output_guardrails = [schema_guardrail] From 051de72f8826091c13b19c25862166e733326f4f Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 05:48:18 +0000 Subject: [PATCH 176/230] revise pyproject dependencies accomodate tools, guardrail --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2302d8a3..de0bde77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -name = "openai-agents" +name = "rightnow-agent-app" version = "0.0.9" description = "OpenAI Agents SDK" readme = "README.md" @@ -8,6 +8,7 @@ license = "MIT" authors = [{ name = "OpenAI", email = "support@openai.com" }] dependencies = [ "openai>=1.66.5", + "openai-agents>=0.3.1", "pydantic>=2.10, <3", "griffe>=1.5.6, <2", "typing-extensions>=4.12.2, <5", From 529455ac8a1c7a679a46d0814177c8d93f7862c0 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 05:52:15 +0000 Subject: [PATCH 177/230] revise pyproject dependencies accomodate tools, guardrail --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index de0bde77..fe9e9ebb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ license = "MIT" authors = [{ name = "OpenAI", email = "support@openai.com" }] dependencies = [ "openai>=1.66.5", - "openai-agents>=0.3.1", + "openai-agents>=0.0.14", "pydantic>=2.10, <3", "griffe>=1.5.6, <2", "typing-extensions>=4.12.2, <5", From 89e93d4c5c5b9d66028d7dc49ce36f2e82e4e039 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 06:22:58 +0000 Subject: [PATCH 178/230] revise pyproject dependencies accomodate tools, guardrail --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fe9e9ebb..de0bde77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ license = "MIT" authors = [{ name = "OpenAI", email = "support@openai.com" }] dependencies = [ "openai>=1.66.5", - "openai-agents>=0.0.14", + "openai-agents>=0.3.1", "pydantic>=2.10, <3", "griffe>=1.5.6, <2", "typing-extensions>=4.12.2, <5", From 73da64779e9d632139794af5a4e085f59b5203ff Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 06:37:07 +0000 Subject: [PATCH 179/230] revise pyproject dependencies accomodate tools, guardrail --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index de0bde77..fe9e9ebb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ license = "MIT" authors = [{ name = "OpenAI", email = "support@openai.com" }] dependencies = [ "openai>=1.66.5", - "openai-agents>=0.3.1", + "openai-agents>=0.0.14", "pydantic>=2.10, <3", "griffe>=1.5.6, <2", "typing-extensions>=4.12.2, <5", From 318cd3a5c89ec16845a0c3dc5add20f4d36333fa Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 06:47:56 +0000 Subject: [PATCH 180/230] revise pyproject dependencies check openai-agents directly to git url --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fe9e9ebb..4a8fc8a4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ license = "MIT" authors = [{ name = 
"OpenAI", email = "support@openai.com" }] dependencies = [ "openai>=1.66.5", - "openai-agents>=0.0.14", + "openai-agents @ git+https://github.com/openai/openai-agents-python.git", "pydantic>=2.10, <3", "griffe>=1.5.6, <2", "typing-extensions>=4.12.2, <5", From 17d690224885f16c72af4fe4eba569eeb2011659 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 06:52:12 +0000 Subject: [PATCH 181/230] revise pyproject dependencies update openaiagent, direct git ref bottom code --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 4a8fc8a4..2f1e7cc0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -138,3 +138,6 @@ markers = [ [tool.inline-snapshot] format-command = "ruff format --stdin-filename {filename}" + +[tool.hatch.metadata] +allow-direct-references = true From 2f0fedc10107d8544a71ac741930f0dda01a43a7 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 07:29:35 +0000 Subject: [PATCH 182/230] Add OpenAI Agents SDK as submodule --- .gitmodules | 3 +++ openai_agents | 1 + 2 files changed, 4 insertions(+) create mode 100644 .gitmodules create mode 160000 openai_agents diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..a8a151cb --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "openai_agents"] + path = openai_agents + url = https://github.com/openai/openai-agents-python.git diff --git a/openai_agents b/openai_agents new file mode 160000 index 00000000..f9763495 --- /dev/null +++ b/openai_agents @@ -0,0 +1 @@ +Subproject commit f9763495b86afcf0c421451a92200e1141fa8dcb From 8677ad3f7a9d73505e798ad2be68d2a3c4d69f88 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 07:42:20 +0000 Subject: [PATCH 183/230] added openai as submodule --- .gitmodules | 3 +++ vendor/openai-agents-python | 1 + 2 files changed, 4 insertions(+) create mode 160000 vendor/openai-agents-python diff --git a/.gitmodules b/.gitmodules index a8a151cb..83e509ef 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "openai_agents"] path = openai_agents url = https://github.com/openai/openai-agents-python.git +[submodule "vendor/openai-agents-python"] + path = vendor/openai-agents-python + url = https://github.com/openai/openai-agents-python.git diff --git a/vendor/openai-agents-python b/vendor/openai-agents-python new file mode 160000 index 00000000..f9763495 --- /dev/null +++ b/vendor/openai-agents-python @@ -0,0 +1 @@ +Subproject commit f9763495b86afcf0c421451a92200e1141fa8dcb From ab0a390c48aa7186da334c1dae4599c96b69bd81 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 08:19:55 +0000 Subject: [PATCH 184/230] Integrated openai-agents-python as a submodule and updated import paths --- src/agents/agent_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index c33c1292..368e5b4f 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -17,8 +17,8 @@ # ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -from agents import Agent, Runner, handoff, RunContextWrapper +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 
'../openai-agents-python/src'))) +from openai_agents_python.src.agents import Agent, Runner, handoff, RunContextWrapper from agents.extensions.handoff_prompt import prompt_with_handoff_instructions # ── Environment variable for Bubble webhook URL From 1e67af06f7908109cf106955ff7bf4e3bb4b7eac Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 08:31:52 +0000 Subject: [PATCH 185/230] update agent_server sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ../../openai-agents-python))) --- src/agents/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 368e5b4f..73f70750 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -17,7 +17,7 @@ # ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../openai-agents-python/src'))) +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../openai-agents-python"))) from openai_agents_python.src.agents import Agent, Runner, handoff, RunContextWrapper from agents.extensions.handoff_prompt import prompt_with_handoff_instructions From 51a49316a4a19469872d9b10995aad5111d21131 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 08:37:43 +0000 Subject: [PATCH 186/230] update profilebuilder_agent.py also with submodule asys --- src/agents/profilebuilder_agent.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 1ab34ec1..3ba84628 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,3 +1,7 @@ +import os, sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../openai-agents-python"))) + + from openai_agents import Agent, output_guardrail, GuardrailFunctionOutput from .agent_output import ProfileFieldOut, ClarificationOut From 5340aa8de61330c122ed16f4d14801b9b6ce0a70 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 21:01:39 +0900 Subject: [PATCH 187/230] =?UTF-8?q?profilebuilder=5Fagent.py=20=EC=97=85?= =?UTF-8?q?=EB=8D=B0=EC=9D=B4=ED=8A=B8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/agents/profilebuilder_agent.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 3ba84628..28e3f101 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -2,7 +2,8 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../openai-agents-python"))) -from openai_agents import Agent, output_guardrail, GuardrailFunctionOutput +from openai_agents.agent import Agent +from openai_agents.guardrails import output_guardrail, GuardrailFunctionOutput from .agent_output import ProfileFieldOut, ClarificationOut profile_builder = Agent( From 533cdfc198edd4583c3b4908d222214b1e624da0 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Wed, 7 May 2025 21:06:42 +0900 Subject: [PATCH 188/230] =?UTF-8?q?profilebuilder=5Fagent.py=20=EC=97=85?= =?UTF-8?q?=EB=8D=B0=EC=9D=B4=ED=8A=B8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- src/agents/profilebuilder_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 28e3f101..8c3e0976 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -2,7 +2,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../openai-agents-python"))) -from openai_agents.agent import Agent +from openai_agents import Agent from openai_agents.guardrails import output_guardrail, GuardrailFunctionOutput from .agent_output import ProfileFieldOut, ClarificationOut From 44dee1dc4a0cedb50d19c84e53b6a008fce2b532 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 00:37:23 +0000 Subject: [PATCH 189/230] fix: replace openai_agents imports with agents --- src/agents/agent_server.py | 2 +- src/agents/profilebuilder_agent.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/agents/agent_server.py b/src/agents/agent_server.py index 73f70750..61a92706 100644 --- a/src/agents/agent_server.py +++ b/src/agents/agent_server.py @@ -18,7 +18,7 @@ # ── SDK setup ─────────────────────────────────────────────────────────────── load_dotenv() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../openai-agents-python"))) -from openai_agents_python.src.agents import Agent, Runner, handoff, RunContextWrapper +from agents import Agent, Runner, handoff, RunContextWrapper from agents.extensions.handoff_prompt import prompt_with_handoff_instructions # ── Environment variable for Bubble webhook URL diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 8c3e0976..3f28f52f 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -2,8 +2,8 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../openai-agents-python"))) -from openai_agents import Agent -from openai_agents.guardrails import output_guardrail, GuardrailFunctionOutput +from agents import Agent +from agents.guardrails import output_guardrail, GuardrailFunctionOutput from .agent_output import ProfileFieldOut, ClarificationOut profile_builder = Agent( From 774ab6fad637d3fd4e7bff87eb9b089c6a9a6890 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 00:46:05 +0000 Subject: [PATCH 190/230] fix: replace openai_agents imports with agents --- src/agents/profilebuilder_agent.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 3f28f52f..c4de97ed 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,6 +1,4 @@ import os, sys -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../openai-agents-python"))) - from agents import Agent from agents.guardrails import output_guardrail, GuardrailFunctionOutput From 41ace637e384b746ab40c7966ac03e6da7393ff7 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 00:51:51 +0000 Subject: [PATCH 191/230] fix(imports): use openai_agents.agent & guardrails --- src/agents/profilebuilder_agent.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index c4de97ed..115e800d 100644 --- 
a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,9 +1,11 @@ -import os, sys +# src/agents/profilebuilder_agent.py +# ---------------------------------- +from openai_agents.agent import Agent +from openai_agents.guardrails import output_guardrail, GuardrailFunctionOutput -from agents import Agent -from agents.guardrails import output_guardrail, GuardrailFunctionOutput from .agent_output import ProfileFieldOut, ClarificationOut + profile_builder = Agent( name="Profile‑builder", instructions=( From 057fd53ce5b94fc58b15188853ad654d64d1c43b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 01:35:05 +0000 Subject: [PATCH 192/230] =?UTF-8?q?fix:=20final=20openai=5Fagents=20?= =?UTF-8?q?=E2=86=92=20agents=20import=20cleanup?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/agents/profilebuilder_agent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 115e800d..4b218c0c 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,7 +1,7 @@ # src/agents/profilebuilder_agent.py # ---------------------------------- -from openai_agents.agent import Agent -from openai_agents.guardrails import output_guardrail, GuardrailFunctionOutput +from agents import Agent +from agents.guardrails import output_guardrail, GuardrailFunctionOutput from .agent_output import ProfileFieldOut, ClarificationOut From 06e895d2c7eb0e2a7c0b0a9db189b83acd2e1990 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 01:40:51 +0000 Subject: [PATCH 193/230] fix: import guardrails from openai_agents --- src/agents/profilebuilder_agent.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 4b218c0c..51f1c13c 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,10 +1,13 @@ # src/agents/profilebuilder_agent.py # ---------------------------------- -from agents import Agent -from agents.guardrails import output_guardrail, GuardrailFunctionOutput -from .agent_output import ProfileFieldOut, ClarificationOut +from agents import Agent # stays as-is +from openai_agents.guardrails import ( # ← change this line + output_guardrail, + GuardrailFunctionOutput, +) +from .agent_output import ProfileFieldOut, ClarificationOut profile_builder = Agent( name="Profile‑builder", From 96fd69f8690a5040326422a538841b6f224ff6a2 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 01:47:20 +0000 Subject: [PATCH 194/230] fix: import guardrails from openai_agents SDK --- src/agents/profilebuilder_agent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 51f1c13c..5645cd1b 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,8 +1,8 @@ # src/agents/profilebuilder_agent.py # ---------------------------------- -from agents import Agent # stays as-is -from openai_agents.guardrails import ( # ← change this line +from openai_agents import Agent +from openai_agents.guardrails import ( output_guardrail, GuardrailFunctionOutput, ) From 795dbcfc20cf5738b65635aa555f1e487b903542 Mon Sep 17 
00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 01:55:27 +0000 Subject: [PATCH 195/230] fix: import Agent from openai_agents.agent --- src/agents/profilebuilder_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 5645cd1b..2227b7b1 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,7 +1,7 @@ # src/agents/profilebuilder_agent.py # ---------------------------------- -from openai_agents import Agent +from from openai_agents.agent import Agent from openai_agents.guardrails import ( output_guardrail, GuardrailFunctionOutput, From 237bac2d79673fb3c1dc3da55e1417617ac46a9a Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 02:23:42 +0000 Subject: [PATCH 196/230] fix: correct Agent import --- src/agents/profilebuilder_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/profilebuilder_agent.py b/src/agents/profilebuilder_agent.py index 2227b7b1..fb590707 100644 --- a/src/agents/profilebuilder_agent.py +++ b/src/agents/profilebuilder_agent.py @@ -1,7 +1,7 @@ # src/agents/profilebuilder_agent.py # ---------------------------------- -from from openai_agents.agent import Agent +from openai_agents.agent import Agent from openai_agents.guardrails import ( output_guardrail, GuardrailFunctionOutput, From 47fc24384ab6646bfe65f485ba586c6b216ad8d2 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 02:49:16 +0000 Subject: [PATCH 197/230] refactor: move app code, remove vendored SDK --- src/agents/__init__.py | 252 ---- src/agents/_config.py | 26 - src/agents/_debug.py | 17 - src/agents/_run_impl.py | 921 --------------- src/agents/agent.py | 245 ---- src/agents/computer.py | 107 -- src/agents/exceptions.py | 63 - src/agents/extensions/__init__.py | 0 src/agents/extensions/handoff_filters.py | 67 -- src/agents/extensions/handoff_prompt.py | 19 - src/agents/extensions/visualization.py | 137 --- src/agents/function_schema.py | 344 ------ src/agents/guardrail.py | 320 ------ src/agents/handoffs.py | 236 ---- src/agents/items.py | 248 ---- src/agents/lifecycle.py | 105 -- src/agents/logger.py | 3 - src/agents/mcp/__init__.py | 21 - src/agents/mcp/server.py | 301 ----- src/agents/mcp/util.py | 136 --- src/agents/model_settings.py | 72 -- src/agents/models/__init__.py | 0 src/agents/models/_openai_shared.py | 34 - src/agents/models/fake_id.py | 5 - src/agents/models/interface.py | 107 -- src/agents/models/openai_chatcompletions.py | 1014 ----------------- src/agents/models/openai_provider.py | 91 -- src/agents/models/openai_responses.py | 393 ------- src/agents/py.typed | 1 - src/agents/result.py | 225 ---- src/agents/run.py | 942 --------------- src/agents/run_context.py | 26 - src/agents/stream_events.py | 58 - src/agents/strict_schema.py | 167 --- src/agents/tool.py | 310 ----- src/agents/tracing/__init__.py | 113 -- src/agents/tracing/create.py | 455 -------- src/agents/tracing/logger.py | 3 - src/agents/tracing/processor_interface.py | 69 -- src/agents/tracing/processors.py | 276 ----- src/agents/tracing/scope.py | 49 - src/agents/tracing/setup.py | 211 ---- src/agents/tracing/span_data.py | 374 ------ src/agents/tracing/spans.py | 264 ----- src/agents/tracing/traces.py | 195 ---- src/agents/tracing/util.py | 22 - src/agents/usage.py | 22 - 
src/agents/util/__init__.py | 0 src/agents/util/_coro.py | 2 - src/agents/util/_error_tracing.py | 16 - src/agents/util/_json.py | 31 - src/agents/util/_pretty_print.py | 56 - src/agents/util/_transforms.py | 11 - src/agents/util/_types.py | 7 - src/agents/util/schemas.py | 17 - src/agents/util/services.py | 34 - src/agents/util/webhook.py | 51 - src/agents/version.py | 7 - src/agents/voice/__init__.py | 51 - src/agents/voice/events.py | 47 - src/agents/voice/exceptions.py | 8 - src/agents/voice/imports.py | 11 - src/agents/voice/input.py | 88 -- src/agents/voice/model.py | 193 ---- src/agents/voice/models/__init__.py | 0 .../voice/models/openai_model_provider.py | 97 -- src/agents/voice/models/openai_stt.py | 456 -------- src/agents/voice/models/openai_tts.py | 54 - src/agents/voice/pipeline.py | 151 --- src/agents/voice/pipeline_config.py | 46 - src/agents/voice/result.py | 287 ----- src/agents/voice/utils.py | 37 - src/agents/voice/workflow.py | 93 -- src/{agents => app}/agent_onboarding.py | 2 +- src/{agents => app}/agent_output.py | 0 src/{agents => app}/agent_server.py | 6 +- src/{agents => app}/profilebuilder.py | 4 +- src/{agents => app}/profilebuilder_agent.py | 0 78 files changed, 5 insertions(+), 10924 deletions(-) delete mode 100644 src/agents/__init__.py delete mode 100644 src/agents/_config.py delete mode 100644 src/agents/_debug.py delete mode 100644 src/agents/_run_impl.py delete mode 100644 src/agents/agent.py delete mode 100644 src/agents/computer.py delete mode 100644 src/agents/exceptions.py delete mode 100644 src/agents/extensions/__init__.py delete mode 100644 src/agents/extensions/handoff_filters.py delete mode 100644 src/agents/extensions/handoff_prompt.py delete mode 100644 src/agents/extensions/visualization.py delete mode 100644 src/agents/function_schema.py delete mode 100644 src/agents/guardrail.py delete mode 100644 src/agents/handoffs.py delete mode 100644 src/agents/items.py delete mode 100644 src/agents/lifecycle.py delete mode 100644 src/agents/logger.py delete mode 100644 src/agents/mcp/__init__.py delete mode 100644 src/agents/mcp/server.py delete mode 100644 src/agents/mcp/util.py delete mode 100644 src/agents/model_settings.py delete mode 100644 src/agents/models/__init__.py delete mode 100644 src/agents/models/_openai_shared.py delete mode 100644 src/agents/models/fake_id.py delete mode 100644 src/agents/models/interface.py delete mode 100644 src/agents/models/openai_chatcompletions.py delete mode 100644 src/agents/models/openai_provider.py delete mode 100644 src/agents/models/openai_responses.py delete mode 100644 src/agents/py.typed delete mode 100644 src/agents/result.py delete mode 100644 src/agents/run.py delete mode 100644 src/agents/run_context.py delete mode 100644 src/agents/stream_events.py delete mode 100644 src/agents/strict_schema.py delete mode 100644 src/agents/tool.py delete mode 100644 src/agents/tracing/__init__.py delete mode 100644 src/agents/tracing/create.py delete mode 100644 src/agents/tracing/logger.py delete mode 100644 src/agents/tracing/processor_interface.py delete mode 100644 src/agents/tracing/processors.py delete mode 100644 src/agents/tracing/scope.py delete mode 100644 src/agents/tracing/setup.py delete mode 100644 src/agents/tracing/span_data.py delete mode 100644 src/agents/tracing/spans.py delete mode 100644 src/agents/tracing/traces.py delete mode 100644 src/agents/tracing/util.py delete mode 100644 src/agents/usage.py delete mode 100644 src/agents/util/__init__.py delete mode 100644 src/agents/util/_coro.py 
delete mode 100644 src/agents/util/_error_tracing.py delete mode 100644 src/agents/util/_json.py delete mode 100644 src/agents/util/_pretty_print.py delete mode 100644 src/agents/util/_transforms.py delete mode 100644 src/agents/util/_types.py delete mode 100644 src/agents/util/schemas.py delete mode 100644 src/agents/util/services.py delete mode 100644 src/agents/util/webhook.py delete mode 100644 src/agents/version.py delete mode 100644 src/agents/voice/__init__.py delete mode 100644 src/agents/voice/events.py delete mode 100644 src/agents/voice/exceptions.py delete mode 100644 src/agents/voice/imports.py delete mode 100644 src/agents/voice/input.py delete mode 100644 src/agents/voice/model.py delete mode 100644 src/agents/voice/models/__init__.py delete mode 100644 src/agents/voice/models/openai_model_provider.py delete mode 100644 src/agents/voice/models/openai_stt.py delete mode 100644 src/agents/voice/models/openai_tts.py delete mode 100644 src/agents/voice/pipeline.py delete mode 100644 src/agents/voice/pipeline_config.py delete mode 100644 src/agents/voice/result.py delete mode 100644 src/agents/voice/utils.py delete mode 100644 src/agents/voice/workflow.py rename src/{agents => app}/agent_onboarding.py (98%) rename src/{agents => app}/agent_output.py (100%) rename src/{agents => app}/agent_server.py (97%) rename src/{agents => app}/profilebuilder.py (97%) rename src/{agents => app}/profilebuilder_agent.py (100%) diff --git a/src/agents/__init__.py b/src/agents/__init__.py deleted file mode 100644 index db7d312e..00000000 --- a/src/agents/__init__.py +++ /dev/null @@ -1,252 +0,0 @@ -import logging -import sys -from typing import Literal - -from openai import AsyncOpenAI - -from . import _config -from .agent import Agent, ToolsToFinalOutputFunction, ToolsToFinalOutputResult -from .agent_output import AgentOutputSchema -from .computer import AsyncComputer, Button, Computer, Environment -from .exceptions import ( - AgentsException, - InputGuardrailTripwireTriggered, - MaxTurnsExceeded, - ModelBehaviorError, - OutputGuardrailTripwireTriggered, - UserError, -) -from .guardrail import ( - GuardrailFunctionOutput, - InputGuardrail, - InputGuardrailResult, - OutputGuardrail, - OutputGuardrailResult, - input_guardrail, - output_guardrail, -) -from .handoffs import Handoff, HandoffInputData, HandoffInputFilter, handoff -from .items import ( - HandoffCallItem, - HandoffOutputItem, - ItemHelpers, - MessageOutputItem, - ModelResponse, - ReasoningItem, - RunItem, - ToolCallItem, - ToolCallOutputItem, - TResponseInputItem, -) -from .lifecycle import AgentHooks, RunHooks -from .model_settings import ModelSettings -from .models.interface import Model, ModelProvider, ModelTracing -from .models.openai_chatcompletions import OpenAIChatCompletionsModel -from .models.openai_provider import OpenAIProvider -from .models.openai_responses import OpenAIResponsesModel -from .result import RunResult, RunResultStreaming -from .run import RunConfig, Runner -from .run_context import RunContextWrapper, TContext -from .stream_events import ( - AgentUpdatedStreamEvent, - RawResponsesStreamEvent, - RunItemStreamEvent, - StreamEvent, -) -from .tool import ( - ComputerTool, - FileSearchTool, - FunctionTool, - FunctionToolResult, - Tool, - WebSearchTool, - default_tool_error_function, - function_tool, -) -from .tracing import ( - AgentSpanData, - CustomSpanData, - FunctionSpanData, - GenerationSpanData, - GuardrailSpanData, - HandoffSpanData, - MCPListToolsSpanData, - Span, - SpanData, - SpanError, - 
SpeechGroupSpanData, - SpeechSpanData, - Trace, - TracingProcessor, - TranscriptionSpanData, - add_trace_processor, - agent_span, - custom_span, - function_span, - gen_span_id, - gen_trace_id, - generation_span, - get_current_span, - get_current_trace, - guardrail_span, - handoff_span, - mcp_tools_span, - set_trace_processors, - set_tracing_disabled, - set_tracing_export_api_key, - speech_group_span, - speech_span, - trace, - transcription_span, -) -from .usage import Usage -from .version import __version__ - - -def set_default_openai_key(key: str, use_for_tracing: bool = True) -> None: - """Set the default OpenAI API key to use for LLM requests (and optionally tracing(). This is - only necessary if the OPENAI_API_KEY environment variable is not already set. - - If provided, this key will be used instead of the OPENAI_API_KEY environment variable. - - Args: - key: The OpenAI key to use. - use_for_tracing: Whether to also use this key to send traces to OpenAI. Defaults to True - If False, you'll either need to set the OPENAI_API_KEY environment variable or call - set_tracing_export_api_key() with the API key you want to use for tracing. - """ - _config.set_default_openai_key(key, use_for_tracing) - - -def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool = True) -> None: - """Set the default OpenAI client to use for LLM requests and/or tracing. If provided, this - client will be used instead of the default OpenAI client. - - Args: - client: The OpenAI client to use. - use_for_tracing: Whether to use the API key from this client for uploading traces. If False, - you'll either need to set the OPENAI_API_KEY environment variable or call - set_tracing_export_api_key() with the API key you want to use for tracing. - """ - _config.set_default_openai_client(client, use_for_tracing) - - -def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None: - """Set the default API to use for OpenAI LLM requests. By default, we will use the responses API - but you can set this to use the chat completions API instead. - """ - _config.set_default_openai_api(api) - - -def enable_verbose_stdout_logging(): - """Enables verbose logging to stdout. 
This is useful for debugging.""" - logger = logging.getLogger("openai.agents") - logger.setLevel(logging.DEBUG) - logger.addHandler(logging.StreamHandler(sys.stdout)) - - -__all__ = [ - "Agent", - "ToolsToFinalOutputFunction", - "ToolsToFinalOutputResult", - "Runner", - "Model", - "ModelProvider", - "ModelTracing", - "ModelSettings", - "OpenAIChatCompletionsModel", - "OpenAIProvider", - "OpenAIResponsesModel", - "AgentOutputSchema", - "Computer", - "AsyncComputer", - "Environment", - "Button", - "AgentsException", - "InputGuardrailTripwireTriggered", - "OutputGuardrailTripwireTriggered", - "MaxTurnsExceeded", - "ModelBehaviorError", - "UserError", - "InputGuardrail", - "InputGuardrailResult", - "OutputGuardrail", - "OutputGuardrailResult", - "GuardrailFunctionOutput", - "input_guardrail", - "output_guardrail", - "handoff", - "Handoff", - "HandoffInputData", - "HandoffInputFilter", - "TResponseInputItem", - "MessageOutputItem", - "ModelResponse", - "RunItem", - "HandoffCallItem", - "HandoffOutputItem", - "ToolCallItem", - "ToolCallOutputItem", - "ReasoningItem", - "ModelResponse", - "ItemHelpers", - "RunHooks", - "AgentHooks", - "RunContextWrapper", - "TContext", - "RunResult", - "RunResultStreaming", - "RunConfig", - "RawResponsesStreamEvent", - "RunItemStreamEvent", - "AgentUpdatedStreamEvent", - "StreamEvent", - "FunctionTool", - "FunctionToolResult", - "ComputerTool", - "FileSearchTool", - "Tool", - "WebSearchTool", - "function_tool", - "Usage", - "add_trace_processor", - "agent_span", - "custom_span", - "function_span", - "generation_span", - "get_current_span", - "get_current_trace", - "guardrail_span", - "handoff_span", - "set_trace_processors", - "set_tracing_disabled", - "speech_group_span", - "transcription_span", - "speech_span", - "mcp_tools_span", - "trace", - "Trace", - "TracingProcessor", - "SpanError", - "Span", - "SpanData", - "AgentSpanData", - "CustomSpanData", - "FunctionSpanData", - "GenerationSpanData", - "GuardrailSpanData", - "HandoffSpanData", - "SpeechGroupSpanData", - "SpeechSpanData", - "MCPListToolsSpanData", - "TranscriptionSpanData", - "set_default_openai_key", - "set_default_openai_client", - "set_default_openai_api", - "set_tracing_export_api_key", - "enable_verbose_stdout_logging", - "gen_trace_id", - "gen_span_id", - "default_tool_error_function", - "__version__", -] diff --git a/src/agents/_config.py b/src/agents/_config.py deleted file mode 100644 index 304cfb83..00000000 --- a/src/agents/_config.py +++ /dev/null @@ -1,26 +0,0 @@ -from openai import AsyncOpenAI -from typing_extensions import Literal - -from .models import _openai_shared -from .tracing import set_tracing_export_api_key - - -def set_default_openai_key(key: str, use_for_tracing: bool) -> None: - _openai_shared.set_default_openai_key(key) - - if use_for_tracing: - set_tracing_export_api_key(key) - - -def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None: - _openai_shared.set_default_openai_client(client) - - if use_for_tracing: - set_tracing_export_api_key(client.api_key) - - -def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None: - if api == "chat_completions": - _openai_shared.set_use_responses_by_default(False) - else: - _openai_shared.set_use_responses_by_default(True) diff --git a/src/agents/_debug.py b/src/agents/_debug.py deleted file mode 100644 index 4da91be4..00000000 --- a/src/agents/_debug.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - - -def _debug_flag_enabled(flag: str) -> bool: - flag_value = os.getenv(flag) - return 
flag_value is not None and (flag_value == "1" or flag_value.lower() == "true") - - -DONT_LOG_MODEL_DATA = _debug_flag_enabled("OPENAI_AGENTS_DONT_LOG_MODEL_DATA") -"""By default we don't log LLM inputs/outputs, to prevent exposing sensitive information. Set this -flag to enable logging them. -""" - -DONT_LOG_TOOL_DATA = _debug_flag_enabled("OPENAI_AGENTS_DONT_LOG_TOOL_DATA") -"""By default we don't log tool call inputs/outputs, to prevent exposing sensitive information. Set -this flag to enable logging them. -""" diff --git a/src/agents/_run_impl.py b/src/agents/_run_impl.py deleted file mode 100644 index 94c181b7..00000000 --- a/src/agents/_run_impl.py +++ /dev/null @@ -1,921 +0,0 @@ -from __future__ import annotations - -import asyncio -import dataclasses -import inspect -from collections.abc import Awaitable -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, cast - -from openai.types.responses import ( - ResponseComputerToolCall, - ResponseFileSearchToolCall, - ResponseFunctionToolCall, - ResponseFunctionWebSearch, - ResponseOutputMessage, -) -from openai.types.responses.response_computer_tool_call import ( - ActionClick, - ActionDoubleClick, - ActionDrag, - ActionKeypress, - ActionMove, - ActionScreenshot, - ActionScroll, - ActionType, - ActionWait, -) -from openai.types.responses.response_input_param import ComputerCallOutput -from openai.types.responses.response_reasoning_item import ResponseReasoningItem - -from .agent import Agent, ToolsToFinalOutputResult -from .agent_output import AgentOutputSchema -from .computer import AsyncComputer, Computer -from .exceptions import AgentsException, ModelBehaviorError, UserError -from .guardrail import InputGuardrail, InputGuardrailResult, OutputGuardrail, OutputGuardrailResult -from .handoffs import Handoff, HandoffInputData -from .items import ( - HandoffCallItem, - HandoffOutputItem, - ItemHelpers, - MessageOutputItem, - ModelResponse, - ReasoningItem, - RunItem, - ToolCallItem, - ToolCallOutputItem, - TResponseInputItem, -) -from .lifecycle import RunHooks -from .logger import logger -from .model_settings import ModelSettings -from .models.interface import ModelTracing -from .run_context import RunContextWrapper, TContext -from .stream_events import RunItemStreamEvent, StreamEvent -from .tool import ComputerTool, FunctionTool, FunctionToolResult, Tool -from .tracing import ( - SpanError, - Trace, - function_span, - get_current_trace, - guardrail_span, - handoff_span, - trace, -) -from .util import _coro, _error_tracing - -if TYPE_CHECKING: - from .run import RunConfig - - -class QueueCompleteSentinel: - pass - - -QUEUE_COMPLETE_SENTINEL = QueueCompleteSentinel() - -_NOT_FINAL_OUTPUT = ToolsToFinalOutputResult(is_final_output=False, final_output=None) - - -@dataclass -class AgentToolUseTracker: - agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list) - """Tuple of (agent, list of tools used). 
Can't use a dict because agents aren't hashable.""" - - def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None: - existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None) - if existing_data: - existing_data[1].extend(tool_names) - else: - self.agent_to_tools.append((agent, tool_names)) - - def has_used_tools(self, agent: Agent[Any]) -> bool: - existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None) - return existing_data is not None and len(existing_data[1]) > 0 - - -@dataclass -class ToolRunHandoff: - handoff: Handoff - tool_call: ResponseFunctionToolCall - - -@dataclass -class ToolRunFunction: - tool_call: ResponseFunctionToolCall - function_tool: FunctionTool - - -@dataclass -class ToolRunComputerAction: - tool_call: ResponseComputerToolCall - computer_tool: ComputerTool - - -@dataclass -class ProcessedResponse: - new_items: list[RunItem] - handoffs: list[ToolRunHandoff] - functions: list[ToolRunFunction] - computer_actions: list[ToolRunComputerAction] - tools_used: list[str] # Names of all tools used, including hosted tools - - def has_tools_to_run(self) -> bool: - # Handoffs, functions and computer actions need local processing - # Hosted tools have already run, so there's nothing to do. - return any( - [ - self.handoffs, - self.functions, - self.computer_actions, - ] - ) - - -@dataclass -class NextStepHandoff: - new_agent: Agent[Any] - - -@dataclass -class NextStepFinalOutput: - output: Any - - -@dataclass -class NextStepRunAgain: - pass - - -@dataclass -class SingleStepResult: - original_input: str | list[TResponseInputItem] - """The input items i.e. the items before run() was called. May be mutated by handoff input - filters.""" - - model_response: ModelResponse - """The model response for the current step.""" - - pre_step_items: list[RunItem] - """Items generated before the current step.""" - - new_step_items: list[RunItem] - """Items generated during this current step.""" - - next_step: NextStepHandoff | NextStepFinalOutput | NextStepRunAgain - """The next step to take.""" - - @property - def generated_items(self) -> list[RunItem]: - """Items generated during the agent run (i.e. 
everything generated after - `original_input`).""" - return self.pre_step_items + self.new_step_items - - -def get_model_tracing_impl( - tracing_disabled: bool, trace_include_sensitive_data: bool -) -> ModelTracing: - if tracing_disabled: - return ModelTracing.DISABLED - elif trace_include_sensitive_data: - return ModelTracing.ENABLED - else: - return ModelTracing.ENABLED_WITHOUT_DATA - - -class RunImpl: - @classmethod - async def execute_tools_and_side_effects( - cls, - *, - agent: Agent[TContext], - # The original input to the Runner - original_input: str | list[TResponseInputItem], - # Everything generated by Runner since the original input, but before the current step - pre_step_items: list[RunItem], - new_response: ModelResponse, - processed_response: ProcessedResponse, - output_schema: AgentOutputSchema | None, - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - ) -> SingleStepResult: - # Make a copy of the generated items - pre_step_items = list(pre_step_items) - - new_step_items: list[RunItem] = [] - new_step_items.extend(processed_response.new_items) - - # First, lets run the tool calls - function tools and computer actions - function_results, computer_results = await asyncio.gather( - cls.execute_function_tool_calls( - agent=agent, - tool_runs=processed_response.functions, - hooks=hooks, - context_wrapper=context_wrapper, - config=run_config, - ), - cls.execute_computer_actions( - agent=agent, - actions=processed_response.computer_actions, - hooks=hooks, - context_wrapper=context_wrapper, - config=run_config, - ), - ) - new_step_items.extend([result.run_item for result in function_results]) - new_step_items.extend(computer_results) - - # Second, check if there are any handoffs - if run_handoffs := processed_response.handoffs: - return await cls.execute_handoffs( - agent=agent, - original_input=original_input, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - new_response=new_response, - run_handoffs=run_handoffs, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - ) - - # Third, we'll check if the tool use should result in a final output - check_tool_use = await cls._check_for_final_output_from_tools( - agent=agent, - tool_results=function_results, - context_wrapper=context_wrapper, - config=run_config, - ) - - if check_tool_use.is_final_output: - # If the output type is str, then let's just stringify it - if not agent.output_type or agent.output_type is str: - check_tool_use.final_output = str(check_tool_use.final_output) - - if check_tool_use.final_output is None: - logger.error( - "Model returned a final output of None. Not raising an error because we assume" - "you know what you're doing." - ) - - return await cls.execute_final_output( - agent=agent, - original_input=original_input, - new_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - final_output=check_tool_use.final_output, - hooks=hooks, - context_wrapper=context_wrapper, - ) - - # Now we can check if the model also produced a final output - message_items = [item for item in new_step_items if isinstance(item, MessageOutputItem)] - - # We'll use the last content output as the final output - potential_final_output_text = ( - ItemHelpers.extract_last_text(message_items[-1].raw_item) if message_items else None - ) - - # There are two possibilities that lead to a final output: - # 1. Structured output schema => always leads to a final output - # 2. 
Plain text output schema => only leads to a final output if there are no tool calls - if output_schema and not output_schema.is_plain_text() and potential_final_output_text: - final_output = output_schema.validate_json(potential_final_output_text) - return await cls.execute_final_output( - agent=agent, - original_input=original_input, - new_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - final_output=final_output, - hooks=hooks, - context_wrapper=context_wrapper, - ) - elif ( - not output_schema or output_schema.is_plain_text() - ) and not processed_response.has_tools_to_run(): - return await cls.execute_final_output( - agent=agent, - original_input=original_input, - new_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - final_output=potential_final_output_text or "", - hooks=hooks, - context_wrapper=context_wrapper, - ) - else: - # If there's no final output, we can just run again - return SingleStepResult( - original_input=original_input, - model_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - next_step=NextStepRunAgain(), - ) - - @classmethod - def maybe_reset_tool_choice( - cls, agent: Agent[Any], tool_use_tracker: AgentToolUseTracker, model_settings: ModelSettings - ) -> ModelSettings: - """Resets tool choice to None if the agent has used tools and the agent's reset_tool_choice - flag is True.""" - - if agent.reset_tool_choice is True and tool_use_tracker.has_used_tools(agent): - return dataclasses.replace(model_settings, tool_choice=None) - - return model_settings - - @classmethod - def process_model_response( - cls, - *, - agent: Agent[Any], - all_tools: list[Tool], - response: ModelResponse, - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - ) -> ProcessedResponse: - items: list[RunItem] = [] - - run_handoffs = [] - functions = [] - computer_actions = [] - tools_used: list[str] = [] - handoff_map = {handoff.tool_name: handoff for handoff in handoffs} - function_map = {tool.name: tool for tool in all_tools if isinstance(tool, FunctionTool)} - computer_tool = next((tool for tool in all_tools if isinstance(tool, ComputerTool)), None) - - for output in response.output: - if isinstance(output, ResponseOutputMessage): - items.append(MessageOutputItem(raw_item=output, agent=agent)) - elif isinstance(output, ResponseFileSearchToolCall): - items.append(ToolCallItem(raw_item=output, agent=agent)) - tools_used.append("file_search") - elif isinstance(output, ResponseFunctionWebSearch): - items.append(ToolCallItem(raw_item=output, agent=agent)) - tools_used.append("web_search") - elif isinstance(output, ResponseReasoningItem): - items.append(ReasoningItem(raw_item=output, agent=agent)) - elif isinstance(output, ResponseComputerToolCall): - items.append(ToolCallItem(raw_item=output, agent=agent)) - tools_used.append("computer_use") - if not computer_tool: - _error_tracing.attach_error_to_current_span( - SpanError( - message="Computer tool not found", - data={}, - ) - ) - raise ModelBehaviorError( - "Model produced computer action without a computer tool." 
- ) - computer_actions.append( - ToolRunComputerAction(tool_call=output, computer_tool=computer_tool) - ) - elif not isinstance(output, ResponseFunctionToolCall): - logger.warning(f"Unexpected output type, ignoring: {type(output)}") - continue - - # At this point we know it's a function tool call - if not isinstance(output, ResponseFunctionToolCall): - continue - - tools_used.append(output.name) - - # Handoffs - if output.name in handoff_map: - items.append(HandoffCallItem(raw_item=output, agent=agent)) - handoff = ToolRunHandoff( - tool_call=output, - handoff=handoff_map[output.name], - ) - run_handoffs.append(handoff) - # Regular function tool call - else: - if output.name not in function_map: - _error_tracing.attach_error_to_current_span( - SpanError( - message="Tool not found", - data={"tool_name": output.name}, - ) - ) - raise ModelBehaviorError(f"Tool {output.name} not found in agent {agent.name}") - items.append(ToolCallItem(raw_item=output, agent=agent)) - functions.append( - ToolRunFunction( - tool_call=output, - function_tool=function_map[output.name], - ) - ) - - return ProcessedResponse( - new_items=items, - handoffs=run_handoffs, - functions=functions, - computer_actions=computer_actions, - tools_used=tools_used, - ) - - @classmethod - async def execute_function_tool_calls( - cls, - *, - agent: Agent[TContext], - tool_runs: list[ToolRunFunction], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - config: RunConfig, - ) -> list[FunctionToolResult]: - async def run_single_tool( - func_tool: FunctionTool, tool_call: ResponseFunctionToolCall - ) -> Any: - with function_span(func_tool.name) as span_fn: - if config.trace_include_sensitive_data: - span_fn.span_data.input = tool_call.arguments - try: - _, _, result = await asyncio.gather( - hooks.on_tool_start(context_wrapper, agent, func_tool), - ( - agent.hooks.on_tool_start(context_wrapper, agent, func_tool) - if agent.hooks - else _coro.noop_coroutine() - ), - func_tool.on_invoke_tool(context_wrapper, tool_call.arguments), - ) - - await asyncio.gather( - hooks.on_tool_end(context_wrapper, agent, func_tool, result), - ( - agent.hooks.on_tool_end(context_wrapper, agent, func_tool, result) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - except Exception as e: - _error_tracing.attach_error_to_current_span( - SpanError( - message="Error running tool", - data={"tool_name": func_tool.name, "error": str(e)}, - ) - ) - if isinstance(e, AgentsException): - raise e - raise UserError(f"Error running tool {func_tool.name}: {e}") from e - - if config.trace_include_sensitive_data: - span_fn.span_data.output = result - return result - - tasks = [] - for tool_run in tool_runs: - function_tool = tool_run.function_tool - tasks.append(run_single_tool(function_tool, tool_run.tool_call)) - - results = await asyncio.gather(*tasks) - - return [ - FunctionToolResult( - tool=tool_run.function_tool, - output=result, - run_item=ToolCallOutputItem( - output=result, - raw_item=ItemHelpers.tool_call_output_item(tool_run.tool_call, str(result)), - agent=agent, - ), - ) - for tool_run, result in zip(tool_runs, results) - ] - - @classmethod - async def execute_computer_actions( - cls, - *, - agent: Agent[TContext], - actions: list[ToolRunComputerAction], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - config: RunConfig, - ) -> list[RunItem]: - results: list[RunItem] = [] - # Need to run these serially, because each action can affect the computer state - for action in actions: - results.append( - 
await ComputerAction.execute( - agent=agent, - action=action, - hooks=hooks, - context_wrapper=context_wrapper, - config=config, - ) - ) - - return results - - @classmethod - async def execute_handoffs( - cls, - *, - agent: Agent[TContext], - original_input: str | list[TResponseInputItem], - pre_step_items: list[RunItem], - new_step_items: list[RunItem], - new_response: ModelResponse, - run_handoffs: list[ToolRunHandoff], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - ) -> SingleStepResult: - # If there is more than one handoff, add tool responses that reject those handoffs - multiple_handoffs = len(run_handoffs) > 1 - if multiple_handoffs: - output_message = "Multiple handoffs detected, ignoring this one." - new_step_items.extend( - [ - ToolCallOutputItem( - output=output_message, - raw_item=ItemHelpers.tool_call_output_item( - handoff.tool_call, output_message - ), - agent=agent, - ) - for handoff in run_handoffs[1:] - ] - ) - - actual_handoff = run_handoffs[0] - with handoff_span(from_agent=agent.name) as span_handoff: - handoff = actual_handoff.handoff - new_agent: Agent[Any] = await handoff.on_invoke_handoff( - context_wrapper, actual_handoff.tool_call.arguments - ) - span_handoff.span_data.to_agent = new_agent.name - if multiple_handoffs: - requested_agents = [handoff.handoff.agent_name for handoff in run_handoffs] - span_handoff.set_error( - SpanError( - message="Multiple handoffs requested", - data={ - "requested_agents": requested_agents, - }, - ) - ) - - # Append a tool output item for the handoff - new_step_items.append( - HandoffOutputItem( - agent=agent, - raw_item=ItemHelpers.tool_call_output_item( - actual_handoff.tool_call, - handoff.get_transfer_message(new_agent), - ), - source_agent=agent, - target_agent=new_agent, - ) - ) - - # Execute handoff hooks - await asyncio.gather( - hooks.on_handoff( - context=context_wrapper, - from_agent=agent, - to_agent=new_agent, - ), - ( - agent.hooks.on_handoff( - context_wrapper, - agent=new_agent, - source=agent, - ) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - - # If there's an input filter, filter the input for the next agent - input_filter = handoff.input_filter or ( - run_config.handoff_input_filter if run_config else None - ) - if input_filter: - logger.debug("Filtering inputs for handoff") - handoff_input_data = HandoffInputData( - input_history=tuple(original_input) - if isinstance(original_input, list) - else original_input, - pre_handoff_items=tuple(pre_step_items), - new_items=tuple(new_step_items), - ) - if not callable(input_filter): - _error_tracing.attach_error_to_span( - span_handoff, - SpanError( - message="Invalid input filter", - data={"details": "not callable()"}, - ), - ) - raise UserError(f"Invalid input filter: {input_filter}") - filtered = input_filter(handoff_input_data) - if not isinstance(filtered, HandoffInputData): - _error_tracing.attach_error_to_span( - span_handoff, - SpanError( - message="Invalid input filter result", - data={"details": "not a HandoffInputData"}, - ), - ) - raise UserError(f"Invalid input filter result: {filtered}") - - original_input = ( - filtered.input_history - if isinstance(filtered.input_history, str) - else list(filtered.input_history) - ) - pre_step_items = list(filtered.pre_handoff_items) - new_step_items = list(filtered.new_items) - - return SingleStepResult( - original_input=original_input, - model_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - 
next_step=NextStepHandoff(new_agent), - ) - - @classmethod - async def execute_final_output( - cls, - *, - agent: Agent[TContext], - original_input: str | list[TResponseInputItem], - new_response: ModelResponse, - pre_step_items: list[RunItem], - new_step_items: list[RunItem], - final_output: Any, - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - ) -> SingleStepResult: - # Run the on_end hooks - await cls.run_final_output_hooks(agent, hooks, context_wrapper, final_output) - - return SingleStepResult( - original_input=original_input, - model_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - next_step=NextStepFinalOutput(final_output), - ) - - @classmethod - async def run_final_output_hooks( - cls, - agent: Agent[TContext], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - final_output: Any, - ): - await asyncio.gather( - hooks.on_agent_end(context_wrapper, agent, final_output), - agent.hooks.on_end(context_wrapper, agent, final_output) - if agent.hooks - else _coro.noop_coroutine(), - ) - - @classmethod - async def run_single_input_guardrail( - cls, - agent: Agent[Any], - guardrail: InputGuardrail[TContext], - input: str | list[TResponseInputItem], - context: RunContextWrapper[TContext], - ) -> InputGuardrailResult: - with guardrail_span(guardrail.get_name()) as span_guardrail: - result = await guardrail.run(agent, input, context) - span_guardrail.span_data.triggered = result.output.tripwire_triggered - return result - - @classmethod - async def run_single_output_guardrail( - cls, - guardrail: OutputGuardrail[TContext], - agent: Agent[Any], - agent_output: Any, - context: RunContextWrapper[TContext], - ) -> OutputGuardrailResult: - with guardrail_span(guardrail.get_name()) as span_guardrail: - result = await guardrail.run(agent=agent, agent_output=agent_output, context=context) - span_guardrail.span_data.triggered = result.output.tripwire_triggered - return result - - @classmethod - def stream_step_result_to_queue( - cls, - step_result: SingleStepResult, - queue: asyncio.Queue[StreamEvent | QueueCompleteSentinel], - ): - for item in step_result.new_step_items: - if isinstance(item, MessageOutputItem): - event = RunItemStreamEvent(item=item, name="message_output_created") - elif isinstance(item, HandoffCallItem): - event = RunItemStreamEvent(item=item, name="handoff_requested") - elif isinstance(item, HandoffOutputItem): - event = RunItemStreamEvent(item=item, name="handoff_occured") - elif isinstance(item, ToolCallItem): - event = RunItemStreamEvent(item=item, name="tool_called") - elif isinstance(item, ToolCallOutputItem): - event = RunItemStreamEvent(item=item, name="tool_output") - elif isinstance(item, ReasoningItem): - event = RunItemStreamEvent(item=item, name="reasoning_item_created") - else: - logger.warning(f"Unexpected item type: {type(item)}") - event = None - - if event: - queue.put_nowait(event) - - @classmethod - async def _check_for_final_output_from_tools( - cls, - *, - agent: Agent[TContext], - tool_results: list[FunctionToolResult], - context_wrapper: RunContextWrapper[TContext], - config: RunConfig, - ) -> ToolsToFinalOutputResult: - """Returns (i, final_output).""" - if not tool_results: - return _NOT_FINAL_OUTPUT - - if agent.tool_use_behavior == "run_llm_again": - return _NOT_FINAL_OUTPUT - elif agent.tool_use_behavior == "stop_on_first_tool": - return ToolsToFinalOutputResult( - is_final_output=True, final_output=tool_results[0].output - ) - elif 
isinstance(agent.tool_use_behavior, dict): - names = agent.tool_use_behavior.get("stop_at_tool_names", []) - for tool_result in tool_results: - if tool_result.tool.name in names: - return ToolsToFinalOutputResult( - is_final_output=True, final_output=tool_result.output - ) - return ToolsToFinalOutputResult(is_final_output=False, final_output=None) - elif callable(agent.tool_use_behavior): - if inspect.iscoroutinefunction(agent.tool_use_behavior): - return await cast( - Awaitable[ToolsToFinalOutputResult], - agent.tool_use_behavior(context_wrapper, tool_results), - ) - else: - return cast( - ToolsToFinalOutputResult, agent.tool_use_behavior(context_wrapper, tool_results) - ) - - logger.error(f"Invalid tool_use_behavior: {agent.tool_use_behavior}") - raise UserError(f"Invalid tool_use_behavior: {agent.tool_use_behavior}") - - -class TraceCtxManager: - """Creates a trace only if there is no current trace, and manages the trace lifecycle.""" - - def __init__( - self, - workflow_name: str, - trace_id: str | None, - group_id: str | None, - metadata: dict[str, Any] | None, - disabled: bool, - ): - self.trace: Trace | None = None - self.workflow_name = workflow_name - self.trace_id = trace_id - self.group_id = group_id - self.metadata = metadata - self.disabled = disabled - - def __enter__(self) -> TraceCtxManager: - current_trace = get_current_trace() - if not current_trace: - self.trace = trace( - workflow_name=self.workflow_name, - trace_id=self.trace_id, - group_id=self.group_id, - metadata=self.metadata, - disabled=self.disabled, - ) - self.trace.start(mark_as_current=True) - - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if self.trace: - self.trace.finish(reset_current=True) - - -class ComputerAction: - @classmethod - async def execute( - cls, - *, - agent: Agent[TContext], - action: ToolRunComputerAction, - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - config: RunConfig, - ) -> RunItem: - output_func = ( - cls._get_screenshot_async(action.computer_tool.computer, action.tool_call) - if isinstance(action.computer_tool.computer, AsyncComputer) - else cls._get_screenshot_sync(action.computer_tool.computer, action.tool_call) - ) - - _, _, output = await asyncio.gather( - hooks.on_tool_start(context_wrapper, agent, action.computer_tool), - ( - agent.hooks.on_tool_start(context_wrapper, agent, action.computer_tool) - if agent.hooks - else _coro.noop_coroutine() - ), - output_func, - ) - - await asyncio.gather( - hooks.on_tool_end(context_wrapper, agent, action.computer_tool, output), - ( - agent.hooks.on_tool_end(context_wrapper, agent, action.computer_tool, output) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - - # TODO: don't send a screenshot every single time, use references - image_url = f"data:image/png;base64,{output}" - return ToolCallOutputItem( - agent=agent, - output=image_url, - raw_item=ComputerCallOutput( - call_id=action.tool_call.call_id, - output={ - "type": "computer_screenshot", - "image_url": image_url, - }, - type="computer_call_output", - ), - ) - - @classmethod - async def _get_screenshot_sync( - cls, - computer: Computer, - tool_call: ResponseComputerToolCall, - ) -> str: - action = tool_call.action - if isinstance(action, ActionClick): - computer.click(action.x, action.y, action.button) - elif isinstance(action, ActionDoubleClick): - computer.double_click(action.x, action.y) - elif isinstance(action, ActionDrag): - computer.drag([(p.x, p.y) for p in action.path]) - elif isinstance(action, ActionKeypress): - 
computer.keypress(action.keys) - elif isinstance(action, ActionMove): - computer.move(action.x, action.y) - elif isinstance(action, ActionScreenshot): - computer.screenshot() - elif isinstance(action, ActionScroll): - computer.scroll(action.x, action.y, action.scroll_x, action.scroll_y) - elif isinstance(action, ActionType): - computer.type(action.text) - elif isinstance(action, ActionWait): - computer.wait() - - return computer.screenshot() - - @classmethod - async def _get_screenshot_async( - cls, - computer: AsyncComputer, - tool_call: ResponseComputerToolCall, - ) -> str: - action = tool_call.action - if isinstance(action, ActionClick): - await computer.click(action.x, action.y, action.button) - elif isinstance(action, ActionDoubleClick): - await computer.double_click(action.x, action.y) - elif isinstance(action, ActionDrag): - await computer.drag([(p.x, p.y) for p in action.path]) - elif isinstance(action, ActionKeypress): - await computer.keypress(action.keys) - elif isinstance(action, ActionMove): - await computer.move(action.x, action.y) - elif isinstance(action, ActionScreenshot): - await computer.screenshot() - elif isinstance(action, ActionScroll): - await computer.scroll(action.x, action.y, action.scroll_x, action.scroll_y) - elif isinstance(action, ActionType): - await computer.type(action.text) - elif isinstance(action, ActionWait): - await computer.wait() - - return await computer.screenshot() diff --git a/src/agents/agent.py b/src/agents/agent.py deleted file mode 100644 index a24456b0..00000000 --- a/src/agents/agent.py +++ /dev/null @@ -1,245 +0,0 @@ -from __future__ import annotations - -import dataclasses -import inspect -from collections.abc import Awaitable -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast - -from typing_extensions import NotRequired, TypeAlias, TypedDict - -from .guardrail import InputGuardrail, OutputGuardrail -from .handoffs import Handoff -from .items import ItemHelpers -from .logger import logger -from .mcp import MCPUtil -from .model_settings import ModelSettings -from .models.interface import Model -from .run_context import RunContextWrapper, TContext -from .tool import FunctionToolResult, Tool, function_tool -from .util import _transforms -from .util._types import MaybeAwaitable - -if TYPE_CHECKING: - from .lifecycle import AgentHooks - from .mcp import MCPServer - from .result import RunResult - - -@dataclass -class ToolsToFinalOutputResult: - is_final_output: bool - """Whether this is the final output. If False, the LLM will run again and receive the tool call - output. - """ - - final_output: Any | None = None - """The final output. Can be None if `is_final_output` is False, otherwise must match the - `output_type` of the agent. - """ - - -ToolsToFinalOutputFunction: TypeAlias = Callable[ - [RunContextWrapper[TContext], list[FunctionToolResult]], - MaybeAwaitable[ToolsToFinalOutputResult], -] -"""A function that takes a run context and a list of tool results, and returns a -`ToolsToFinalOutputResult`. -""" - - -class StopAtTools(TypedDict): - stop_at_tool_names: list[str] - """A list of tool names, any of which will stop the agent from running further.""" - - -class MCPConfig(TypedDict): - """Configuration for MCP servers.""" - - convert_schemas_to_strict: NotRequired[bool] - """If True, we will attempt to convert the MCP schemas to strict-mode schemas. This is a - best-effort conversion, so some schemas may not be convertible. Defaults to False. 
- """ - - -@dataclass -class Agent(Generic[TContext]): - """An agent is an AI model configured with instructions, tools, guardrails, handoffs and more. - - We strongly recommend passing `instructions`, which is the "system prompt" for the agent. In - addition, you can pass `handoff_description`, which is a human-readable description of the - agent, used when the agent is used inside tools/handoffs. - - Agents are generic on the context type. The context is a (mutable) object you create. It is - passed to tool functions, handoffs, guardrails, etc. - """ - - name: str - """The name of the agent.""" - - instructions: ( - str - | Callable[ - [RunContextWrapper[TContext], Agent[TContext]], - MaybeAwaitable[str], - ] - | None - ) = None - """The instructions for the agent. Will be used as the "system prompt" when this agent is - invoked. Describes what the agent should do, and how it responds. - - Can either be a string, or a function that dynamically generates instructions for the agent. If - you provide a function, it will be called with the context and the agent instance. It must - return a string. - """ - - handoff_description: str | None = None - """A description of the agent. This is used when the agent is used as a handoff, so that an - LLM knows what it does and when to invoke it. - """ - - handoffs: list[Agent[Any] | Handoff[TContext]] = field(default_factory=list) - """Handoffs are sub-agents that the agent can delegate to. You can provide a list of handoffs, - and the agent can choose to delegate to them if relevant. Allows for separation of concerns and - modularity. - """ - - model: str | Model | None = None - """The model implementation to use when invoking the LLM. - - By default, if not set, the agent will use the default model configured in - `openai_provider.DEFAULT_MODEL` (currently "gpt-4o"). - """ - - model_settings: ModelSettings = field(default_factory=ModelSettings) - """Configures model-specific tuning parameters (e.g. temperature, top_p). - """ - - tools: list[Tool] = field(default_factory=list) - """A list of tools that the agent can use.""" - - mcp_servers: list[MCPServer] = field(default_factory=list) - """A list of [Model Context Protocol](https://modelcontextprotocol.io/) servers that - the agent can use. Every time the agent runs, it will include tools from these servers in the - list of available tools. - - NOTE: You are expected to manage the lifecycle of these servers. Specifically, you must call - `server.connect()` before passing it to the agent, and `server.cleanup()` when the server is no - longer needed. - """ - - mcp_config: MCPConfig = field(default_factory=lambda: MCPConfig()) - """Configuration for MCP servers.""" - - input_guardrails: list[InputGuardrail[TContext]] = field(default_factory=list) - """A list of checks that run in parallel to the agent's execution, before generating a - response. Runs only if the agent is the first agent in the chain. - """ - - output_guardrails: list[OutputGuardrail[TContext]] = field(default_factory=list) - """A list of checks that run on the final output of the agent, after generating a response. - Runs only if the agent produces a final output. - """ - - output_type: type[Any] | None = None - """The type of the output object. If not provided, the output will be `str`.""" - - hooks: AgentHooks[TContext] | None = None - """A class that receives callbacks on various lifecycle events for this agent. 
- """ - - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again" - """This lets you configure how tool use is handled. - - "run_llm_again": The default behavior. Tools are run, and then the LLM receives the results - and gets to respond. - - "stop_on_first_tool": The output of the first tool call is used as the final output. This - means that the LLM does not process the result of the tool call. - - A list of tool names: The agent will stop running if any of the tools in the list are called. - The final output will be the output of the first matching tool call. The LLM does not - process the result of the tool call. - - A function: If you pass a function, it will be called with the run context and the list of - tool results. It must return a `ToolToFinalOutputResult`, which determines whether the tool - calls result in a final output. - - NOTE: This configuration is specific to FunctionTools. Hosted tools, such as file search, - web search, etc are always processed by the LLM. - """ - - reset_tool_choice: bool = True - """Whether to reset the tool choice to the default value after a tool has been called. Defaults - to True. This ensures that the agent doesn't enter an infinite loop of tool usage.""" - - def clone(self, **kwargs: Any) -> Agent[TContext]: - """Make a copy of the agent, with the given arguments changed. For example, you could do: - ``` - new_agent = agent.clone(instructions="New instructions") - ``` - """ - return dataclasses.replace(self, **kwargs) - - def as_tool( - self, - tool_name: str | None, - tool_description: str | None, - custom_output_extractor: Callable[[RunResult], Awaitable[str]] | None = None, - ) -> Tool: - """Transform this agent into a tool, callable by other agents. - - This is different from handoffs in two ways: - 1. In handoffs, the new agent receives the conversation history. In this tool, the new agent - receives generated input. - 2. In handoffs, the new agent takes over the conversation. In this tool, the new agent is - called as a tool, and the conversation is continued by the original agent. - - Args: - tool_name: The name of the tool. If not provided, the agent's name will be used. - tool_description: The description of the tool, which should indicate what it does and - when to use it. - custom_output_extractor: A function that extracts the output from the agent. If not - provided, the last message from the agent will be used. 
- """ - - @function_tool( - name_override=tool_name or _transforms.transform_string_function_style(self.name), - description_override=tool_description or "", - ) - async def run_agent(context: RunContextWrapper, input: str) -> str: - from .run import Runner - - output = await Runner.run( - starting_agent=self, - input=input, - context=context.context, - ) - if custom_output_extractor: - return await custom_output_extractor(output) - - return ItemHelpers.text_message_outputs(output.new_items) - - return run_agent - - async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> str | None: - """Get the system prompt for the agent.""" - if isinstance(self.instructions, str): - return self.instructions - elif callable(self.instructions): - if inspect.iscoroutinefunction(self.instructions): - return await cast(Awaitable[str], self.instructions(run_context, self)) - else: - return cast(str, self.instructions(run_context, self)) - elif self.instructions is not None: - logger.error(f"Instructions must be a string or a function, got {self.instructions}") - - return None - - async def get_mcp_tools(self) -> list[Tool]: - """Fetches the available tools from the MCP servers.""" - convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False) - return await MCPUtil.get_all_function_tools(self.mcp_servers, convert_schemas_to_strict) - - async def get_all_tools(self) -> list[Tool]: - """All agent tools, including MCP tools and function tools.""" - mcp_tools = await self.get_mcp_tools() - return mcp_tools + self.tools diff --git a/src/agents/computer.py b/src/agents/computer.py deleted file mode 100644 index 1b9224d5..00000000 --- a/src/agents/computer.py +++ /dev/null @@ -1,107 +0,0 @@ -import abc -from typing import Literal - -Environment = Literal["mac", "windows", "ubuntu", "browser"] -Button = Literal["left", "right", "wheel", "back", "forward"] - - -class Computer(abc.ABC): - """A computer implemented with sync operations. The Computer interface abstracts the - operations needed to control a computer or browser.""" - - @property - @abc.abstractmethod - def environment(self) -> Environment: - pass - - @property - @abc.abstractmethod - def dimensions(self) -> tuple[int, int]: - pass - - @abc.abstractmethod - def screenshot(self) -> str: - pass - - @abc.abstractmethod - def click(self, x: int, y: int, button: Button) -> None: - pass - - @abc.abstractmethod - def double_click(self, x: int, y: int) -> None: - pass - - @abc.abstractmethod - def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None: - pass - - @abc.abstractmethod - def type(self, text: str) -> None: - pass - - @abc.abstractmethod - def wait(self) -> None: - pass - - @abc.abstractmethod - def move(self, x: int, y: int) -> None: - pass - - @abc.abstractmethod - def keypress(self, keys: list[str]) -> None: - pass - - @abc.abstractmethod - def drag(self, path: list[tuple[int, int]]) -> None: - pass - - -class AsyncComputer(abc.ABC): - """A computer implemented with async operations. 
The Computer interface abstracts the - operations needed to control a computer or browser.""" - - @property - @abc.abstractmethod - def environment(self) -> Environment: - pass - - @property - @abc.abstractmethod - def dimensions(self) -> tuple[int, int]: - pass - - @abc.abstractmethod - async def screenshot(self) -> str: - pass - - @abc.abstractmethod - async def click(self, x: int, y: int, button: Button) -> None: - pass - - @abc.abstractmethod - async def double_click(self, x: int, y: int) -> None: - pass - - @abc.abstractmethod - async def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None: - pass - - @abc.abstractmethod - async def type(self, text: str) -> None: - pass - - @abc.abstractmethod - async def wait(self) -> None: - pass - - @abc.abstractmethod - async def move(self, x: int, y: int) -> None: - pass - - @abc.abstractmethod - async def keypress(self, keys: list[str]) -> None: - pass - - @abc.abstractmethod - async def drag(self, path: list[tuple[int, int]]) -> None: - pass diff --git a/src/agents/exceptions.py b/src/agents/exceptions.py deleted file mode 100644 index 78898f01..00000000 --- a/src/agents/exceptions.py +++ /dev/null @@ -1,63 +0,0 @@ -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from .guardrail import InputGuardrailResult, OutputGuardrailResult - - -class AgentsException(Exception): - """Base class for all exceptions in the Agents SDK.""" - - -class MaxTurnsExceeded(AgentsException): - """Exception raised when the maximum number of turns is exceeded.""" - - message: str - - def __init__(self, message: str): - self.message = message - - -class ModelBehaviorError(AgentsException): - """Exception raised when the model does something unexpected, e.g. calling a tool that doesn't - exist, or providing malformed JSON. 
- """ - - message: str - - def __init__(self, message: str): - self.message = message - - -class UserError(AgentsException): - """Exception raised when the user makes an error using the SDK.""" - - message: str - - def __init__(self, message: str): - self.message = message - - -class InputGuardrailTripwireTriggered(AgentsException): - """Exception raised when a guardrail tripwire is triggered.""" - - guardrail_result: "InputGuardrailResult" - """The result data of the guardrail that was triggered.""" - - def __init__(self, guardrail_result: "InputGuardrailResult"): - self.guardrail_result = guardrail_result - super().__init__( - f"Guardrail {guardrail_result.guardrail.__class__.__name__} triggered tripwire" - ) - - -class OutputGuardrailTripwireTriggered(AgentsException): - """Exception raised when a guardrail tripwire is triggered.""" - - guardrail_result: "OutputGuardrailResult" - """The result data of the guardrail that was triggered.""" - - def __init__(self, guardrail_result: "OutputGuardrailResult"): - self.guardrail_result = guardrail_result - super().__init__( - f"Guardrail {guardrail_result.guardrail.__class__.__name__} triggered tripwire" - ) diff --git a/src/agents/extensions/__init__.py b/src/agents/extensions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agents/extensions/handoff_filters.py b/src/agents/extensions/handoff_filters.py deleted file mode 100644 index f4f9b8bf..00000000 --- a/src/agents/extensions/handoff_filters.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import annotations - -from ..handoffs import HandoffInputData -from ..items import ( - HandoffCallItem, - HandoffOutputItem, - RunItem, - ToolCallItem, - ToolCallOutputItem, - TResponseInputItem, -) - -"""Contains common handoff input filters, for convenience. 
""" - - -def remove_all_tools(handoff_input_data: HandoffInputData) -> HandoffInputData: - """Filters out all tool items: file search, web search and function calls+output.""" - - history = handoff_input_data.input_history - new_items = handoff_input_data.new_items - - filtered_history = ( - _remove_tool_types_from_input(history) if isinstance(history, tuple) else history - ) - filtered_pre_handoff_items = _remove_tools_from_items(handoff_input_data.pre_handoff_items) - filtered_new_items = _remove_tools_from_items(new_items) - - return HandoffInputData( - input_history=filtered_history, - pre_handoff_items=filtered_pre_handoff_items, - new_items=filtered_new_items, - ) - - -def _remove_tools_from_items(items: tuple[RunItem, ...]) -> tuple[RunItem, ...]: - filtered_items = [] - for item in items: - if ( - isinstance(item, HandoffCallItem) - or isinstance(item, HandoffOutputItem) - or isinstance(item, ToolCallItem) - or isinstance(item, ToolCallOutputItem) - ): - continue - filtered_items.append(item) - return tuple(filtered_items) - - -def _remove_tool_types_from_input( - items: tuple[TResponseInputItem, ...], -) -> tuple[TResponseInputItem, ...]: - tool_types = [ - "function_call", - "function_call_output", - "computer_call", - "computer_call_output", - "file_search_call", - "web_search_call", - ] - - filtered_items: list[TResponseInputItem] = [] - for item in items: - itype = item.get("type") - if itype in tool_types: - continue - filtered_items.append(item) - return tuple(filtered_items) diff --git a/src/agents/extensions/handoff_prompt.py b/src/agents/extensions/handoff_prompt.py deleted file mode 100644 index cfb5ca7e..00000000 --- a/src/agents/extensions/handoff_prompt.py +++ /dev/null @@ -1,19 +0,0 @@ -# A recommended prompt prefix for agents that use handoffs. We recommend including this or -# similar instructions in any agents that use handoffs. -RECOMMENDED_PROMPT_PREFIX = ( - "# System context\n" - "You are part of a multi-agent system called the Agents SDK, designed to make agent " - "coordination and execution easy. Agents uses two primary abstraction: **Agents** and " - "**Handoffs**. An agent encompasses instructions and tools and can hand off a " - "conversation to another agent when appropriate. " - "Handoffs are achieved by calling a handoff function, generally named " - "`transfer_to_`. Transfers between agents are handled seamlessly in the background;" - " do not mention or draw attention to these transfers in your conversation with the user.\n" -) - - -def prompt_with_handoff_instructions(prompt: str) -> str: - """ - Add recommended instructions to the prompt for agents that use handoffs. - """ - return f"{RECOMMENDED_PROMPT_PREFIX}\n\n{prompt}" diff --git a/src/agents/extensions/visualization.py b/src/agents/extensions/visualization.py deleted file mode 100644 index 5fb35062..00000000 --- a/src/agents/extensions/visualization.py +++ /dev/null @@ -1,137 +0,0 @@ -from typing import Optional - -import graphviz # type: ignore - -from agents import Agent -from agents.handoffs import Handoff -from agents.tool import Tool - - -def get_main_graph(agent: Agent) -> str: - """ - Generates the main graph structure in DOT format for the given agent. - - Args: - agent (Agent): The agent for which the graph is to be generated. - - Returns: - str: The DOT format string representing the graph. 
- """ - parts = [ - """ - digraph G { - graph [splines=true]; - node [fontname="Arial"]; - edge [penwidth=1.5]; - """ - ] - parts.append(get_all_nodes(agent)) - parts.append(get_all_edges(agent)) - parts.append("}") - return "".join(parts) - - -def get_all_nodes(agent: Agent, parent: Optional[Agent] = None) -> str: - """ - Recursively generates the nodes for the given agent and its handoffs in DOT format. - - Args: - agent (Agent): The agent for which the nodes are to be generated. - - Returns: - str: The DOT format string representing the nodes. - """ - parts = [] - - # Start and end the graph - parts.append( - '"__start__" [label="__start__", shape=ellipse, style=filled, ' - "fillcolor=lightblue, width=0.5, height=0.3];" - '"__end__" [label="__end__", shape=ellipse, style=filled, ' - "fillcolor=lightblue, width=0.5, height=0.3];" - ) - # Ensure parent agent node is colored - if not parent: - parts.append( - f'"{agent.name}" [label="{agent.name}", shape=box, style=filled, ' - "fillcolor=lightyellow, width=1.5, height=0.8];" - ) - - for tool in agent.tools: - parts.append( - f'"{tool.name}" [label="{tool.name}", shape=ellipse, style=filled, ' - f"fillcolor=lightgreen, width=0.5, height=0.3];" - ) - - for handoff in agent.handoffs: - if isinstance(handoff, Handoff): - parts.append( - f'"{handoff.agent_name}" [label="{handoff.agent_name}", ' - f"shape=box, style=filled, style=rounded, " - f"fillcolor=lightyellow, width=1.5, height=0.8];" - ) - if isinstance(handoff, Agent): - parts.append( - f'"{handoff.name}" [label="{handoff.name}", ' - f"shape=box, style=filled, style=rounded, " - f"fillcolor=lightyellow, width=1.5, height=0.8];" - ) - parts.append(get_all_nodes(handoff)) - - return "".join(parts) - - -def get_all_edges(agent: Agent, parent: Optional[Agent] = None) -> str: - """ - Recursively generates the edges for the given agent and its handoffs in DOT format. - - Args: - agent (Agent): The agent for which the edges are to be generated. - parent (Agent, optional): The parent agent. Defaults to None. - - Returns: - str: The DOT format string representing the edges. - """ - parts = [] - - if not parent: - parts.append(f'"__start__" -> "{agent.name}";') - - for tool in agent.tools: - parts.append(f""" - "{agent.name}" -> "{tool.name}" [style=dotted, penwidth=1.5]; - "{tool.name}" -> "{agent.name}" [style=dotted, penwidth=1.5];""") - - for handoff in agent.handoffs: - if isinstance(handoff, Handoff): - parts.append(f""" - "{agent.name}" -> "{handoff.agent_name}";""") - if isinstance(handoff, Agent): - parts.append(f""" - "{agent.name}" -> "{handoff.name}";""") - parts.append(get_all_edges(handoff, agent)) - - if not agent.handoffs and not isinstance(agent, Tool): # type: ignore - parts.append(f'"{agent.name}" -> "__end__";') - - return "".join(parts) - - -def draw_graph(agent: Agent, filename: Optional[str] = None) -> graphviz.Source: - """ - Draws the graph for the given agent and optionally saves it as a PNG file. - - Args: - agent (Agent): The agent for which the graph is to be drawn. - filename (str): The name of the file to save the graph as a PNG. - - Returns: - graphviz.Source: The graphviz Source object representing the graph. 
- """ - dot_code = get_main_graph(agent) - graph = graphviz.Source(dot_code) - - if filename: - graph.render(filename, format="png") - - return graph diff --git a/src/agents/function_schema.py b/src/agents/function_schema.py deleted file mode 100644 index 681affce..00000000 --- a/src/agents/function_schema.py +++ /dev/null @@ -1,344 +0,0 @@ -from __future__ import annotations - -import contextlib -import inspect -import logging -import re -from dataclasses import dataclass -from typing import Any, Callable, Literal, get_args, get_origin, get_type_hints - -from griffe import Docstring, DocstringSectionKind -from pydantic import BaseModel, Field, create_model - -from .exceptions import UserError -from .run_context import RunContextWrapper -from .strict_schema import ensure_strict_json_schema - - -@dataclass -class FuncSchema: - """ - Captures the schema for a python function, in preparation for sending it to an LLM as a tool. - """ - - name: str - """The name of the function.""" - description: str | None - """The description of the function.""" - params_pydantic_model: type[BaseModel] - """A Pydantic model that represents the function's parameters.""" - params_json_schema: dict[str, Any] - """The JSON schema for the function's parameters, derived from the Pydantic model.""" - signature: inspect.Signature - """The signature of the function.""" - takes_context: bool = False - """Whether the function takes a RunContextWrapper argument (must be the first argument).""" - strict_json_schema: bool = True - """Whether the JSON schema is in strict mode. We **strongly** recommend setting this to True, - as it increases the likelihood of correct JSON input.""" - - def to_call_args(self, data: BaseModel) -> tuple[list[Any], dict[str, Any]]: - """ - Converts validated data from the Pydantic model into (args, kwargs), suitable for calling - the original function. - """ - positional_args: list[Any] = [] - keyword_args: dict[str, Any] = {} - seen_var_positional = False - - # Use enumerate() so we can skip the first parameter if it's context. - for idx, (name, param) in enumerate(self.signature.parameters.items()): - # If the function takes a RunContextWrapper and this is the first parameter, skip it. - if self.takes_context and idx == 0: - continue - - value = getattr(data, name, None) - if param.kind == param.VAR_POSITIONAL: - # e.g. *args: extend positional args and mark that *args is now seen - positional_args.extend(value or []) - seen_var_positional = True - elif param.kind == param.VAR_KEYWORD: - # e.g. **kwargs handling - keyword_args.update(value or {}) - elif param.kind in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD): - # Before *args, add to positional args. After *args, add to keyword args. - if not seen_var_positional: - positional_args.append(value) - else: - keyword_args[name] = value - else: - # For KEYWORD_ONLY parameters, always use keyword args. - keyword_args[name] = value - return positional_args, keyword_args - - -@dataclass -class FuncDocumentation: - """Contains metadata about a python function, extracted from its docstring.""" - - name: str - """The name of the function, via `__name__`.""" - description: str | None - """The description of the function, derived from the docstring.""" - param_descriptions: dict[str, str] | None - """The parameter descriptions of the function, derived from the docstring.""" - - -DocstringStyle = Literal["google", "numpy", "sphinx"] - - -# As of Feb 2025, the automatic style detection in griffe is an Insiders feature. 
This -# code approximates it. -def _detect_docstring_style(doc: str) -> DocstringStyle: - scores: dict[DocstringStyle, int] = {"sphinx": 0, "numpy": 0, "google": 0} - - # Sphinx style detection: look for :param, :type, :return:, and :rtype: - sphinx_patterns = [r"^:param\s", r"^:type\s", r"^:return:", r"^:rtype:"] - for pattern in sphinx_patterns: - if re.search(pattern, doc, re.MULTILINE): - scores["sphinx"] += 1 - - # Numpy style detection: look for headers like 'Parameters', 'Returns', or 'Yields' followed by - # a dashed underline - numpy_patterns = [ - r"^Parameters\s*\n\s*-{3,}", - r"^Returns\s*\n\s*-{3,}", - r"^Yields\s*\n\s*-{3,}", - ] - for pattern in numpy_patterns: - if re.search(pattern, doc, re.MULTILINE): - scores["numpy"] += 1 - - # Google style detection: look for section headers with a trailing colon - google_patterns = [r"^(Args|Arguments):", r"^(Returns):", r"^(Raises):"] - for pattern in google_patterns: - if re.search(pattern, doc, re.MULTILINE): - scores["google"] += 1 - - max_score = max(scores.values()) - if max_score == 0: - return "google" - - # Priority order: sphinx > numpy > google in case of tie - styles: list[DocstringStyle] = ["sphinx", "numpy", "google"] - - for style in styles: - if scores[style] == max_score: - return style - - return "google" - - -@contextlib.contextmanager -def _suppress_griffe_logging(): - # Supresses warnings about missing annotations for params - logger = logging.getLogger("griffe") - previous_level = logger.getEffectiveLevel() - logger.setLevel(logging.ERROR) - try: - yield - finally: - logger.setLevel(previous_level) - - -def generate_func_documentation( - func: Callable[..., Any], style: DocstringStyle | None = None -) -> FuncDocumentation: - """ - Extracts metadata from a function docstring, in preparation for sending it to an LLM as a tool. - - Args: - func: The function to extract documentation from. - style: The style of the docstring to use for parsing. If not provided, we will attempt to - auto-detect the style. - - Returns: - A FuncDocumentation object containing the function's name, description, and parameter - descriptions. - """ - name = func.__name__ - doc = inspect.getdoc(func) - if not doc: - return FuncDocumentation(name=name, description=None, param_descriptions=None) - - with _suppress_griffe_logging(): - docstring = Docstring(doc, lineno=1, parser=style or _detect_docstring_style(doc)) - parsed = docstring.parse() - - description: str | None = next( - (section.value for section in parsed if section.kind == DocstringSectionKind.text), None - ) - - param_descriptions: dict[str, str] = { - param.name: param.description - for section in parsed - if section.kind == DocstringSectionKind.parameters - for param in section.value - } - - return FuncDocumentation( - name=func.__name__, - description=description, - param_descriptions=param_descriptions or None, - ) - - -def function_schema( - func: Callable[..., Any], - docstring_style: DocstringStyle | None = None, - name_override: str | None = None, - description_override: str | None = None, - use_docstring_info: bool = True, - strict_json_schema: bool = True, -) -> FuncSchema: - """ - Given a python function, extracts a `FuncSchema` from it, capturing the name, description, - parameter descriptions, and other metadata. - - Args: - func: The function to extract the schema from. - docstring_style: The style of the docstring to use for parsing. If not provided, we will - attempt to auto-detect the style. 
- name_override: If provided, use this name instead of the function's `__name__`. - description_override: If provided, use this description instead of the one derived from the - docstring. - use_docstring_info: If True, uses the docstring to generate the description and parameter - descriptions. - strict_json_schema: Whether the JSON schema is in strict mode. If True, we'll ensure that - the schema adheres to the "strict" standard the OpenAI API expects. We **strongly** - recommend setting this to True, as it increases the likelihood of the LLM providing - correct JSON input. - - Returns: - A `FuncSchema` object containing the function's name, description, parameter descriptions, - and other metadata. - """ - - # 1. Grab docstring info - if use_docstring_info: - doc_info = generate_func_documentation(func, docstring_style) - param_descs = doc_info.param_descriptions or {} - else: - doc_info = None - param_descs = {} - - func_name = name_override or doc_info.name if doc_info else func.__name__ - - # 2. Inspect function signature and get type hints - sig = inspect.signature(func) - type_hints = get_type_hints(func) - params = list(sig.parameters.items()) - takes_context = False - filtered_params = [] - - if params: - first_name, first_param = params[0] - # Prefer the evaluated type hint if available - ann = type_hints.get(first_name, first_param.annotation) - if ann != inspect._empty: - origin = get_origin(ann) or ann - if origin is RunContextWrapper: - takes_context = True # Mark that the function takes context - else: - filtered_params.append((first_name, first_param)) - else: - filtered_params.append((first_name, first_param)) - - # For parameters other than the first, raise error if any use RunContextWrapper. - for name, param in params[1:]: - ann = type_hints.get(name, param.annotation) - if ann != inspect._empty: - origin = get_origin(ann) or ann - if origin is RunContextWrapper: - raise UserError( - f"RunContextWrapper param found at non-first position in function" - f" {func.__name__}" - ) - filtered_params.append((name, param)) - - # We will collect field definitions for create_model as a dict: - # field_name -> (type_annotation, default_value_or_Field(...)) - fields: dict[str, Any] = {} - - for name, param in filtered_params: - ann = type_hints.get(name, param.annotation) - default = param.default - - # If there's no type hint, assume `Any` - if ann == inspect._empty: - ann = Any - - # If a docstring param description exists, use it - field_description = param_descs.get(name, None) - - # Handle different parameter kinds - if param.kind == param.VAR_POSITIONAL: - # e.g. *args: extend positional args - if get_origin(ann) is tuple: - # e.g. def foo(*args: tuple[int, ...]) -> treat as List[int] - args_of_tuple = get_args(ann) - if len(args_of_tuple) == 2 and args_of_tuple[1] is Ellipsis: - ann = list[args_of_tuple[0]] # type: ignore - else: - ann = list[Any] - else: - # If user wrote *args: int, treat as List[int] - ann = list[ann] # type: ignore - - # Default factory to empty list - fields[name] = ( - ann, - Field(default_factory=list, description=field_description), # type: ignore - ) - - elif param.kind == param.VAR_KEYWORD: - # **kwargs handling - if get_origin(ann) is dict: - # e.g. def foo(**kwargs: dict[str, int]) - dict_args = get_args(ann) - if len(dict_args) == 2: - ann = dict[dict_args[0], dict_args[1]] # type: ignore - else: - ann = dict[str, Any] - else: - # e.g. 
def foo(**kwargs: int) -> Dict[str, int] - ann = dict[str, ann] # type: ignore - - fields[name] = ( - ann, - Field(default_factory=dict, description=field_description), # type: ignore - ) - - else: - # Normal parameter - if default == inspect._empty: - # Required field - fields[name] = ( - ann, - Field(..., description=field_description), - ) - else: - # Parameter with a default value - fields[name] = ( - ann, - Field(default=default, description=field_description), - ) - - # 3. Dynamically build a Pydantic model - dynamic_model = create_model(f"{func_name}_args", __base__=BaseModel, **fields) - - # 4. Build JSON schema from that model - json_schema = dynamic_model.model_json_schema() - if strict_json_schema: - json_schema = ensure_strict_json_schema(json_schema) - - # 5. Return as a FuncSchema dataclass - return FuncSchema( - name=func_name, - description=description_override or doc_info.description if doc_info else None, - params_pydantic_model=dynamic_model, - params_json_schema=json_schema, - signature=sig, - takes_context=takes_context, - strict_json_schema=strict_json_schema, - ) diff --git a/src/agents/guardrail.py b/src/agents/guardrail.py deleted file mode 100644 index a96f0f7d..00000000 --- a/src/agents/guardrail.py +++ /dev/null @@ -1,320 +0,0 @@ -from __future__ import annotations - -import inspect -from collections.abc import Awaitable -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Callable, Generic, Union, overload - -from typing_extensions import TypeVar - -from .exceptions import UserError -from .items import TResponseInputItem -from .run_context import RunContextWrapper, TContext -from .util._types import MaybeAwaitable - -if TYPE_CHECKING: - from .agent import Agent - - -@dataclass -class GuardrailFunctionOutput: - """The output of a guardrail function.""" - - output_info: Any - """ - Optional information about the guardrail's output. For example, the guardrail could include - information about the checks it performed and granular results. - """ - - tripwire_triggered: bool - """ - Whether the tripwire was triggered. If triggered, the agent's execution will be halted. - """ - - -@dataclass -class InputGuardrailResult: - """The result of a guardrail run.""" - - guardrail: InputGuardrail[Any] - """ - The guardrail that was run. - """ - - output: GuardrailFunctionOutput - """The output of the guardrail function.""" - - -@dataclass -class OutputGuardrailResult: - """The result of a guardrail run.""" - - guardrail: OutputGuardrail[Any] - """ - The guardrail that was run. - """ - - agent_output: Any - """ - The output of the agent that was checked by the guardrail. - """ - - agent: Agent[Any] - """ - The agent that was checked by the guardrail. - """ - - output: GuardrailFunctionOutput - """The output of the guardrail function.""" - - -@dataclass -class InputGuardrail(Generic[TContext]): - """Input guardrails are checks that run in parallel to the agent's execution. - They can be used to do things like: - - Check if input messages are off-topic - - Take over control of the agent's execution if an unexpected input is detected - - You can use the `@input_guardrail()` decorator to turn a function into an `InputGuardrail`, or - create an `InputGuardrail` manually. - - Guardrails return a `GuardrailResult`. 
If `result.tripwire_triggered` is `True`, the agent - execution will immediately stop and a `InputGuardrailTripwireTriggered` exception will be raised - """ - - guardrail_function: Callable[ - [RunContextWrapper[TContext], Agent[Any], str | list[TResponseInputItem]], - MaybeAwaitable[GuardrailFunctionOutput], - ] - """A function that receives the agent input and the context, and returns a - `GuardrailResult`. The result marks whether the tripwire was triggered, and can optionally - include information about the guardrail's output. - """ - - name: str | None = None - """The name of the guardrail, used for tracing. If not provided, we'll use the guardrail - function's name. - """ - - def get_name(self) -> str: - if self.name: - return self.name - - return self.guardrail_function.__name__ - - async def run( - self, - agent: Agent[Any], - input: str | list[TResponseInputItem], - context: RunContextWrapper[TContext], - ) -> InputGuardrailResult: - if not callable(self.guardrail_function): - raise UserError(f"Guardrail function must be callable, got {self.guardrail_function}") - - output = self.guardrail_function(context, agent, input) - if inspect.isawaitable(output): - return InputGuardrailResult( - guardrail=self, - output=await output, - ) - - return InputGuardrailResult( - guardrail=self, - output=output, - ) - - -@dataclass -class OutputGuardrail(Generic[TContext]): - """Output guardrails are checks that run on the final output of an agent. - They can be used to do check if the output passes certain validation criteria - - You can use the `@output_guardrail()` decorator to turn a function into an `OutputGuardrail`, - or create an `OutputGuardrail` manually. - - Guardrails return a `GuardrailResult`. If `result.tripwire_triggered` is `True`, a - `OutputGuardrailTripwireTriggered` exception will be raised. - """ - - guardrail_function: Callable[ - [RunContextWrapper[TContext], Agent[Any], Any], - MaybeAwaitable[GuardrailFunctionOutput], - ] - """A function that receives the final agent, its output, and the context, and returns a - `GuardrailResult`. The result marks whether the tripwire was triggered, and can optionally - include information about the guardrail's output. - """ - - name: str | None = None - """The name of the guardrail, used for tracing. If not provided, we'll use the guardrail - function's name. 
- """ - - def get_name(self) -> str: - if self.name: - return self.name - - return self.guardrail_function.__name__ - - async def run( - self, context: RunContextWrapper[TContext], agent: Agent[Any], agent_output: Any - ) -> OutputGuardrailResult: - if not callable(self.guardrail_function): - raise UserError(f"Guardrail function must be callable, got {self.guardrail_function}") - - output = self.guardrail_function(context, agent, agent_output) - if inspect.isawaitable(output): - return OutputGuardrailResult( - guardrail=self, - agent=agent, - agent_output=agent_output, - output=await output, - ) - - return OutputGuardrailResult( - guardrail=self, - agent=agent, - agent_output=agent_output, - output=output, - ) - - -TContext_co = TypeVar("TContext_co", bound=Any, covariant=True) - -# For InputGuardrail -_InputGuardrailFuncSync = Callable[ - [RunContextWrapper[TContext_co], "Agent[Any]", Union[str, list[TResponseInputItem]]], - GuardrailFunctionOutput, -] -_InputGuardrailFuncAsync = Callable[ - [RunContextWrapper[TContext_co], "Agent[Any]", Union[str, list[TResponseInputItem]]], - Awaitable[GuardrailFunctionOutput], -] - - -@overload -def input_guardrail( - func: _InputGuardrailFuncSync[TContext_co], -) -> InputGuardrail[TContext_co]: ... - - -@overload -def input_guardrail( - func: _InputGuardrailFuncAsync[TContext_co], -) -> InputGuardrail[TContext_co]: ... - - -@overload -def input_guardrail( - *, - name: str | None = None, -) -> Callable[ - [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]], - InputGuardrail[TContext_co], -]: ... - - -def input_guardrail( - func: _InputGuardrailFuncSync[TContext_co] - | _InputGuardrailFuncAsync[TContext_co] - | None = None, - *, - name: str | None = None, -) -> ( - InputGuardrail[TContext_co] - | Callable[ - [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]], - InputGuardrail[TContext_co], - ] -): - """ - Decorator that transforms a sync or async function into an `InputGuardrail`. - It can be used directly (no parentheses) or with keyword args, e.g.: - - @input_guardrail - def my_sync_guardrail(...): ... - - @input_guardrail(name="guardrail_name") - async def my_async_guardrail(...): ... - """ - - def decorator( - f: _InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co], - ) -> InputGuardrail[TContext_co]: - return InputGuardrail(guardrail_function=f, name=name) - - if func is not None: - # Decorator was used without parentheses - return decorator(func) - - # Decorator used with keyword arguments - return decorator - - -_OutputGuardrailFuncSync = Callable[ - [RunContextWrapper[TContext_co], "Agent[Any]", Any], - GuardrailFunctionOutput, -] -_OutputGuardrailFuncAsync = Callable[ - [RunContextWrapper[TContext_co], "Agent[Any]", Any], - Awaitable[GuardrailFunctionOutput], -] - - -@overload -def output_guardrail( - func: _OutputGuardrailFuncSync[TContext_co], -) -> OutputGuardrail[TContext_co]: ... - - -@overload -def output_guardrail( - func: _OutputGuardrailFuncAsync[TContext_co], -) -> OutputGuardrail[TContext_co]: ... - - -@overload -def output_guardrail( - *, - name: str | None = None, -) -> Callable[ - [_OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co]], - OutputGuardrail[TContext_co], -]: ... 
- - -def output_guardrail( - func: _OutputGuardrailFuncSync[TContext_co] - | _OutputGuardrailFuncAsync[TContext_co] - | None = None, - *, - name: str | None = None, -) -> ( - OutputGuardrail[TContext_co] - | Callable[ - [_OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co]], - OutputGuardrail[TContext_co], - ] -): - """ - Decorator that transforms a sync or async function into an `OutputGuardrail`. - It can be used directly (no parentheses) or with keyword args, e.g.: - - @output_guardrail - def my_sync_guardrail(...): ... - - @output_guardrail(name="guardrail_name") - async def my_async_guardrail(...): ... - """ - - def decorator( - f: _OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co], - ) -> OutputGuardrail[TContext_co]: - return OutputGuardrail(guardrail_function=f, name=name) - - if func is not None: - # Decorator was used without parentheses - return decorator(func) - - # Decorator used with keyword arguments - return decorator diff --git a/src/agents/handoffs.py b/src/agents/handoffs.py deleted file mode 100644 index 686191f3..00000000 --- a/src/agents/handoffs.py +++ /dev/null @@ -1,236 +0,0 @@ -from __future__ import annotations - -import inspect -from collections.abc import Awaitable -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Callable, Generic, cast, overload - -from pydantic import TypeAdapter -from typing_extensions import TypeAlias, TypeVar - -from .exceptions import ModelBehaviorError, UserError -from .items import RunItem, TResponseInputItem -from .run_context import RunContextWrapper, TContext -from .strict_schema import ensure_strict_json_schema -from .tracing.spans import SpanError -from .util import _error_tracing, _json, _transforms - -if TYPE_CHECKING: - from .agent import Agent - - -# The handoff input type is the type of data passed when the agent is called via a handoff. -THandoffInput = TypeVar("THandoffInput", default=Any) - -OnHandoffWithInput = Callable[[RunContextWrapper[Any], THandoffInput], Any] -OnHandoffWithoutInput = Callable[[RunContextWrapper[Any]], Any] - - -@dataclass(frozen=True) -class HandoffInputData: - input_history: str | tuple[TResponseInputItem, ...] - """ - The input history before `Runner.run()` was called. - """ - - pre_handoff_items: tuple[RunItem, ...] - """ - The items generated before the agent turn where the handoff was invoked. - """ - - new_items: tuple[RunItem, ...] - """ - The new items generated during the current agent turn, including the item that triggered the - handoff and the tool output message representing the response from the handoff output. - """ - - -HandoffInputFilter: TypeAlias = Callable[[HandoffInputData], HandoffInputData] -"""A function that filters the input data passed to the next agent.""" - - -@dataclass -class Handoff(Generic[TContext]): - """A handoff is when an agent delegates a task to another agent. - For example, in a customer support scenario you might have a "triage agent" that determines - which agent should handle the user's request, and sub-agents that specialize in different - areas like billing, account management, etc. - """ - - tool_name: str - """The name of the tool that represents the handoff.""" - - tool_description: str - """The description of the tool that represents the handoff.""" - - input_json_schema: dict[str, Any] - """The JSON schema for the handoff input. Can be empty if the handoff does not take an input. 
- """ - - on_invoke_handoff: Callable[[RunContextWrapper[Any], str], Awaitable[Agent[TContext]]] - """The function that invokes the handoff. The parameters passed are: - 1. The handoff run context - 2. The arguments from the LLM, as a JSON string. Empty string if input_json_schema is empty. - - Must return an agent. - """ - - agent_name: str - """The name of the agent that is being handed off to.""" - - input_filter: HandoffInputFilter | None = None - """A function that filters the inputs that are passed to the next agent. By default, the new - agent sees the entire conversation history. In some cases, you may want to filter inputs e.g. - to remove older inputs, or remove tools from existing inputs. - - The function will receive the entire conversation history so far, including the input item - that triggered the handoff and a tool call output item representing the handoff tool's output. - - You are free to modify the input history or new items as you see fit. The next agent that - runs will receive `handoff_input_data.all_items`. - - IMPORTANT: in streaming mode, we will not stream anything as a result of this function. The - items generated before will already have been streamed. - """ - - strict_json_schema: bool = True - """Whether the input JSON schema is in strict mode. We **strongly** recommend setting this to - True, as it increases the likelihood of correct JSON input. - """ - - def get_transfer_message(self, agent: Agent[Any]) -> str: - base = f"{{'assistant': '{agent.name}'}}" - return base - - @classmethod - def default_tool_name(cls, agent: Agent[Any]) -> str: - return _transforms.transform_string_function_style(f"transfer_to_{agent.name}") - - @classmethod - def default_tool_description(cls, agent: Agent[Any]) -> str: - return ( - f"Handoff to the {agent.name} agent to handle the request. " - f"{agent.handoff_description or ''}" - ) - - -@overload -def handoff( - agent: Agent[TContext], - *, - tool_name_override: str | None = None, - tool_description_override: str | None = None, - input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None, -) -> Handoff[TContext]: ... - - -@overload -def handoff( - agent: Agent[TContext], - *, - on_handoff: OnHandoffWithInput[THandoffInput], - input_type: type[THandoffInput], - tool_description_override: str | None = None, - tool_name_override: str | None = None, - input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None, -) -> Handoff[TContext]: ... - - -@overload -def handoff( - agent: Agent[TContext], - *, - on_handoff: OnHandoffWithoutInput, - tool_description_override: str | None = None, - tool_name_override: str | None = None, - input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None, -) -> Handoff[TContext]: ... - - -def handoff( - agent: Agent[TContext], - tool_name_override: str | None = None, - tool_description_override: str | None = None, - on_handoff: OnHandoffWithInput[THandoffInput] | OnHandoffWithoutInput | None = None, - input_type: type[THandoffInput] | None = None, - input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None, -) -> Handoff[TContext]: - """Create a handoff from an agent. - - Args: - agent: The agent to handoff to, or a function that returns an agent. - tool_name_override: Optional override for the name of the tool that represents the handoff. - tool_description_override: Optional override for the description of the tool that - represents the handoff. - on_handoff: A function that runs when the handoff is invoked. 
- input_type: the type of the input to the handoff. If provided, the input will be validated - against this type. Only relevant if you pass a function that takes an input. - input_filter: a function that filters the inputs that are passed to the next agent. - """ - assert (on_handoff and input_type) or not (on_handoff and input_type), ( - "You must provide either both on_input and input_type, or neither" - ) - type_adapter: TypeAdapter[Any] | None - if input_type is not None: - assert callable(on_handoff), "on_handoff must be callable" - sig = inspect.signature(on_handoff) - if len(sig.parameters) != 2: - raise UserError("on_handoff must take two arguments: context and input") - - type_adapter = TypeAdapter(input_type) - input_json_schema = type_adapter.json_schema() - else: - type_adapter = None - input_json_schema = {} - if on_handoff is not None: - sig = inspect.signature(on_handoff) - if len(sig.parameters) != 1: - raise UserError("on_handoff must take one argument: context") - - async def _invoke_handoff( - ctx: RunContextWrapper[Any], input_json: str | None = None - ) -> Agent[Any]: - if input_type is not None and type_adapter is not None: - if input_json is None: - _error_tracing.attach_error_to_current_span( - SpanError( - message="Handoff function expected non-null input, but got None", - data={"details": "input_json is None"}, - ) - ) - raise ModelBehaviorError("Handoff function expected non-null input, but got None") - - validated_input = _json.validate_json( - json_str=input_json, - type_adapter=type_adapter, - partial=False, - ) - input_func = cast(OnHandoffWithInput[THandoffInput], on_handoff) - if inspect.iscoroutinefunction(input_func): - await input_func(ctx, validated_input) - else: - input_func(ctx, validated_input) - elif on_handoff is not None: - no_input_func = cast(OnHandoffWithoutInput, on_handoff) - if inspect.iscoroutinefunction(no_input_func): - await no_input_func(ctx) - else: - no_input_func(ctx) - - return agent - - tool_name = tool_name_override or Handoff.default_tool_name(agent) - tool_description = tool_description_override or Handoff.default_tool_description(agent) - - # Always ensure the input JSON schema is in strict mode - # If there is a need, we can make this configurable in the future - input_json_schema = ensure_strict_json_schema(input_json_schema) - - return Handoff( - tool_name=tool_name, - tool_description=tool_description, - input_json_schema=input_json_schema, - on_invoke_handoff=_invoke_handoff, - input_filter=input_filter, - agent_name=agent.name, - ) diff --git a/src/agents/items.py b/src/agents/items.py deleted file mode 100644 index c2af0dfc..00000000 --- a/src/agents/items.py +++ /dev/null @@ -1,248 +0,0 @@ -from __future__ import annotations - -import abc -import copy -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, Union - -from openai.types.responses import ( - Response, - ResponseComputerToolCall, - ResponseFileSearchToolCall, - ResponseFunctionToolCall, - ResponseFunctionWebSearch, - ResponseInputItemParam, - ResponseOutputItem, - ResponseOutputMessage, - ResponseOutputRefusal, - ResponseOutputText, - ResponseStreamEvent, -) -from openai.types.responses.response_input_item_param import ComputerCallOutput, FunctionCallOutput -from openai.types.responses.response_reasoning_item import ResponseReasoningItem -from pydantic import BaseModel -from typing_extensions import TypeAlias - -from .exceptions import AgentsException, ModelBehaviorError -from .usage import Usage - -if 
TYPE_CHECKING: - from .agent import Agent - -TResponse = Response -"""A type alias for the Response type from the OpenAI SDK.""" - -TResponseInputItem = ResponseInputItemParam -"""A type alias for the ResponseInputItemParam type from the OpenAI SDK.""" - -TResponseOutputItem = ResponseOutputItem -"""A type alias for the ResponseOutputItem type from the OpenAI SDK.""" - -TResponseStreamEvent = ResponseStreamEvent -"""A type alias for the ResponseStreamEvent type from the OpenAI SDK.""" - -T = TypeVar("T", bound=Union[TResponseOutputItem, TResponseInputItem]) - - -@dataclass -class RunItemBase(Generic[T], abc.ABC): - agent: Agent[Any] - """The agent whose run caused this item to be generated.""" - - raw_item: T - """The raw Responses item from the run. This will always be a either an output item (i.e. - `openai.types.responses.ResponseOutputItem` or an input item - (i.e. `openai.types.responses.ResponseInputItemParam`). - """ - - def to_input_item(self) -> TResponseInputItem: - """Converts this item into an input item suitable for passing to the model.""" - if isinstance(self.raw_item, dict): - # We know that input items are dicts, so we can ignore the type error - return self.raw_item # type: ignore - elif isinstance(self.raw_item, BaseModel): - # All output items are Pydantic models that can be converted to input items. - return self.raw_item.model_dump(exclude_unset=True) # type: ignore - else: - raise AgentsException(f"Unexpected raw item type: {type(self.raw_item)}") - - -@dataclass -class MessageOutputItem(RunItemBase[ResponseOutputMessage]): - """Represents a message from the LLM.""" - - raw_item: ResponseOutputMessage - """The raw response output message.""" - - type: Literal["message_output_item"] = "message_output_item" - - -@dataclass -class HandoffCallItem(RunItemBase[ResponseFunctionToolCall]): - """Represents a tool call for a handoff from one agent to another.""" - - raw_item: ResponseFunctionToolCall - """The raw response function tool call that represents the handoff.""" - - type: Literal["handoff_call_item"] = "handoff_call_item" - - -@dataclass -class HandoffOutputItem(RunItemBase[TResponseInputItem]): - """Represents the output of a handoff.""" - - raw_item: TResponseInputItem - """The raw input item that represents the handoff taking place.""" - - source_agent: Agent[Any] - """The agent that made the handoff.""" - - target_agent: Agent[Any] - """The agent that is being handed off to.""" - - type: Literal["handoff_output_item"] = "handoff_output_item" - - -ToolCallItemTypes: TypeAlias = Union[ - ResponseFunctionToolCall, - ResponseComputerToolCall, - ResponseFileSearchToolCall, - ResponseFunctionWebSearch, -] -"""A type that represents a tool call item.""" - - -@dataclass -class ToolCallItem(RunItemBase[ToolCallItemTypes]): - """Represents a tool call e.g. a function call or computer action call.""" - - raw_item: ToolCallItemTypes - """The raw tool call item.""" - - type: Literal["tool_call_item"] = "tool_call_item" - - -@dataclass -class ToolCallOutputItem(RunItemBase[Union[FunctionCallOutput, ComputerCallOutput]]): - """Represents the output of a tool call.""" - - raw_item: FunctionCallOutput | ComputerCallOutput - """The raw item from the model.""" - - output: Any - """The output of the tool call. This is whatever the tool call returned; the `raw_item` - contains a string representation of the output. 
- """ - - type: Literal["tool_call_output_item"] = "tool_call_output_item" - - -@dataclass -class ReasoningItem(RunItemBase[ResponseReasoningItem]): - """Represents a reasoning item.""" - - raw_item: ResponseReasoningItem - """The raw reasoning item.""" - - type: Literal["reasoning_item"] = "reasoning_item" - - -RunItem: TypeAlias = Union[ - MessageOutputItem, - HandoffCallItem, - HandoffOutputItem, - ToolCallItem, - ToolCallOutputItem, - ReasoningItem, -] -"""An item generated by an agent.""" - - -@dataclass -class ModelResponse: - output: list[TResponseOutputItem] - """A list of outputs (messages, tool calls, etc) generated by the model""" - - usage: Usage - """The usage information for the response.""" - - referenceable_id: str | None - """An ID for the response which can be used to refer to the response in subsequent calls to the - model. Not supported by all model providers. - """ - - def to_input_items(self) -> list[TResponseInputItem]: - """Convert the output into a list of input items suitable for passing to the model.""" - # We happen to know that the shape of the Pydantic output items are the same as the - # equivalent TypedDict input items, so we can just convert each one. - # This is also tested via unit tests. - return [it.model_dump(exclude_unset=True) for it in self.output] # type: ignore - - -class ItemHelpers: - @classmethod - def extract_last_content(cls, message: TResponseOutputItem) -> str: - """Extracts the last text content or refusal from a message.""" - if not isinstance(message, ResponseOutputMessage): - return "" - - last_content = message.content[-1] - if isinstance(last_content, ResponseOutputText): - return last_content.text - elif isinstance(last_content, ResponseOutputRefusal): - return last_content.refusal - else: - raise ModelBehaviorError(f"Unexpected content type: {type(last_content)}") - - @classmethod - def extract_last_text(cls, message: TResponseOutputItem) -> str | None: - """Extracts the last text content from a message, if any. 
Ignores refusals.""" - if isinstance(message, ResponseOutputMessage): - last_content = message.content[-1] - if isinstance(last_content, ResponseOutputText): - return last_content.text - - return None - - @classmethod - def input_to_new_input_list( - cls, input: str | list[TResponseInputItem] - ) -> list[TResponseInputItem]: - """Converts a string or list of input items into a list of input items.""" - if isinstance(input, str): - return [ - { - "content": input, - "role": "user", - } - ] - return copy.deepcopy(input) - - @classmethod - def text_message_outputs(cls, items: list[RunItem]) -> str: - """Concatenates all the text content from a list of message output items.""" - text = "" - for item in items: - if isinstance(item, MessageOutputItem): - text += cls.text_message_output(item) - return text - - @classmethod - def text_message_output(cls, message: MessageOutputItem) -> str: - """Extracts all the text content from a single message output item.""" - text = "" - for item in message.raw_item.content: - if isinstance(item, ResponseOutputText): - text += item.text - return text - - @classmethod - def tool_call_output_item( - cls, tool_call: ResponseFunctionToolCall, output: str - ) -> FunctionCallOutput: - """Creates a tool call output item from a tool call and its output.""" - return { - "call_id": tool_call.call_id, - "output": output, - "type": "function_call_output", - } diff --git a/src/agents/lifecycle.py b/src/agents/lifecycle.py deleted file mode 100644 index 8643248b..00000000 --- a/src/agents/lifecycle.py +++ /dev/null @@ -1,105 +0,0 @@ -from typing import Any, Generic - -from .agent import Agent -from .run_context import RunContextWrapper, TContext -from .tool import Tool - - -class RunHooks(Generic[TContext]): - """A class that receives callbacks on various lifecycle events in an agent run. Subclass and - override the methods you need. - """ - - async def on_agent_start( - self, context: RunContextWrapper[TContext], agent: Agent[TContext] - ) -> None: - """Called before the agent is invoked. Called each time the current agent changes.""" - pass - - async def on_agent_end( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - output: Any, - ) -> None: - """Called when the agent produces a final output.""" - pass - - async def on_handoff( - self, - context: RunContextWrapper[TContext], - from_agent: Agent[TContext], - to_agent: Agent[TContext], - ) -> None: - """Called when a handoff occurs.""" - pass - - async def on_tool_start( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - tool: Tool, - ) -> None: - """Called before a tool is invoked.""" - pass - - async def on_tool_end( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - tool: Tool, - result: str, - ) -> None: - """Called after a tool is invoked.""" - pass - - -class AgentHooks(Generic[TContext]): - """A class that receives callbacks on various lifecycle events for a specific agent. You can - set this on `agent.hooks` to receive events for that specific agent. - - Subclass and override the methods you need. - """ - - async def on_start(self, context: RunContextWrapper[TContext], agent: Agent[TContext]) -> None: - """Called before the agent is invoked. 
Called each time the running agent is changed to this - agent.""" - pass - - async def on_end( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - output: Any, - ) -> None: - """Called when the agent produces a final output.""" - pass - - async def on_handoff( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - source: Agent[TContext], - ) -> None: - """Called when the agent is being handed off to. The `source` is the agent that is handing - off to this agent.""" - pass - - async def on_tool_start( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - tool: Tool, - ) -> None: - """Called before a tool is invoked.""" - pass - - async def on_tool_end( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - tool: Tool, - result: str, - ) -> None: - """Called after a tool is invoked.""" - pass diff --git a/src/agents/logger.py b/src/agents/logger.py deleted file mode 100644 index bd81a827..00000000 --- a/src/agents/logger.py +++ /dev/null @@ -1,3 +0,0 @@ -import logging - -logger = logging.getLogger("openai.agents") diff --git a/src/agents/mcp/__init__.py b/src/agents/mcp/__init__.py deleted file mode 100644 index 1a72a89f..00000000 --- a/src/agents/mcp/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -try: - from .server import ( - MCPServer, - MCPServerSse, - MCPServerSseParams, - MCPServerStdio, - MCPServerStdioParams, - ) -except ImportError: - pass - -from .util import MCPUtil - -__all__ = [ - "MCPServer", - "MCPServerSse", - "MCPServerSseParams", - "MCPServerStdio", - "MCPServerStdioParams", - "MCPUtil", -] diff --git a/src/agents/mcp/server.py b/src/agents/mcp/server.py deleted file mode 100644 index e70d7ce6..00000000 --- a/src/agents/mcp/server.py +++ /dev/null @@ -1,301 +0,0 @@ -from __future__ import annotations - -import abc -import asyncio -from contextlib import AbstractAsyncContextManager, AsyncExitStack -from pathlib import Path -from typing import Any, Literal - -from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream -from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_client -from mcp.client.sse import sse_client -from mcp.types import CallToolResult, JSONRPCMessage -from typing_extensions import NotRequired, TypedDict - -from ..exceptions import UserError -from ..logger import logger - - -class MCPServer(abc.ABC): - """Base class for Model Context Protocol servers.""" - - @abc.abstractmethod - async def connect(self): - """Connect to the server. For example, this might mean spawning a subprocess or - opening a network connection. The server is expected to remain connected until - `cleanup()` is called. - """ - pass - - @property - @abc.abstractmethod - def name(self) -> str: - """A readable name for the server.""" - pass - - @abc.abstractmethod - async def cleanup(self): - """Cleanup the server. For example, this might mean closing a subprocess or - closing a network connection. - """ - pass - - @abc.abstractmethod - async def list_tools(self) -> list[MCPTool]: - """List the tools available on the server.""" - pass - - @abc.abstractmethod - async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult: - """Invoke a tool on the server.""" - pass - - -class _MCPServerWithClientSession(MCPServer, abc.ABC): - """Base class for MCP servers that use a `ClientSession` to communicate with the server.""" - - def __init__(self, cache_tools_list: bool): - """ - Args: - cache_tools_list: Whether to cache the tools list. 
If `True`, the tools list will be - cached and only fetched from the server once. If `False`, the tools list will be - fetched from the server on each call to `list_tools()`. The cache can be invalidated - by calling `invalidate_tools_cache()`. You should set this to `True` if you know the - server will not change its tools list, because it can drastically improve latency - (by avoiding a round-trip to the server every time). - """ - self.session: ClientSession | None = None - self.exit_stack: AsyncExitStack = AsyncExitStack() - self._cleanup_lock: asyncio.Lock = asyncio.Lock() - self.cache_tools_list = cache_tools_list - - # The cache is always dirty at startup, so that we fetch tools at least once - self._cache_dirty = True - self._tools_list: list[MCPTool] | None = None - - @abc.abstractmethod - def create_streams( - self, - ) -> AbstractAsyncContextManager[ - tuple[ - MemoryObjectReceiveStream[JSONRPCMessage | Exception], - MemoryObjectSendStream[JSONRPCMessage], - ] - ]: - """Create the streams for the server.""" - pass - - async def __aenter__(self): - await self.connect() - return self - - async def __aexit__(self, exc_type, exc_value, traceback): - await self.cleanup() - - def invalidate_tools_cache(self): - """Invalidate the tools cache.""" - self._cache_dirty = True - - async def connect(self): - """Connect to the server.""" - try: - transport = await self.exit_stack.enter_async_context(self.create_streams()) - read, write = transport - session = await self.exit_stack.enter_async_context(ClientSession(read, write)) - await session.initialize() - self.session = session - except Exception as e: - logger.error(f"Error initializing MCP server: {e}") - await self.cleanup() - raise - - async def list_tools(self) -> list[MCPTool]: - """List the tools available on the server.""" - if not self.session: - raise UserError("Server not initialized. Make sure you call `connect()` first.") - - # Return from cache if caching is enabled, we have tools, and the cache is not dirty - if self.cache_tools_list and not self._cache_dirty and self._tools_list: - return self._tools_list - - # Reset the cache dirty to False - self._cache_dirty = False - - # Fetch the tools from the server - self._tools_list = (await self.session.list_tools()).tools - return self._tools_list - - async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult: - """Invoke a tool on the server.""" - if not self.session: - raise UserError("Server not initialized. Make sure you call `connect()` first.") - - return await self.session.call_tool(tool_name, arguments) - - async def cleanup(self): - """Cleanup the server.""" - async with self._cleanup_lock: - try: - await self.exit_stack.aclose() - self.session = None - except Exception as e: - logger.error(f"Error cleaning up server: {e}") - - -class MCPServerStdioParams(TypedDict): - """Mirrors `mcp.client.stdio.StdioServerParameters`, but lets you pass params without another - import. - """ - - command: str - """The executable to run to start the server. For example, `python` or `node`.""" - - args: NotRequired[list[str]] - """Command line args to pass to the `command` executable. For example, `['foo.py']` or - `['server.js', '--port', '8080']`.""" - - env: NotRequired[dict[str, str]] - """The environment variables to set for the server. .""" - - cwd: NotRequired[str | Path] - """The working directory to use when spawning the process.""" - - encoding: NotRequired[str] - """The text encoding used when sending/receiving messages to the server. 
Defaults to `utf-8`.""" - - encoding_error_handler: NotRequired[Literal["strict", "ignore", "replace"]] - """The text encoding error handler. Defaults to `strict`. - - See https://docs.python.org/3/library/codecs.html#codec-base-classes for - explanations of possible values. - """ - - -class MCPServerStdio(_MCPServerWithClientSession): - """MCP server implementation that uses the stdio transport. See the [spec] - (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#stdio) for - details. - """ - - def __init__( - self, - params: MCPServerStdioParams, - cache_tools_list: bool = False, - name: str | None = None, - ): - """Create a new MCP server based on the stdio transport. - - Args: - params: The params that configure the server. This includes the command to run to - start the server, the args to pass to the command, the environment variables to - set for the server, the working directory to use when spawning the process, and - the text encoding used when sending/receiving messages to the server. - cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be - cached and only fetched from the server once. If `False`, the tools list will be - fetched from the server on each call to `list_tools()`. The cache can be - invalidated by calling `invalidate_tools_cache()`. You should set this to `True` - if you know the server will not change its tools list, because it can drastically - improve latency (by avoiding a round-trip to the server every time). - name: A readable name for the server. If not provided, we'll create one from the - command. - """ - super().__init__(cache_tools_list) - - self.params = StdioServerParameters( - command=params["command"], - args=params.get("args", []), - env=params.get("env"), - cwd=params.get("cwd"), - encoding=params.get("encoding", "utf-8"), - encoding_error_handler=params.get("encoding_error_handler", "strict"), - ) - - self._name = name or f"stdio: {self.params.command}" - - def create_streams( - self, - ) -> AbstractAsyncContextManager[ - tuple[ - MemoryObjectReceiveStream[JSONRPCMessage | Exception], - MemoryObjectSendStream[JSONRPCMessage], - ] - ]: - """Create the streams for the server.""" - return stdio_client(self.params) - - @property - def name(self) -> str: - """A readable name for the server.""" - return self._name - - -class MCPServerSseParams(TypedDict): - """Mirrors the params in`mcp.client.sse.sse_client`.""" - - url: str - """The URL of the server.""" - - headers: NotRequired[dict[str, str]] - """The headers to send to the server.""" - - timeout: NotRequired[float] - """The timeout for the HTTP request. Defaults to 5 seconds.""" - - sse_read_timeout: NotRequired[float] - """The timeout for the SSE connection, in seconds. Defaults to 5 minutes.""" - - -class MCPServerSse(_MCPServerWithClientSession): - """MCP server implementation that uses the HTTP with SSE transport. See the [spec] - (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#http-with-sse) - for details. - """ - - def __init__( - self, - params: MCPServerSseParams, - cache_tools_list: bool = False, - name: str | None = None, - ): - """Create a new MCP server based on the HTTP with SSE transport. - - Args: - params: The params that configure the server. This includes the URL of the server, - the headers to send to the server, the timeout for the HTTP request, and the - timeout for the SSE connection. - - cache_tools_list: Whether to cache the tools list. 
If `True`, the tools list will be - cached and only fetched from the server once. If `False`, the tools list will be - fetched from the server on each call to `list_tools()`. The cache can be - invalidated by calling `invalidate_tools_cache()`. You should set this to `True` - if you know the server will not change its tools list, because it can drastically - improve latency (by avoiding a round-trip to the server every time). - - name: A readable name for the server. If not provided, we'll create one from the - URL. - """ - super().__init__(cache_tools_list) - - self.params = params - self._name = name or f"sse: {self.params['url']}" - - def create_streams( - self, - ) -> AbstractAsyncContextManager[ - tuple[ - MemoryObjectReceiveStream[JSONRPCMessage | Exception], - MemoryObjectSendStream[JSONRPCMessage], - ] - ]: - """Create the streams for the server.""" - return sse_client( - url=self.params["url"], - headers=self.params.get("headers", None), - timeout=self.params.get("timeout", 5), - sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5), - ) - - @property - def name(self) -> str: - """A readable name for the server.""" - return self._name diff --git a/src/agents/mcp/util.py b/src/agents/mcp/util.py deleted file mode 100644 index bbfe1885..00000000 --- a/src/agents/mcp/util.py +++ /dev/null @@ -1,136 +0,0 @@ -import functools -import json -from typing import TYPE_CHECKING, Any - -from agents.strict_schema import ensure_strict_json_schema - -from .. import _debug -from ..exceptions import AgentsException, ModelBehaviorError, UserError -from ..logger import logger -from ..run_context import RunContextWrapper -from ..tool import FunctionTool, Tool -from ..tracing import FunctionSpanData, get_current_span, mcp_tools_span - -if TYPE_CHECKING: - from mcp.types import Tool as MCPTool - - from .server import MCPServer - - -class MCPUtil: - """Set of utilities for interop between MCP and Agents SDK tools.""" - - @classmethod - async def get_all_function_tools( - cls, servers: list["MCPServer"], convert_schemas_to_strict: bool - ) -> list[Tool]: - """Get all function tools from a list of MCP servers.""" - tools = [] - tool_names: set[str] = set() - for server in servers: - server_tools = await cls.get_function_tools(server, convert_schemas_to_strict) - server_tool_names = {tool.name for tool in server_tools} - if len(server_tool_names & tool_names) > 0: - raise UserError( - f"Duplicate tool names found across MCP servers: " - f"{server_tool_names & tool_names}" - ) - tool_names.update(server_tool_names) - tools.extend(server_tools) - - return tools - - @classmethod - async def get_function_tools( - cls, server: "MCPServer", convert_schemas_to_strict: bool - ) -> list[Tool]: - """Get all function tools from a single MCP server.""" - - with mcp_tools_span(server=server.name) as span: - tools = await server.list_tools() - span.span_data.result = [tool.name for tool in tools] - - return [cls.to_function_tool(tool, server, convert_schemas_to_strict) for tool in tools] - - @classmethod - def to_function_tool( - cls, tool: "MCPTool", server: "MCPServer", convert_schemas_to_strict: bool - ) -> FunctionTool: - """Convert an MCP tool to an Agents SDK function tool.""" - invoke_func = functools.partial(cls.invoke_mcp_tool, server, tool) - schema, is_strict = tool.inputSchema, False - - # MCP spec doesn't require the inputSchema to have `properties`, but OpenAI spec does. 
- if "properties" not in schema: - schema["properties"] = {} - - if convert_schemas_to_strict: - try: - schema = ensure_strict_json_schema(schema) - is_strict = True - except Exception as e: - logger.info(f"Error converting MCP schema to strict mode: {e}") - - return FunctionTool( - name=tool.name, - description=tool.description or "", - params_json_schema=schema, - on_invoke_tool=invoke_func, - strict_json_schema=is_strict, - ) - - @classmethod - async def invoke_mcp_tool( - cls, server: "MCPServer", tool: "MCPTool", context: RunContextWrapper[Any], input_json: str - ) -> str: - """Invoke an MCP tool and return the result as a string.""" - try: - json_data: dict[str, Any] = json.loads(input_json) if input_json else {} - except Exception as e: - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Invalid JSON input for tool {tool.name}") - else: - logger.debug(f"Invalid JSON input for tool {tool.name}: {input_json}") - raise ModelBehaviorError( - f"Invalid JSON input for tool {tool.name}: {input_json}" - ) from e - - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Invoking MCP tool {tool.name}") - else: - logger.debug(f"Invoking MCP tool {tool.name} with input {input_json}") - - try: - result = await server.call_tool(tool.name, json_data) - except Exception as e: - logger.error(f"Error invoking MCP tool {tool.name}: {e}") - raise AgentsException(f"Error invoking MCP tool {tool.name}: {e}") from e - - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"MCP tool {tool.name} completed.") - else: - logger.debug(f"MCP tool {tool.name} returned {result}") - - # The MCP tool result is a list of content items, whereas OpenAI tool outputs are a single - # string. We'll try to convert. - if len(result.content) == 1: - tool_output = result.content[0].model_dump_json() - elif len(result.content) > 1: - tool_output = json.dumps([item.model_dump() for item in result.content]) - else: - logger.error(f"Errored MCP tool result: {result}") - tool_output = "Error running tool." - - current_span = get_current_span() - if current_span: - if isinstance(current_span.span_data, FunctionSpanData): - current_span.span_data.output = tool_output - current_span.span_data.mcp_data = { - "server": server.name, - } - else: - logger.warning( - f"Current span is not a FunctionSpanData, skipping tool output: {current_span}" - ) - - return tool_output diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py deleted file mode 100644 index f29cfa4a..00000000 --- a/src/agents/model_settings.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass, fields, replace -from typing import Literal - -from openai.types.shared import Reasoning - - -@dataclass -class ModelSettings: - """Settings to use when calling an LLM. - - This class holds optional model configuration parameters (e.g. temperature, - top_p, penalties, truncation, etc.). - - Not all models/providers support all of these parameters, so please check the API documentation - for the specific model and provider you are using. 
- """ - - temperature: float | None = None - """The temperature to use when calling the model.""" - - top_p: float | None = None - """The top_p to use when calling the model.""" - - frequency_penalty: float | None = None - """The frequency penalty to use when calling the model.""" - - presence_penalty: float | None = None - """The presence penalty to use when calling the model.""" - - tool_choice: Literal["auto", "required", "none"] | str | None = None - """The tool choice to use when calling the model.""" - - parallel_tool_calls: bool | None = None - """Whether to use parallel tool calls when calling the model. - Defaults to False if not provided.""" - - truncation: Literal["auto", "disabled"] | None = None - """The truncation strategy to use when calling the model.""" - - max_tokens: int | None = None - """The maximum number of output tokens to generate.""" - - reasoning: Reasoning | None = None - """Configuration options for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). - """ - - metadata: dict[str, str] | None = None - """Metadata to include with the model response call.""" - - store: bool | None = None - """Whether to store the generated model response for later retrieval. - Defaults to True if not provided.""" - - include_usage: bool | None = None - """Whether to include usage chunk. - Defaults to True if not provided.""" - - def resolve(self, override: ModelSettings | None) -> ModelSettings: - """Produce a new ModelSettings by overlaying any non-None values from the - override on top of this instance.""" - if override is None: - return self - - changes = { - field.name: getattr(override, field.name) - for field in fields(self) - if getattr(override, field.name) is not None - } - return replace(self, **changes) diff --git a/src/agents/models/__init__.py b/src/agents/models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agents/models/_openai_shared.py b/src/agents/models/_openai_shared.py deleted file mode 100644 index 2e145018..00000000 --- a/src/agents/models/_openai_shared.py +++ /dev/null @@ -1,34 +0,0 @@ -from __future__ import annotations - -from openai import AsyncOpenAI - -_default_openai_key: str | None = None -_default_openai_client: AsyncOpenAI | None = None -_use_responses_by_default: bool = True - - -def set_default_openai_key(key: str) -> None: - global _default_openai_key - _default_openai_key = key - - -def get_default_openai_key() -> str | None: - return _default_openai_key - - -def set_default_openai_client(client: AsyncOpenAI) -> None: - global _default_openai_client - _default_openai_client = client - - -def get_default_openai_client() -> AsyncOpenAI | None: - return _default_openai_client - - -def set_use_responses_by_default(use_responses: bool) -> None: - global _use_responses_by_default - _use_responses_by_default = use_responses - - -def get_use_responses_by_default() -> bool: - return _use_responses_by_default diff --git a/src/agents/models/fake_id.py b/src/agents/models/fake_id.py deleted file mode 100644 index 0565b0a7..00000000 --- a/src/agents/models/fake_id.py +++ /dev/null @@ -1,5 +0,0 @@ -FAKE_RESPONSES_ID = "__fake_id__" -"""This is a placeholder ID used to fill in the `id` field in Responses API related objects. It's -useful when you're creating Responses objects from non-Responses APIs, e.g. the OpenAI Chat -Completions API or other LLM providers. 
-""" diff --git a/src/agents/models/interface.py b/src/agents/models/interface.py deleted file mode 100644 index e9a8700c..00000000 --- a/src/agents/models/interface.py +++ /dev/null @@ -1,107 +0,0 @@ -from __future__ import annotations - -import abc -import enum -from collections.abc import AsyncIterator -from typing import TYPE_CHECKING - -from ..agent_output import AgentOutputSchema -from ..handoffs import Handoff -from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent -from ..tool import Tool - -if TYPE_CHECKING: - from ..model_settings import ModelSettings - - -class ModelTracing(enum.Enum): - DISABLED = 0 - """Tracing is disabled entirely.""" - - ENABLED = 1 - """Tracing is enabled, and all data is included.""" - - ENABLED_WITHOUT_DATA = 2 - """Tracing is enabled, but inputs/outputs are not included.""" - - def is_disabled(self) -> bool: - return self == ModelTracing.DISABLED - - def include_data(self) -> bool: - return self == ModelTracing.ENABLED - - -class Model(abc.ABC): - """The base interface for calling an LLM.""" - - @abc.abstractmethod - async def get_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> ModelResponse: - """Get a response from the model. - - Args: - system_instructions: The system instructions to use. - input: The input items to the model, in OpenAI Responses format. - model_settings: The model settings to use. - tools: The tools available to the model. - output_schema: The output schema to use. - handoffs: The handoffs available to the model. - tracing: Tracing configuration. - - Returns: - The full model response. - """ - pass - - @abc.abstractmethod - def stream_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> AsyncIterator[TResponseStreamEvent]: - """Stream a response from the model. - - Args: - system_instructions: The system instructions to use. - input: The input items to the model, in OpenAI Responses format. - model_settings: The model settings to use. - tools: The tools available to the model. - output_schema: The output schema to use. - handoffs: The handoffs available to the model. - tracing: Tracing configuration. - - Returns: - An iterator of response stream events, in OpenAI Responses format. - """ - pass - - -class ModelProvider(abc.ABC): - """The base interface for a model provider. - - Model provider is responsible for looking up Models by name. - """ - - @abc.abstractmethod - def get_model(self, model_name: str | None) -> Model: - """Get a model by name. - - Args: - model_name: The name of the model to get. - - Returns: - The model. 
- """ diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py deleted file mode 100644 index 807c6512..00000000 --- a/src/agents/models/openai_chatcompletions.py +++ /dev/null @@ -1,1014 +0,0 @@ -from __future__ import annotations - -import dataclasses -import json -import time -from collections.abc import AsyncIterator, Iterable -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Literal, cast, overload - -from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream, NotGiven -from openai.types import ChatModel -from openai.types.chat import ( - ChatCompletion, - ChatCompletionAssistantMessageParam, - ChatCompletionChunk, - ChatCompletionContentPartImageParam, - ChatCompletionContentPartParam, - ChatCompletionContentPartTextParam, - ChatCompletionDeveloperMessageParam, - ChatCompletionMessage, - ChatCompletionMessageParam, - ChatCompletionMessageToolCallParam, - ChatCompletionSystemMessageParam, - ChatCompletionToolChoiceOptionParam, - ChatCompletionToolMessageParam, - ChatCompletionUserMessageParam, -) -from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam -from openai.types.chat.completion_create_params import ResponseFormat -from openai.types.completion_usage import CompletionUsage -from openai.types.responses import ( - EasyInputMessageParam, - Response, - ResponseCompletedEvent, - ResponseContentPartAddedEvent, - ResponseContentPartDoneEvent, - ResponseCreatedEvent, - ResponseFileSearchToolCallParam, - ResponseFunctionCallArgumentsDeltaEvent, - ResponseFunctionToolCall, - ResponseFunctionToolCallParam, - ResponseInputContentParam, - ResponseInputImageParam, - ResponseInputTextParam, - ResponseOutputItem, - ResponseOutputItemAddedEvent, - ResponseOutputItemDoneEvent, - ResponseOutputMessage, - ResponseOutputMessageParam, - ResponseOutputRefusal, - ResponseOutputText, - ResponseRefusalDeltaEvent, - ResponseTextDeltaEvent, - ResponseUsage, -) -from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message -from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails - -from .. 
import _debug -from ..agent_output import AgentOutputSchema -from ..exceptions import AgentsException, UserError -from ..handoffs import Handoff -from ..items import ModelResponse, TResponseInputItem, TResponseOutputItem, TResponseStreamEvent -from ..logger import logger -from ..tool import FunctionTool, Tool -from ..tracing import generation_span -from ..tracing.span_data import GenerationSpanData -from ..tracing.spans import Span -from ..usage import Usage -from ..version import __version__ -from .fake_id import FAKE_RESPONSES_ID -from .interface import Model, ModelTracing - -if TYPE_CHECKING: - from ..model_settings import ModelSettings - - -_USER_AGENT = f"Agents/Python {__version__}" -_HEADERS = {"User-Agent": _USER_AGENT} - - -@dataclass -class _StreamingState: - started: bool = False - text_content_index_and_output: tuple[int, ResponseOutputText] | None = None - refusal_content_index_and_output: tuple[int, ResponseOutputRefusal] | None = None - function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict) - - -class OpenAIChatCompletionsModel(Model): - def __init__( - self, - model: str | ChatModel, - openai_client: AsyncOpenAI, - ) -> None: - self.model = model - self._client = openai_client - - def _non_null_or_not_given(self, value: Any) -> Any: - return value if value is not None else NOT_GIVEN - - async def get_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> ModelResponse: - with generation_span( - model=str(self.model), - model_config=dataclasses.asdict(model_settings) - | {"base_url": str(self._client.base_url)}, - disabled=tracing.is_disabled(), - ) as span_generation: - response = await self._fetch_response( - system_instructions, - input, - model_settings, - tools, - output_schema, - handoffs, - span_generation, - tracing, - stream=False, - ) - - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("Received model response") - else: - logger.debug( - f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n" - ) - - usage = ( - Usage( - requests=1, - input_tokens=response.usage.prompt_tokens, - output_tokens=response.usage.completion_tokens, - total_tokens=response.usage.total_tokens, - ) - if response.usage - else Usage() - ) - if tracing.include_data(): - span_generation.span_data.output = [response.choices[0].message.model_dump()] - span_generation.span_data.usage = { - "input_tokens": usage.input_tokens, - "output_tokens": usage.output_tokens, - } - - items = _Converter.message_to_output_items(response.choices[0].message) - - return ModelResponse( - output=items, - usage=usage, - referenceable_id=None, - ) - - async def stream_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> AsyncIterator[TResponseStreamEvent]: - """ - Yields a partial message as it is generated, as well as the usage information. 
- """ - with generation_span( - model=str(self.model), - model_config=dataclasses.asdict(model_settings) - | {"base_url": str(self._client.base_url)}, - disabled=tracing.is_disabled(), - ) as span_generation: - response, stream = await self._fetch_response( - system_instructions, - input, - model_settings, - tools, - output_schema, - handoffs, - span_generation, - tracing, - stream=True, - ) - - usage: CompletionUsage | None = None - state = _StreamingState() - - async for chunk in stream: - if not state.started: - state.started = True - yield ResponseCreatedEvent( - response=response, - type="response.created", - ) - - # The usage is only available in the last chunk - usage = chunk.usage - - if not chunk.choices or not chunk.choices[0].delta: - continue - - delta = chunk.choices[0].delta - - # Handle text - if delta.content: - if not state.text_content_index_and_output: - # Initialize a content tracker for streaming text - state.text_content_index_and_output = ( - 0 if not state.refusal_content_index_and_output else 1, - ResponseOutputText( - text="", - type="output_text", - annotations=[], - ), - ) - # Start a new assistant message stream - assistant_item = ResponseOutputMessage( - id=FAKE_RESPONSES_ID, - content=[], - role="assistant", - type="message", - status="in_progress", - ) - # Notify consumers of the start of a new output message + first content part - yield ResponseOutputItemAddedEvent( - item=assistant_item, - output_index=0, - type="response.output_item.added", - ) - yield ResponseContentPartAddedEvent( - content_index=state.text_content_index_and_output[0], - item_id=FAKE_RESPONSES_ID, - output_index=0, - part=ResponseOutputText( - text="", - type="output_text", - annotations=[], - ), - type="response.content_part.added", - ) - # Emit the delta for this segment of content - yield ResponseTextDeltaEvent( - content_index=state.text_content_index_and_output[0], - delta=delta.content, - item_id=FAKE_RESPONSES_ID, - output_index=0, - type="response.output_text.delta", - ) - # Accumulate the text into the response part - state.text_content_index_and_output[1].text += delta.content - - # Handle refusals (model declines to answer) - if delta.refusal: - if not state.refusal_content_index_and_output: - # Initialize a content tracker for streaming refusal text - state.refusal_content_index_and_output = ( - 0 if not state.text_content_index_and_output else 1, - ResponseOutputRefusal(refusal="", type="refusal"), - ) - # Start a new assistant message if one doesn't exist yet (in-progress) - assistant_item = ResponseOutputMessage( - id=FAKE_RESPONSES_ID, - content=[], - role="assistant", - type="message", - status="in_progress", - ) - # Notify downstream that assistant message + first content part are starting - yield ResponseOutputItemAddedEvent( - item=assistant_item, - output_index=0, - type="response.output_item.added", - ) - yield ResponseContentPartAddedEvent( - content_index=state.refusal_content_index_and_output[0], - item_id=FAKE_RESPONSES_ID, - output_index=0, - part=ResponseOutputText( - text="", - type="output_text", - annotations=[], - ), - type="response.content_part.added", - ) - # Emit the delta for this segment of refusal - yield ResponseRefusalDeltaEvent( - content_index=state.refusal_content_index_and_output[0], - delta=delta.refusal, - item_id=FAKE_RESPONSES_ID, - output_index=0, - type="response.refusal.delta", - ) - # Accumulate the refusal string in the output part - state.refusal_content_index_and_output[1].refusal += delta.refusal - - # Handle tool calls - # 
Because we don't know the name of the function until the end of the stream, we'll - # save everything and yield events at the end - if delta.tool_calls: - for tc_delta in delta.tool_calls: - if tc_delta.index not in state.function_calls: - state.function_calls[tc_delta.index] = ResponseFunctionToolCall( - id=FAKE_RESPONSES_ID, - arguments="", - name="", - type="function_call", - call_id="", - ) - tc_function = tc_delta.function - - state.function_calls[tc_delta.index].arguments += ( - tc_function.arguments if tc_function else "" - ) or "" - state.function_calls[tc_delta.index].name += ( - tc_function.name if tc_function else "" - ) or "" - state.function_calls[tc_delta.index].call_id += tc_delta.id or "" - - function_call_starting_index = 0 - if state.text_content_index_and_output: - function_call_starting_index += 1 - # Send end event for this content part - yield ResponseContentPartDoneEvent( - content_index=state.text_content_index_and_output[0], - item_id=FAKE_RESPONSES_ID, - output_index=0, - part=state.text_content_index_and_output[1], - type="response.content_part.done", - ) - - if state.refusal_content_index_and_output: - function_call_starting_index += 1 - # Send end event for this content part - yield ResponseContentPartDoneEvent( - content_index=state.refusal_content_index_and_output[0], - item_id=FAKE_RESPONSES_ID, - output_index=0, - part=state.refusal_content_index_and_output[1], - type="response.content_part.done", - ) - - # Actually send events for the function calls - for function_call in state.function_calls.values(): - # First, a ResponseOutputItemAdded for the function call - yield ResponseOutputItemAddedEvent( - item=ResponseFunctionToolCall( - id=FAKE_RESPONSES_ID, - call_id=function_call.call_id, - arguments=function_call.arguments, - name=function_call.name, - type="function_call", - ), - output_index=function_call_starting_index, - type="response.output_item.added", - ) - # Then, yield the args - yield ResponseFunctionCallArgumentsDeltaEvent( - delta=function_call.arguments, - item_id=FAKE_RESPONSES_ID, - output_index=function_call_starting_index, - type="response.function_call_arguments.delta", - ) - # Finally, the ResponseOutputItemDone - yield ResponseOutputItemDoneEvent( - item=ResponseFunctionToolCall( - id=FAKE_RESPONSES_ID, - call_id=function_call.call_id, - arguments=function_call.arguments, - name=function_call.name, - type="function_call", - ), - output_index=function_call_starting_index, - type="response.output_item.done", - ) - - # Finally, send the Response completed event - outputs: list[ResponseOutputItem] = [] - if state.text_content_index_and_output or state.refusal_content_index_and_output: - assistant_msg = ResponseOutputMessage( - id=FAKE_RESPONSES_ID, - content=[], - role="assistant", - type="message", - status="completed", - ) - if state.text_content_index_and_output: - assistant_msg.content.append(state.text_content_index_and_output[1]) - if state.refusal_content_index_and_output: - assistant_msg.content.append(state.refusal_content_index_and_output[1]) - outputs.append(assistant_msg) - - # send a ResponseOutputItemDone for the assistant message - yield ResponseOutputItemDoneEvent( - item=assistant_msg, - output_index=0, - type="response.output_item.done", - ) - - for function_call in state.function_calls.values(): - outputs.append(function_call) - - final_response = response.model_copy() - final_response.output = outputs - final_response.usage = ( - ResponseUsage( - input_tokens=usage.prompt_tokens, - output_tokens=usage.completion_tokens, - 
total_tokens=usage.total_tokens, - output_tokens_details=OutputTokensDetails( - reasoning_tokens=usage.completion_tokens_details.reasoning_tokens - if usage.completion_tokens_details - and usage.completion_tokens_details.reasoning_tokens - else 0 - ), - input_tokens_details=InputTokensDetails( - cached_tokens=usage.prompt_tokens_details.cached_tokens - if usage.prompt_tokens_details and usage.prompt_tokens_details.cached_tokens - else 0 - ), - ) - if usage - else None - ) - - yield ResponseCompletedEvent( - response=final_response, - type="response.completed", - ) - if tracing.include_data(): - span_generation.span_data.output = [final_response.model_dump()] - - if usage: - span_generation.span_data.usage = { - "input_tokens": usage.prompt_tokens, - "output_tokens": usage.completion_tokens, - } - - @overload - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - span: Span[GenerationSpanData], - tracing: ModelTracing, - stream: Literal[True], - ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ... - - @overload - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - span: Span[GenerationSpanData], - tracing: ModelTracing, - stream: Literal[False], - ) -> ChatCompletion: ... - - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - span: Span[GenerationSpanData], - tracing: ModelTracing, - stream: bool = False, - ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]: - converted_messages = _Converter.items_to_messages(input) - - if system_instructions: - converted_messages.insert( - 0, - { - "content": system_instructions, - "role": "system", - }, - ) - if tracing.include_data(): - span.span_data.input = converted_messages - - parallel_tool_calls = ( - True if model_settings.parallel_tool_calls and tools and len(tools) > 0 else NOT_GIVEN - ) - tool_choice = _Converter.convert_tool_choice(model_settings.tool_choice) - response_format = _Converter.convert_response_format(output_schema) - - converted_tools = [ToolConverter.to_openai(tool) for tool in tools] if tools else [] - - for handoff in handoffs: - converted_tools.append(ToolConverter.convert_handoff_tool(handoff)) - - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("Calling LLM") - else: - logger.debug( - f"{json.dumps(converted_messages, indent=2)}\n" - f"Tools:\n{json.dumps(converted_tools, indent=2)}\n" - f"Stream: {stream}\n" - f"Tool choice: {tool_choice}\n" - f"Response format: {response_format}\n" - ) - - reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None - store = _Converter.get_store_param(self._get_client(), model_settings) - - stream_options = _Converter.get_stream_options_param(self._get_client(), model_settings) - - ret = await self._get_client().chat.completions.create( - model=self.model, - messages=converted_messages, - tools=converted_tools or NOT_GIVEN, - temperature=self._non_null_or_not_given(model_settings.temperature), - top_p=self._non_null_or_not_given(model_settings.top_p), - 
frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty), - presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty), - max_tokens=self._non_null_or_not_given(model_settings.max_tokens), - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - stream=stream, - stream_options=self._non_null_or_not_given(stream_options), - store=self._non_null_or_not_given(store), - reasoning_effort=self._non_null_or_not_given(reasoning_effort), - extra_headers=_HEADERS, - metadata=self._non_null_or_not_given(model_settings.metadata), - ) - - if isinstance(ret, ChatCompletion): - return ret - - response = Response( - id=FAKE_RESPONSES_ID, - created_at=time.time(), - model=self.model, - object="response", - output=[], - tool_choice=cast(Literal["auto", "required", "none"], tool_choice) - if tool_choice != NOT_GIVEN - else "auto", - top_p=model_settings.top_p, - temperature=model_settings.temperature, - tools=[], - parallel_tool_calls=parallel_tool_calls or False, - reasoning=model_settings.reasoning, - ) - return response, ret - - def _get_client(self) -> AsyncOpenAI: - if self._client is None: - self._client = AsyncOpenAI() - return self._client - - -class _Converter: - - @classmethod - def is_openai(cls, client: AsyncOpenAI): - return str(client.base_url).startswith("https://api.openai.com") - - @classmethod - def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSettings) -> bool | None: - # Match the behavior of Responses where store is True when not given - default_store = True if cls.is_openai(client) else None - return model_settings.store if model_settings.store is not None else default_store - - @classmethod - def get_stream_options_param( - cls, client: AsyncOpenAI, model_settings: ModelSettings - ) -> dict[str, bool] | None: - default_include_usage = True if cls.is_openai(client) else None - include_usage = model_settings.include_usage if model_settings.include_usage is not None \ - else default_include_usage - stream_options = {"include_usage": include_usage} if include_usage is not None else None - return stream_options - - @classmethod - def convert_tool_choice( - cls, tool_choice: Literal["auto", "required", "none"] | str | None - ) -> ChatCompletionToolChoiceOptionParam | NotGiven: - if tool_choice is None: - return NOT_GIVEN - elif tool_choice == "auto": - return "auto" - elif tool_choice == "required": - return "required" - elif tool_choice == "none": - return "none" - else: - return { - "type": "function", - "function": { - "name": tool_choice, - }, - } - - @classmethod - def convert_response_format( - cls, final_output_schema: AgentOutputSchema | None - ) -> ResponseFormat | NotGiven: - if not final_output_schema or final_output_schema.is_plain_text(): - return NOT_GIVEN - - return { - "type": "json_schema", - "json_schema": { - "name": "final_output", - "strict": final_output_schema.strict_json_schema, - "schema": final_output_schema.json_schema(), - }, - } - - @classmethod - def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]: - items: list[TResponseOutputItem] = [] - - message_item = ResponseOutputMessage( - id=FAKE_RESPONSES_ID, - content=[], - role="assistant", - type="message", - status="completed", - ) - if message.content: - message_item.content.append( - ResponseOutputText(text=message.content, type="output_text", annotations=[]) - ) - if message.refusal: - message_item.content.append( - ResponseOutputRefusal(refusal=message.refusal, 
type="refusal") - ) - if message.audio: - raise AgentsException("Audio is not currently supported") - - if message_item.content: - items.append(message_item) - - if message.tool_calls: - for tool_call in message.tool_calls: - items.append( - ResponseFunctionToolCall( - id=FAKE_RESPONSES_ID, - call_id=tool_call.id, - arguments=tool_call.function.arguments, - name=tool_call.function.name, - type="function_call", - ) - ) - - return items - - @classmethod - def maybe_easy_input_message(cls, item: Any) -> EasyInputMessageParam | None: - if not isinstance(item, dict): - return None - - keys = item.keys() - # EasyInputMessageParam only has these two keys - if keys != {"content", "role"}: - return None - - role = item.get("role", None) - if role not in ("user", "assistant", "system", "developer"): - return None - - if "content" not in item: - return None - - return cast(EasyInputMessageParam, item) - - @classmethod - def maybe_input_message(cls, item: Any) -> Message | None: - if ( - isinstance(item, dict) - and item.get("type") == "message" - and item.get("role") - in ( - "user", - "system", - "developer", - ) - ): - return cast(Message, item) - - return None - - @classmethod - def maybe_file_search_call(cls, item: Any) -> ResponseFileSearchToolCallParam | None: - if isinstance(item, dict) and item.get("type") == "file_search_call": - return cast(ResponseFileSearchToolCallParam, item) - return None - - @classmethod - def maybe_function_tool_call(cls, item: Any) -> ResponseFunctionToolCallParam | None: - if isinstance(item, dict) and item.get("type") == "function_call": - return cast(ResponseFunctionToolCallParam, item) - return None - - @classmethod - def maybe_function_tool_call_output( - cls, - item: Any, - ) -> FunctionCallOutput | None: - if isinstance(item, dict) and item.get("type") == "function_call_output": - return cast(FunctionCallOutput, item) - return None - - @classmethod - def maybe_item_reference(cls, item: Any) -> ItemReference | None: - if isinstance(item, dict) and item.get("type") == "item_reference": - return cast(ItemReference, item) - return None - - @classmethod - def maybe_response_output_message(cls, item: Any) -> ResponseOutputMessageParam | None: - # ResponseOutputMessage is only used for messages with role assistant - if ( - isinstance(item, dict) - and item.get("type") == "message" - and item.get("role") == "assistant" - ): - return cast(ResponseOutputMessageParam, item) - return None - - @classmethod - def extract_text_content( - cls, content: str | Iterable[ResponseInputContentParam] - ) -> str | list[ChatCompletionContentPartTextParam]: - all_content = cls.extract_all_content(content) - if isinstance(all_content, str): - return all_content - out: list[ChatCompletionContentPartTextParam] = [] - for c in all_content: - if c.get("type") == "text": - out.append(cast(ChatCompletionContentPartTextParam, c)) - return out - - @classmethod - def extract_all_content( - cls, content: str | Iterable[ResponseInputContentParam] - ) -> str | list[ChatCompletionContentPartParam]: - if isinstance(content, str): - return content - out: list[ChatCompletionContentPartParam] = [] - - for c in content: - if isinstance(c, dict) and c.get("type") == "input_text": - casted_text_param = cast(ResponseInputTextParam, c) - out.append( - ChatCompletionContentPartTextParam( - type="text", - text=casted_text_param["text"], - ) - ) - elif isinstance(c, dict) and c.get("type") == "input_image": - casted_image_param = cast(ResponseInputImageParam, c) - if "image_url" not in casted_image_param or not 
casted_image_param["image_url"]: - raise UserError( - f"Only image URLs are supported for input_image {casted_image_param}" - ) - out.append( - ChatCompletionContentPartImageParam( - type="image_url", - image_url={ - "url": casted_image_param["image_url"], - "detail": casted_image_param["detail"], - }, - ) - ) - elif isinstance(c, dict) and c.get("type") == "input_file": - raise UserError(f"File uploads are not supported for chat completions {c}") - else: - raise UserError(f"Unknown content: {c}") - return out - - @classmethod - def items_to_messages( - cls, - items: str | Iterable[TResponseInputItem], - ) -> list[ChatCompletionMessageParam]: - """ - Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam. - - Rules: - - EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam - - EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam - - EasyInputMessage or InputMessage (role=developer) => ChatCompletionDeveloperMessageParam - - InputMessage (role=assistant) => Start or flush a ChatCompletionAssistantMessageParam - - response_output_message => Also produces/flushes a ChatCompletionAssistantMessageParam - - tool calls get attached to the *current* assistant message, or create one if none. - - tool outputs => ChatCompletionToolMessageParam - """ - - if isinstance(items, str): - return [ - ChatCompletionUserMessageParam( - role="user", - content=items, - ) - ] - - result: list[ChatCompletionMessageParam] = [] - current_assistant_msg: ChatCompletionAssistantMessageParam | None = None - - def flush_assistant_message() -> None: - nonlocal current_assistant_msg - if current_assistant_msg is not None: - # The API doesn't support empty arrays for tool_calls - if not current_assistant_msg.get("tool_calls"): - del current_assistant_msg["tool_calls"] - result.append(current_assistant_msg) - current_assistant_msg = None - - def ensure_assistant_message() -> ChatCompletionAssistantMessageParam: - nonlocal current_assistant_msg - if current_assistant_msg is None: - current_assistant_msg = ChatCompletionAssistantMessageParam(role="assistant") - current_assistant_msg["tool_calls"] = [] - return current_assistant_msg - - for item in items: - # 1) Check easy input message - if easy_msg := cls.maybe_easy_input_message(item): - role = easy_msg["role"] - content = easy_msg["content"] - - if role == "user": - flush_assistant_message() - msg_user: ChatCompletionUserMessageParam = { - "role": "user", - "content": cls.extract_all_content(content), - } - result.append(msg_user) - elif role == "system": - flush_assistant_message() - msg_system: ChatCompletionSystemMessageParam = { - "role": "system", - "content": cls.extract_text_content(content), - } - result.append(msg_system) - elif role == "developer": - flush_assistant_message() - msg_developer: ChatCompletionDeveloperMessageParam = { - "role": "developer", - "content": cls.extract_text_content(content), - } - result.append(msg_developer) - elif role == "assistant": - flush_assistant_message() - msg_assistant: ChatCompletionAssistantMessageParam = { - "role": "assistant", - "content": cls.extract_text_content(content), - } - result.append(msg_assistant) - else: - raise UserError(f"Unexpected role in easy_input_message: {role}") - - # 2) Check input message - elif in_msg := cls.maybe_input_message(item): - role = in_msg["role"] - content = in_msg["content"] - flush_assistant_message() - - if role == "user": - msg_user = { - "role": "user", - "content": cls.extract_all_content(content), - } 
- result.append(msg_user) - elif role == "system": - msg_system = { - "role": "system", - "content": cls.extract_text_content(content), - } - result.append(msg_system) - elif role == "developer": - msg_developer = { - "role": "developer", - "content": cls.extract_text_content(content), - } - result.append(msg_developer) - else: - raise UserError(f"Unexpected role in input_message: {role}") - - # 3) response output message => assistant - elif resp_msg := cls.maybe_response_output_message(item): - flush_assistant_message() - new_asst = ChatCompletionAssistantMessageParam(role="assistant") - contents = resp_msg["content"] - - text_segments = [] - for c in contents: - if c["type"] == "output_text": - text_segments.append(c["text"]) - elif c["type"] == "refusal": - new_asst["refusal"] = c["refusal"] - elif c["type"] == "output_audio": - # Can't handle this, b/c chat completions expects an ID which we dont have - raise UserError( - f"Only audio IDs are supported for chat completions, but got: {c}" - ) - else: - raise UserError(f"Unknown content type in ResponseOutputMessage: {c}") - - if text_segments: - combined = "\n".join(text_segments) - new_asst["content"] = combined - - new_asst["tool_calls"] = [] - current_assistant_msg = new_asst - - # 4) function/file-search calls => attach to assistant - elif file_search := cls.maybe_file_search_call(item): - asst = ensure_assistant_message() - tool_calls = list(asst.get("tool_calls", [])) - new_tool_call = ChatCompletionMessageToolCallParam( - id=file_search["id"], - type="function", - function={ - "name": "file_search_call", - "arguments": json.dumps( - { - "queries": file_search.get("queries", []), - "status": file_search.get("status"), - } - ), - }, - ) - tool_calls.append(new_tool_call) - asst["tool_calls"] = tool_calls - - elif func_call := cls.maybe_function_tool_call(item): - asst = ensure_assistant_message() - tool_calls = list(asst.get("tool_calls", [])) - arguments = func_call["arguments"] if func_call["arguments"] else "{}" - new_tool_call = ChatCompletionMessageToolCallParam( - id=func_call["call_id"], - type="function", - function={ - "name": func_call["name"], - "arguments": arguments, - }, - ) - tool_calls.append(new_tool_call) - asst["tool_calls"] = tool_calls - # 5) function call output => tool message - elif func_output := cls.maybe_function_tool_call_output(item): - flush_assistant_message() - msg: ChatCompletionToolMessageParam = { - "role": "tool", - "tool_call_id": func_output["call_id"], - "content": func_output["output"], - } - result.append(msg) - - # 6) item reference => handle or raise - elif item_ref := cls.maybe_item_reference(item): - raise UserError( - f"Encountered an item_reference, which is not supported: {item_ref}" - ) - - # 7) If we haven't recognized it => fail or ignore - else: - raise UserError(f"Unhandled item type or structure: {item}") - - flush_assistant_message() - return result - - -class ToolConverter: - @classmethod - def to_openai(cls, tool: Tool) -> ChatCompletionToolParam: - if isinstance(tool, FunctionTool): - return { - "type": "function", - "function": { - "name": tool.name, - "description": tool.description or "", - "parameters": tool.params_json_schema, - }, - } - - raise UserError( - f"Hosted tools are not supported with the ChatCompletions API. 
Got tool type: " - f"{type(tool)}, tool: {tool}" - ) - - @classmethod - def convert_handoff_tool(cls, handoff: Handoff[Any]) -> ChatCompletionToolParam: - return { - "type": "function", - "function": { - "name": handoff.tool_name, - "description": handoff.tool_description, - "parameters": handoff.input_json_schema, - }, - } diff --git a/src/agents/models/openai_provider.py b/src/agents/models/openai_provider.py deleted file mode 100644 index e7e922ab..00000000 --- a/src/agents/models/openai_provider.py +++ /dev/null @@ -1,91 +0,0 @@ -from __future__ import annotations - -import httpx -from openai import AsyncOpenAI, DefaultAsyncHttpxClient - -from . import _openai_shared -from .interface import Model, ModelProvider -from .openai_chatcompletions import OpenAIChatCompletionsModel -from .openai_responses import OpenAIResponsesModel - -DEFAULT_MODEL: str = "gpt-4o" - - -_http_client: httpx.AsyncClient | None = None - - -# If we create a new httpx client for each request, that would mean no sharing of connection pools, -# which would mean worse latency and resource usage. So, we share the client across requests. -def shared_http_client() -> httpx.AsyncClient: - global _http_client - if _http_client is None: - _http_client = DefaultAsyncHttpxClient() - return _http_client - - -class OpenAIProvider(ModelProvider): - def __init__( - self, - *, - api_key: str | None = None, - base_url: str | None = None, - openai_client: AsyncOpenAI | None = None, - organization: str | None = None, - project: str | None = None, - use_responses: bool | None = None, - ) -> None: - """Create a new OpenAI provider. - - Args: - api_key: The API key to use for the OpenAI client. If not provided, we will use the - default API key. - base_url: The base URL to use for the OpenAI client. If not provided, we will use the - default base URL. - openai_client: An optional OpenAI client to use. If not provided, we will create a new - OpenAI client using the api_key and base_url. - organization: The organization to use for the OpenAI client. - project: The project to use for the OpenAI client. - use_responses: Whether to use the OpenAI responses API. - """ - if openai_client is not None: - assert api_key is None and base_url is None, ( - "Don't provide api_key or base_url if you provide openai_client" - ) - self._client: AsyncOpenAI | None = openai_client - else: - self._client = None - self._stored_api_key = api_key - self._stored_base_url = base_url - self._stored_organization = organization - self._stored_project = project - - if use_responses is not None: - self._use_responses = use_responses - else: - self._use_responses = _openai_shared.get_use_responses_by_default() - - # We lazy load the client in case you never actually use OpenAIProvider(). Otherwise - # AsyncOpenAI() raises an error if you don't have an API key set. 
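# Illustrative sketch, not part of this patch: exercising the provider defined in this
# file. The import path follows the src/agents layout shown in these diffs, and an
# OPENAI_API_KEY in the environment is assumed; nothing below is mandated by the diff.
from agents.models.openai_provider import OpenAIProvider

provider = OpenAIProvider(use_responses=False)  # no AsyncOpenAI client is built yet
model = provider.get_model(None)                # builds the client lazily; None falls back to DEFAULT_MODEL
print(type(model).__name__)                     # -> OpenAIChatCompletionsModel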
- def _get_client(self) -> AsyncOpenAI: - if self._client is None: - self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI( - api_key=self._stored_api_key or _openai_shared.get_default_openai_key(), - base_url=self._stored_base_url, - organization=self._stored_organization, - project=self._stored_project, - http_client=shared_http_client(), - ) - - return self._client - - def get_model(self, model_name: str | None) -> Model: - if model_name is None: - model_name = DEFAULT_MODEL - - client = self._get_client() - - return ( - OpenAIResponsesModel(model=model_name, openai_client=client) - if self._use_responses - else OpenAIChatCompletionsModel(model=model_name, openai_client=client) - ) diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py deleted file mode 100644 index 06828884..00000000 --- a/src/agents/models/openai_responses.py +++ /dev/null @@ -1,393 +0,0 @@ -from __future__ import annotations - -import json -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Literal, overload - -from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven -from openai.types import ChatModel -from openai.types.responses import ( - Response, - ResponseCompletedEvent, - ResponseStreamEvent, - ResponseTextConfigParam, - ToolParam, - WebSearchToolParam, - response_create_params, -) - -from .. import _debug -from ..agent_output import AgentOutputSchema -from ..exceptions import UserError -from ..handoffs import Handoff -from ..items import ItemHelpers, ModelResponse, TResponseInputItem -from ..logger import logger -from ..tool import ComputerTool, FileSearchTool, FunctionTool, Tool, WebSearchTool -from ..tracing import SpanError, response_span -from ..usage import Usage -from ..version import __version__ -from .interface import Model, ModelTracing - -if TYPE_CHECKING: - from ..model_settings import ModelSettings - - -_USER_AGENT = f"Agents/Python {__version__}" -_HEADERS = {"User-Agent": _USER_AGENT} - -# From the Responses API -IncludeLiteral = Literal[ - "file_search_call.results", - "message.input_image.image_url", - "computer_call_output.output.image_url", -] - - -class OpenAIResponsesModel(Model): - """ - Implementation of `Model` that uses the OpenAI Responses API. 
- """ - - def __init__( - self, - model: str | ChatModel, - openai_client: AsyncOpenAI, - ) -> None: - self.model = model - self._client = openai_client - - def _non_null_or_not_given(self, value: Any) -> Any: - return value if value is not None else NOT_GIVEN - - async def get_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> ModelResponse: - with response_span(disabled=tracing.is_disabled()) as span_response: - try: - response = await self._fetch_response( - system_instructions, - input, - model_settings, - tools, - output_schema, - handoffs, - stream=False, - ) - - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("LLM responded") - else: - logger.debug( - "LLM resp:\n" - f"{json.dumps([x.model_dump() for x in response.output], indent=2)}\n" - ) - - usage = ( - Usage( - requests=1, - input_tokens=response.usage.input_tokens, - output_tokens=response.usage.output_tokens, - total_tokens=response.usage.total_tokens, - ) - if response.usage - else Usage() - ) - - if tracing.include_data(): - span_response.span_data.response = response - span_response.span_data.input = input - except Exception as e: - span_response.set_error( - SpanError( - message="Error getting response", - data={ - "error": str(e) if tracing.include_data() else e.__class__.__name__, - }, - ) - ) - request_id = e.request_id if isinstance(e, APIStatusError) else None - logger.error(f"Error getting response: {e}. (request_id: {request_id})") - raise - - return ModelResponse( - output=response.output, - usage=usage, - referenceable_id=response.id, - ) - - async def stream_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> AsyncIterator[ResponseStreamEvent]: - """ - Yields a partial message as it is generated, as well as the usage information. - """ - with response_span(disabled=tracing.is_disabled()) as span_response: - try: - stream = await self._fetch_response( - system_instructions, - input, - model_settings, - tools, - output_schema, - handoffs, - stream=True, - ) - - final_response: Response | None = None - - async for chunk in stream: - if isinstance(chunk, ResponseCompletedEvent): - final_response = chunk.response - yield chunk - - if final_response and tracing.include_data(): - span_response.span_data.response = final_response - span_response.span_data.input = input - - except Exception as e: - span_response.set_error( - SpanError( - message="Error streaming response", - data={ - "error": str(e) if tracing.include_data() else e.__class__.__name__, - }, - ) - ) - logger.error(f"Error streaming response: {e}") - raise - - @overload - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - stream: Literal[True], - ) -> AsyncStream[ResponseStreamEvent]: ... - - @overload - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - stream: Literal[False], - ) -> Response: ... 
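# Illustrative sketch, not part of this patch: calling the Responses-API wrapper defined
# in this file directly. Assumes OPENAI_API_KEY is set and that the import paths match
# the src/agents layout shown in these diffs; the model name and prompt are placeholders.
import asyncio
from openai import AsyncOpenAI
from agents.model_settings import ModelSettings
from agents.models.interface import ModelTracing
from agents.models.openai_responses import OpenAIResponsesModel

async def main() -> None:
    model = OpenAIResponsesModel(model="gpt-4o", openai_client=AsyncOpenAI())
    resp = await model.get_response(
        system_instructions="Answer in one sentence.",
        input="What is a model provider in this SDK?",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
    )
    print(resp.output, resp.usage.total_tokens)

asyncio.run(main())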
- - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - stream: Literal[True] | Literal[False] = False, - ) -> Response | AsyncStream[ResponseStreamEvent]: - list_input = ItemHelpers.input_to_new_input_list(input) - - parallel_tool_calls = ( - True - if model_settings.parallel_tool_calls and tools and len(tools) > 0 - else False - if model_settings.parallel_tool_calls is False - else NOT_GIVEN - ) - - tool_choice = Converter.convert_tool_choice(model_settings.tool_choice) - converted_tools = Converter.convert_tools(tools, handoffs) - response_format = Converter.get_response_format(output_schema) - - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("Calling LLM") - else: - logger.debug( - f"Calling LLM {self.model} with input:\n" - f"{json.dumps(list_input, indent=2)}\n" - f"Tools:\n{json.dumps(converted_tools.tools, indent=2)}\n" - f"Stream: {stream}\n" - f"Tool choice: {tool_choice}\n" - f"Response format: {response_format}\n" - ) - - return await self._client.responses.create( - instructions=self._non_null_or_not_given(system_instructions), - model=self.model, - input=list_input, - include=converted_tools.includes, - tools=converted_tools.tools, - temperature=self._non_null_or_not_given(model_settings.temperature), - top_p=self._non_null_or_not_given(model_settings.top_p), - truncation=self._non_null_or_not_given(model_settings.truncation), - max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens), - tool_choice=tool_choice, - parallel_tool_calls=parallel_tool_calls, - stream=stream, - extra_headers=_HEADERS, - text=response_format, - store=self._non_null_or_not_given(model_settings.store), - reasoning=self._non_null_or_not_given(model_settings.reasoning), - metadata=self._non_null_or_not_given(model_settings.metadata) - ) - - def _get_client(self) -> AsyncOpenAI: - if self._client is None: - self._client = AsyncOpenAI() - return self._client - - -@dataclass -class ConvertedTools: - tools: list[ToolParam] - includes: list[IncludeLiteral] - - -class Converter: - @classmethod - def convert_tool_choice( - cls, tool_choice: Literal["auto", "required", "none"] | str | None - ) -> response_create_params.ToolChoice | NotGiven: - if tool_choice is None: - return NOT_GIVEN - elif tool_choice == "required": - return "required" - elif tool_choice == "auto": - return "auto" - elif tool_choice == "none": - return "none" - elif tool_choice == "file_search": - return { - "type": "file_search", - } - elif tool_choice == "web_search_preview": - return { - "type": "web_search_preview", - } - elif tool_choice == "computer_use_preview": - return { - "type": "computer_use_preview", - } - else: - return { - "type": "function", - "name": tool_choice, - } - - @classmethod - def get_response_format( - cls, output_schema: AgentOutputSchema | None - ) -> ResponseTextConfigParam | NotGiven: - if output_schema is None or output_schema.is_plain_text(): - return NOT_GIVEN - else: - return { - "format": { - "type": "json_schema", - "name": "final_output", - "schema": output_schema.json_schema(), - "strict": output_schema.strict_json_schema, - } - } - - @classmethod - def convert_tools( - cls, - tools: list[Tool], - handoffs: list[Handoff[Any]], - ) -> ConvertedTools: - converted_tools: list[ToolParam] = [] - includes: list[IncludeLiteral] = [] - - computer_tools = [tool for tool in tools if isinstance(tool, ComputerTool)] - 
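# Illustrative sketch, not part of this patch: the string-to-parameter mapping performed
# by Converter.convert_tool_choice above. The import path assumes the src/agents layout
# shown in these diffs; the function name "lookup_weather" is a placeholder.
from agents.models.openai_responses import Converter

Converter.convert_tool_choice("auto")            # -> "auto"
Converter.convert_tool_choice("file_search")     # -> {"type": "file_search"}
Converter.convert_tool_choice("lookup_weather")  # -> {"type": "function", "name": "lookup_weather"}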
if len(computer_tools) > 1: - raise UserError(f"You can only provide one computer tool. Got {len(computer_tools)}") - - for tool in tools: - converted_tool, include = cls._convert_tool(tool) - converted_tools.append(converted_tool) - if include: - includes.append(include) - - for handoff in handoffs: - converted_tools.append(cls._convert_handoff_tool(handoff)) - - return ConvertedTools(tools=converted_tools, includes=includes) - - @classmethod - def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, IncludeLiteral | None]: - """Returns converted tool and includes""" - - if isinstance(tool, FunctionTool): - converted_tool: ToolParam = { - "name": tool.name, - "parameters": tool.params_json_schema, - "strict": tool.strict_json_schema, - "type": "function", - "description": tool.description, - } - includes: IncludeLiteral | None = None - elif isinstance(tool, WebSearchTool): - ws: WebSearchToolParam = { - "type": "web_search_preview", - "user_location": tool.user_location, - "search_context_size": tool.search_context_size, - } - converted_tool = ws - includes = None - elif isinstance(tool, FileSearchTool): - converted_tool = { - "type": "file_search", - "vector_store_ids": tool.vector_store_ids, - } - if tool.max_num_results: - converted_tool["max_num_results"] = tool.max_num_results - if tool.ranking_options: - converted_tool["ranking_options"] = tool.ranking_options - if tool.filters: - converted_tool["filters"] = tool.filters - - includes = "file_search_call.results" if tool.include_search_results else None - elif isinstance(tool, ComputerTool): - converted_tool = { - "type": "computer_use_preview", - "environment": tool.computer.environment, - "display_width": tool.computer.dimensions[0], - "display_height": tool.computer.dimensions[1], - } - includes = None - - else: - raise UserError(f"Unknown tool type: {type(tool)}, tool") - - return converted_tool, includes - - @classmethod - def _convert_handoff_tool(cls, handoff: Handoff) -> ToolParam: - return { - "name": handoff.tool_name, - "parameters": handoff.input_json_schema, - "strict": handoff.strict_json_schema, - "type": "function", - "description": handoff.tool_description, - } diff --git a/src/agents/py.typed b/src/agents/py.typed deleted file mode 100644 index 8b137891..00000000 --- a/src/agents/py.typed +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/agents/result.py b/src/agents/result.py deleted file mode 100644 index 40a64806..00000000 --- a/src/agents/result.py +++ /dev/null @@ -1,225 +0,0 @@ -from __future__ import annotations - -import abc -import asyncio -from collections.abc import AsyncIterator -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, cast - -from typing_extensions import TypeVar - -from ._run_impl import QueueCompleteSentinel -from .agent import Agent -from .agent_output import AgentOutputSchema -from .exceptions import InputGuardrailTripwireTriggered, MaxTurnsExceeded -from .guardrail import InputGuardrailResult, OutputGuardrailResult -from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem -from .logger import logger -from .stream_events import StreamEvent -from .tracing import Trace -from .util._pretty_print import pretty_print_result, pretty_print_run_result_streaming - -if TYPE_CHECKING: - from ._run_impl import QueueCompleteSentinel - from .agent import Agent - -T = TypeVar("T") - - -@dataclass -class RunResultBase(abc.ABC): - input: str | list[TResponseInputItem] - """The original input items i.e. the items before run() was called. 
This may be a mutated - version of the input, if there are handoff input filters that mutate the input. - """ - - new_items: list[RunItem] - """The new items generated during the agent run. These include things like new messages, tool - calls and their outputs, etc. - """ - - raw_responses: list[ModelResponse] - """The raw LLM responses generated by the model during the agent run.""" - - final_output: Any - """The output of the last agent.""" - - input_guardrail_results: list[InputGuardrailResult] - """Guardrail results for the input messages.""" - - output_guardrail_results: list[OutputGuardrailResult] - """Guardrail results for the final output of the agent.""" - - @property - @abc.abstractmethod - def last_agent(self) -> Agent[Any]: - """The last agent that was run.""" - - def final_output_as(self, cls: type[T], raise_if_incorrect_type: bool = False) -> T: - """A convenience method to cast the final output to a specific type. By default, the cast - is only for the typechecker. If you set `raise_if_incorrect_type` to True, we'll raise a - TypeError if the final output is not of the given type. - - Args: - cls: The type to cast the final output to. - raise_if_incorrect_type: If True, we'll raise a TypeError if the final output is not of - the given type. - - Returns: - The final output casted to the given type. - """ - if raise_if_incorrect_type and not isinstance(self.final_output, cls): - raise TypeError(f"Final output is not of type {cls.__name__}") - - return cast(T, self.final_output) - - def to_input_list(self) -> list[TResponseInputItem]: - """Creates a new input list, merging the original input with all the new items generated.""" - original_items: list[TResponseInputItem] = ItemHelpers.input_to_new_input_list(self.input) - new_items = [item.to_input_item() for item in self.new_items] - - return original_items + new_items - - -@dataclass -class RunResult(RunResultBase): - _last_agent: Agent[Any] - - @property - def last_agent(self) -> Agent[Any]: - """The last agent that was run.""" - return self._last_agent - - def __str__(self) -> str: - return pretty_print_result(self) - - -@dataclass -class RunResultStreaming(RunResultBase): - """The result of an agent run in streaming mode. You can use the `stream_events` method to - receive semantic events as they are generated. - - The streaming method will raise: - - A MaxTurnsExceeded exception if the agent exceeds the max_turns limit. - - A GuardrailTripwireTriggered exception if a guardrail is tripped. - """ - - current_agent: Agent[Any] - """The current agent that is running.""" - - current_turn: int - """The current turn number.""" - - max_turns: int - """The maximum number of turns the agent can run for.""" - - final_output: Any - """The final output of the agent. 
This is None until the agent has finished running.""" - - _current_agent_output_schema: AgentOutputSchema | None = field(repr=False) - - _trace: Trace | None = field(repr=False) - - is_complete: bool = False - """Whether the agent has finished running.""" - - # Queues that the background run_loop writes to - _event_queue: asyncio.Queue[StreamEvent | QueueCompleteSentinel] = field( - default_factory=asyncio.Queue, repr=False - ) - _input_guardrail_queue: asyncio.Queue[InputGuardrailResult] = field( - default_factory=asyncio.Queue, repr=False - ) - - # Store the asyncio tasks that we're waiting on - _run_impl_task: asyncio.Task[Any] | None = field(default=None, repr=False) - _input_guardrails_task: asyncio.Task[Any] | None = field(default=None, repr=False) - _output_guardrails_task: asyncio.Task[Any] | None = field(default=None, repr=False) - _stored_exception: Exception | None = field(default=None, repr=False) - - @property - def last_agent(self) -> Agent[Any]: - """The last agent that was run. Updates as the agent run progresses, so the true last agent - is only available after the agent run is complete. - """ - return self.current_agent - - async def stream_events(self) -> AsyncIterator[StreamEvent]: - """Stream deltas for new items as they are generated. We're using the types from the - OpenAI Responses API, so these are semantic events: each event has a `type` field that - describes the type of the event, along with the data for that event. - - This will raise: - - A MaxTurnsExceeded exception if the agent exceeds the max_turns limit. - - A GuardrailTripwireTriggered exception if a guardrail is tripped. - """ - while True: - self._check_errors() - if self._stored_exception: - logger.debug("Breaking due to stored exception") - self.is_complete = True - break - - if self.is_complete and self._event_queue.empty(): - break - - try: - item = await self._event_queue.get() - except asyncio.CancelledError: - break - - if isinstance(item, QueueCompleteSentinel): - self._event_queue.task_done() - # Check for errors, in case the queue was completed due to an exception - self._check_errors() - break - - yield item - self._event_queue.task_done() - - if self._trace: - self._trace.finish(reset_current=True) - - self._cleanup_tasks() - - if self._stored_exception: - raise self._stored_exception - - def _check_errors(self): - if self.current_turn > self.max_turns: - self._stored_exception = MaxTurnsExceeded(f"Max turns ({self.max_turns}) exceeded") - - # Fetch all the completed guardrail results from the queue and raise if needed - while not self._input_guardrail_queue.empty(): - guardrail_result = self._input_guardrail_queue.get_nowait() - if guardrail_result.output.tripwire_triggered: - self._stored_exception = InputGuardrailTripwireTriggered(guardrail_result) - - # Check the tasks for any exceptions - if self._run_impl_task and self._run_impl_task.done(): - exc = self._run_impl_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._input_guardrails_task and self._input_guardrails_task.done(): - exc = self._input_guardrails_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._output_guardrails_task and self._output_guardrails_task.done(): - exc = self._output_guardrails_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - def _cleanup_tasks(self): - if self._run_impl_task and not self._run_impl_task.done(): - self._run_impl_task.cancel() - - if 
self._input_guardrails_task and not self._input_guardrails_task.done(): - self._input_guardrails_task.cancel() - - if self._output_guardrails_task and not self._output_guardrails_task.done(): - self._output_guardrails_task.cancel() - - def __str__(self) -> str: - return pretty_print_run_result_streaming(self) diff --git a/src/agents/run.py b/src/agents/run.py deleted file mode 100644 index 0159822a..00000000 --- a/src/agents/run.py +++ /dev/null @@ -1,942 +0,0 @@ -from __future__ import annotations - -import asyncio -import copy -from dataclasses import dataclass, field -from typing import Any, cast - -from openai.types.responses import ResponseCompletedEvent - -from ._run_impl import ( - AgentToolUseTracker, - NextStepFinalOutput, - NextStepHandoff, - NextStepRunAgain, - QueueCompleteSentinel, - RunImpl, - SingleStepResult, - TraceCtxManager, - get_model_tracing_impl, -) -from .agent import Agent -from .agent_output import AgentOutputSchema -from .exceptions import ( - AgentsException, - InputGuardrailTripwireTriggered, - MaxTurnsExceeded, - ModelBehaviorError, - OutputGuardrailTripwireTriggered, -) -from .guardrail import InputGuardrail, InputGuardrailResult, OutputGuardrail, OutputGuardrailResult -from .handoffs import Handoff, HandoffInputFilter, handoff -from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem -from .lifecycle import RunHooks -from .logger import logger -from .model_settings import ModelSettings -from .models.interface import Model, ModelProvider -from .models.openai_provider import OpenAIProvider -from .result import RunResult, RunResultStreaming -from .run_context import RunContextWrapper, TContext -from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent -from .tool import Tool -from .tracing import Span, SpanError, agent_span, get_current_trace, trace -from .tracing.span_data import AgentSpanData -from .usage import Usage -from .util import _coro, _error_tracing - -DEFAULT_MAX_TURNS = 10 - - -@dataclass -class RunConfig: - """Configures settings for the entire agent run.""" - - model: str | Model | None = None - """The model to use for the entire agent run. If set, will override the model set on every - agent. The model_provider passed in below must be able to resolve this model name. - """ - - model_provider: ModelProvider = field(default_factory=OpenAIProvider) - """The model provider to use when looking up string model names. Defaults to OpenAI.""" - - model_settings: ModelSettings | None = None - """Configure global model settings. Any non-null values will override the agent-specific model - settings. - """ - - handoff_input_filter: HandoffInputFilter | None = None - """A global input filter to apply to all handoffs. If `Handoff.input_filter` is set, then that - will take precedence. The input filter allows you to edit the inputs that are sent to the new - agent. See the documentation in `Handoff.input_filter` for more details. - """ - - input_guardrails: list[InputGuardrail[Any]] | None = None - """A list of input guardrails to run on the initial run input.""" - - output_guardrails: list[OutputGuardrail[Any]] | None = None - """A list of output guardrails to run on the final output of the run.""" - - tracing_disabled: bool = False - """Whether tracing is disabled for the agent run. If disabled, we will not trace the agent run. - """ - - trace_include_sensitive_data: bool = True - """Whether we include potentially sensitive data (for example: inputs/outputs of tool calls or - LLM generations) in traces. 
If False, we'll still create spans for these events, but the - sensitive data will not be included. - """ - - workflow_name: str = "Agent workflow" - """The name of the run, used for tracing. Should be a logical name for the run, like - "Code generation workflow" or "Customer support agent". - """ - - trace_id: str | None = None - """A custom trace ID to use for tracing. If not provided, we will generate a new trace ID.""" - - group_id: str | None = None - """ - A grouping identifier to use for tracing, to link multiple traces from the same conversation - or process. For example, you might use a chat thread ID. - """ - - trace_metadata: dict[str, Any] | None = None - """ - An optional dictionary of additional metadata to include with the trace. - """ - - -class Runner: - @classmethod - async def run( - cls, - starting_agent: Agent[TContext], - input: str | list[TResponseInputItem], - *, - context: TContext | None = None, - max_turns: int = DEFAULT_MAX_TURNS, - hooks: RunHooks[TContext] | None = None, - run_config: RunConfig | None = None, - ) -> RunResult: - """Run a workflow starting at the given agent. The agent will run in a loop until a final - output is generated. The loop runs like so: - 1. The agent is invoked with the given input. - 2. If there is a final output (i.e. the agent produces something of type - `agent.output_type`, the loop terminates. - 3. If there's a handoff, we run the loop again, with the new agent. - 4. Else, we run tool calls (if any), and re-run the loop. - - In two cases, the agent may raise an exception: - 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. - 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised. - - Note that only the first agent's input guardrails are run. - - Args: - starting_agent: The starting agent to run. - input: The initial input to the agent. You can pass a single string for a user message, - or a list of input items. - context: The context to run the agent with. - max_turns: The maximum number of turns to run the agent for. A turn is defined as one - AI invocation (including any tool calls that might occur). - hooks: An object that receives callbacks on various lifecycle events. - run_config: Global settings for the entire agent run. - - Returns: - A run result containing all the inputs, guardrail results and the output of the last - agent. Agents may perform handoffs, so we don't know the specific type of the output. - """ - if hooks is None: - hooks = RunHooks[Any]() - if run_config is None: - run_config = RunConfig() - - tool_use_tracker = AgentToolUseTracker() - - with TraceCtxManager( - workflow_name=run_config.workflow_name, - trace_id=run_config.trace_id, - group_id=run_config.group_id, - metadata=run_config.trace_metadata, - disabled=run_config.tracing_disabled, - ): - current_turn = 0 - original_input: str | list[TResponseInputItem] = copy.deepcopy(input) - generated_items: list[RunItem] = [] - model_responses: list[ModelResponse] = [] - - context_wrapper: RunContextWrapper[TContext] = RunContextWrapper( - context=context, # type: ignore - ) - - input_guardrail_results: list[InputGuardrailResult] = [] - - current_span: Span[AgentSpanData] | None = None - current_agent = starting_agent - should_run_agent_start_hooks = True - - try: - while True: - # Start an agent span if we don't have one. This span is ended if the current - # agent changes, or if the agent loop ends. 
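# Illustrative sketch, not part of this patch: a typical invocation of the run loop
# implemented below, using a RunConfig like the one defined above. The Agent, Runner and
# RunConfig import paths follow the src/agents layout in these diffs; the agent's
# instructions and the input text are placeholders.
import asyncio
from agents import Agent, Runner
from agents.run import RunConfig

async def main() -> None:
    agent = Agent(name="Assistant", instructions="Reply with one short bullet list.")
    result = await Runner.run(
        agent,
        input="Outline a one-week social media plan.",
        max_turns=5,
        run_config=RunConfig(workflow_name="Planning run", tracing_disabled=True),
    )
    print(result.final_output)

asyncio.run(main())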
- if current_span is None: - handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)] - if output_schema := cls._get_output_schema(current_agent): - output_type_name = output_schema.output_type_name() - else: - output_type_name = "str" - - current_span = agent_span( - name=current_agent.name, - handoffs=handoff_names, - output_type=output_type_name, - ) - current_span.start(mark_as_current=True) - - all_tools = await cls._get_all_tools(current_agent) - current_span.span_data.tools = [t.name for t in all_tools] - - current_turn += 1 - if current_turn > max_turns: - _error_tracing.attach_error_to_span( - current_span, - SpanError( - message="Max turns exceeded", - data={"max_turns": max_turns}, - ), - ) - raise MaxTurnsExceeded(f"Max turns ({max_turns}) exceeded") - - logger.debug( - f"Running agent {current_agent.name} (turn {current_turn})", - ) - - if current_turn == 1: - input_guardrail_results, turn_result = await asyncio.gather( - cls._run_input_guardrails( - starting_agent, - starting_agent.input_guardrails - + (run_config.input_guardrails or []), - copy.deepcopy(input), - context_wrapper, - ), - cls._run_single_turn( - agent=current_agent, - all_tools=all_tools, - original_input=original_input, - generated_items=generated_items, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - should_run_agent_start_hooks=should_run_agent_start_hooks, - tool_use_tracker=tool_use_tracker, - ), - ) - else: - turn_result = await cls._run_single_turn( - agent=current_agent, - all_tools=all_tools, - original_input=original_input, - generated_items=generated_items, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - should_run_agent_start_hooks=should_run_agent_start_hooks, - tool_use_tracker=tool_use_tracker, - ) - should_run_agent_start_hooks = False - - model_responses.append(turn_result.model_response) - original_input = turn_result.original_input - generated_items = turn_result.generated_items - - if isinstance(turn_result.next_step, NextStepFinalOutput): - output_guardrail_results = await cls._run_output_guardrails( - current_agent.output_guardrails + (run_config.output_guardrails or []), - current_agent, - turn_result.next_step.output, - context_wrapper, - ) - return RunResult( - input=original_input, - new_items=generated_items, - raw_responses=model_responses, - final_output=turn_result.next_step.output, - _last_agent=current_agent, - input_guardrail_results=input_guardrail_results, - output_guardrail_results=output_guardrail_results, - ) - elif isinstance(turn_result.next_step, NextStepHandoff): - current_agent = cast(Agent[TContext], turn_result.next_step.new_agent) - current_span.finish(reset_current=True) - current_span = None - should_run_agent_start_hooks = True - elif isinstance(turn_result.next_step, NextStepRunAgain): - pass - else: - raise AgentsException( - f"Unknown next step type: {type(turn_result.next_step)}" - ) - finally: - if current_span: - current_span.finish(reset_current=True) - - @classmethod - def run_sync( - cls, - starting_agent: Agent[TContext], - input: str | list[TResponseInputItem], - *, - context: TContext | None = None, - max_turns: int = DEFAULT_MAX_TURNS, - hooks: RunHooks[TContext] | None = None, - run_config: RunConfig | None = None, - ) -> RunResult: - """Run a workflow synchronously, starting at the given agent. Note that this just wraps the - `run` method, so it will not work if there's already an event loop (e.g. 
inside an async - function, or in a Jupyter notebook or async context like FastAPI). For those cases, use - the `run` method instead. - - The agent will run in a loop until a final output is generated. The loop runs like so: - 1. The agent is invoked with the given input. - 2. If there is a final output (i.e. the agent produces something of type - `agent.output_type`, the loop terminates. - 3. If there's a handoff, we run the loop again, with the new agent. - 4. Else, we run tool calls (if any), and re-run the loop. - - In two cases, the agent may raise an exception: - 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. - 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised. - - Note that only the first agent's input guardrails are run. - - Args: - starting_agent: The starting agent to run. - input: The initial input to the agent. You can pass a single string for a user message, - or a list of input items. - context: The context to run the agent with. - max_turns: The maximum number of turns to run the agent for. A turn is defined as one - AI invocation (including any tool calls that might occur). - hooks: An object that receives callbacks on various lifecycle events. - run_config: Global settings for the entire agent run. - - Returns: - A run result containing all the inputs, guardrail results and the output of the last - agent. Agents may perform handoffs, so we don't know the specific type of the output. - """ - return asyncio.get_event_loop().run_until_complete( - cls.run( - starting_agent, - input, - context=context, - max_turns=max_turns, - hooks=hooks, - run_config=run_config, - ) - ) - - @classmethod - def run_streamed( - cls, - starting_agent: Agent[TContext], - input: str | list[TResponseInputItem], - context: TContext | None = None, - max_turns: int = DEFAULT_MAX_TURNS, - hooks: RunHooks[TContext] | None = None, - run_config: RunConfig | None = None, - ) -> RunResultStreaming: - """Run a workflow starting at the given agent in streaming mode. The returned result object - contains a method you can use to stream semantic events as they are generated. - - The agent will run in a loop until a final output is generated. The loop runs like so: - 1. The agent is invoked with the given input. - 2. If there is a final output (i.e. the agent produces something of type - `agent.output_type`, the loop terminates. - 3. If there's a handoff, we run the loop again, with the new agent. - 4. Else, we run tool calls (if any), and re-run the loop. - - In two cases, the agent may raise an exception: - 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. - 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised. - - Note that only the first agent's input guardrails are run. - - Args: - starting_agent: The starting agent to run. - input: The initial input to the agent. You can pass a single string for a user message, - or a list of input items. - context: The context to run the agent with. - max_turns: The maximum number of turns to run the agent for. A turn is defined as one - AI invocation (including any tool calls that might occur). - hooks: An object that receives callbacks on various lifecycle events. - run_config: Global settings for the entire agent run. - - Returns: - A result object that contains data about the run, as well as a method to stream events. 
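A sketch of how the streaming entry point described above is typically consumed, assuming `stream_events()` (referenced in the comments of this file) is an async iterator over the `StreamEvent` union defined in the stream_events.py module removed later in this patch. The agent and inputs are hypothetical.

from agents import Agent, Runner

async def stream_demo() -> None:
    agent = Agent(name="Assistant", instructions="Answer briefly.")  # illustrative only

    # run_streamed returns immediately; the agent loop runs in a background task.
    streamed_result = Runner.run_streamed(agent, input="Hello")

    # The type strings below match the Literal fields of the stream event dataclasses.
    async for event in streamed_result.stream_events():
        if event.type == "raw_response_event":
            continue  # raw LLM deltas passed straight through
        elif event.type == "agent_updated_stream_event":
            print("now running:", event.new_agent.name)
        elif event.type == "run_item_stream_event":
            print("produced item:", event.name)

    print("final output:", streamed_result.final_output)

# asyncio.run(stream_demo()) would drive this, as in the earlier sketch.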
- """ - if hooks is None: - hooks = RunHooks[Any]() - if run_config is None: - run_config = RunConfig() - - # If there's already a trace, we don't create a new one. In addition, we can't end the - # trace here, because the actual work is done in `stream_events` and this method ends - # before that. - new_trace = ( - None - if get_current_trace() - else trace( - workflow_name=run_config.workflow_name, - trace_id=run_config.trace_id, - group_id=run_config.group_id, - metadata=run_config.trace_metadata, - disabled=run_config.tracing_disabled, - ) - ) - # Need to start the trace here, because the current trace contextvar is captured at - # asyncio.create_task time - if new_trace: - new_trace.start(mark_as_current=True) - - output_schema = cls._get_output_schema(starting_agent) - context_wrapper: RunContextWrapper[TContext] = RunContextWrapper( - context=context # type: ignore - ) - - streamed_result = RunResultStreaming( - input=copy.deepcopy(input), - new_items=[], - current_agent=starting_agent, - raw_responses=[], - final_output=None, - is_complete=False, - current_turn=0, - max_turns=max_turns, - input_guardrail_results=[], - output_guardrail_results=[], - _current_agent_output_schema=output_schema, - _trace=new_trace, - ) - - # Kick off the actual agent loop in the background and return the streamed result object. - streamed_result._run_impl_task = asyncio.create_task( - cls._run_streamed_impl( - starting_input=input, - streamed_result=streamed_result, - starting_agent=starting_agent, - max_turns=max_turns, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - ) - ) - return streamed_result - - @classmethod - async def _run_input_guardrails_with_queue( - cls, - agent: Agent[Any], - guardrails: list[InputGuardrail[TContext]], - input: str | list[TResponseInputItem], - context: RunContextWrapper[TContext], - streamed_result: RunResultStreaming, - parent_span: Span[Any], - ): - queue = streamed_result._input_guardrail_queue - - # We'll run the guardrails and push them onto the queue as they complete - guardrail_tasks = [ - asyncio.create_task( - RunImpl.run_single_input_guardrail(agent, guardrail, input, context) - ) - for guardrail in guardrails - ] - guardrail_results = [] - try: - for done in asyncio.as_completed(guardrail_tasks): - result = await done - if result.output.tripwire_triggered: - _error_tracing.attach_error_to_span( - parent_span, - SpanError( - message="Guardrail tripwire triggered", - data={ - "guardrail": result.guardrail.get_name(), - "type": "input_guardrail", - }, - ), - ) - queue.put_nowait(result) - guardrail_results.append(result) - except Exception: - for t in guardrail_tasks: - t.cancel() - raise - - streamed_result.input_guardrail_results = guardrail_results - - @classmethod - async def _run_streamed_impl( - cls, - starting_input: str | list[TResponseInputItem], - streamed_result: RunResultStreaming, - starting_agent: Agent[TContext], - max_turns: int, - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - ): - current_span: Span[AgentSpanData] | None = None - current_agent = starting_agent - current_turn = 0 - should_run_agent_start_hooks = True - tool_use_tracker = AgentToolUseTracker() - - streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent)) - - try: - while True: - if streamed_result.is_complete: - break - - # Start an agent span if we don't have one. This span is ended if the current - # agent changes, or if the agent loop ends. 
- if current_span is None: - handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)] - if output_schema := cls._get_output_schema(current_agent): - output_type_name = output_schema.output_type_name() - else: - output_type_name = "str" - - current_span = agent_span( - name=current_agent.name, - handoffs=handoff_names, - output_type=output_type_name, - ) - current_span.start(mark_as_current=True) - - all_tools = await cls._get_all_tools(current_agent) - tool_names = [t.name for t in all_tools] - current_span.span_data.tools = tool_names - current_turn += 1 - streamed_result.current_turn = current_turn - - if current_turn > max_turns: - _error_tracing.attach_error_to_span( - current_span, - SpanError( - message="Max turns exceeded", - data={"max_turns": max_turns}, - ), - ) - streamed_result._event_queue.put_nowait(QueueCompleteSentinel()) - break - - if current_turn == 1: - # Run the input guardrails in the background and put the results on the queue - streamed_result._input_guardrails_task = asyncio.create_task( - cls._run_input_guardrails_with_queue( - starting_agent, - starting_agent.input_guardrails + (run_config.input_guardrails or []), - copy.deepcopy(ItemHelpers.input_to_new_input_list(starting_input)), - context_wrapper, - streamed_result, - current_span, - ) - ) - try: - turn_result = await cls._run_single_turn_streamed( - streamed_result, - current_agent, - hooks, - context_wrapper, - run_config, - should_run_agent_start_hooks, - tool_use_tracker, - all_tools, - ) - should_run_agent_start_hooks = False - - streamed_result.raw_responses = streamed_result.raw_responses + [ - turn_result.model_response - ] - streamed_result.input = turn_result.original_input - streamed_result.new_items = turn_result.generated_items - - if isinstance(turn_result.next_step, NextStepHandoff): - current_agent = turn_result.next_step.new_agent - current_span.finish(reset_current=True) - current_span = None - should_run_agent_start_hooks = True - streamed_result._event_queue.put_nowait( - AgentUpdatedStreamEvent(new_agent=current_agent) - ) - elif isinstance(turn_result.next_step, NextStepFinalOutput): - streamed_result._output_guardrails_task = asyncio.create_task( - cls._run_output_guardrails( - current_agent.output_guardrails - + (run_config.output_guardrails or []), - current_agent, - turn_result.next_step.output, - context_wrapper, - ) - ) - - try: - output_guardrail_results = await streamed_result._output_guardrails_task - except Exception: - # Exceptions will be checked in the stream_events loop - output_guardrail_results = [] - - streamed_result.output_guardrail_results = output_guardrail_results - streamed_result.final_output = turn_result.next_step.output - streamed_result.is_complete = True - streamed_result._event_queue.put_nowait(QueueCompleteSentinel()) - elif isinstance(turn_result.next_step, NextStepRunAgain): - pass - except Exception as e: - if current_span: - _error_tracing.attach_error_to_span( - current_span, - SpanError( - message="Error in agent run", - data={"error": str(e)}, - ), - ) - streamed_result.is_complete = True - streamed_result._event_queue.put_nowait(QueueCompleteSentinel()) - raise - - streamed_result.is_complete = True - finally: - if current_span: - current_span.finish(reset_current=True) - - @classmethod - async def _run_single_turn_streamed( - cls, - streamed_result: RunResultStreaming, - agent: Agent[TContext], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - should_run_agent_start_hooks: bool, 
- tool_use_tracker: AgentToolUseTracker, - all_tools: list[Tool], - ) -> SingleStepResult: - if should_run_agent_start_hooks: - await asyncio.gather( - hooks.on_agent_start(context_wrapper, agent), - ( - agent.hooks.on_start(context_wrapper, agent) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - - output_schema = cls._get_output_schema(agent) - - streamed_result.current_agent = agent - streamed_result._current_agent_output_schema = output_schema - - system_prompt = await agent.get_system_prompt(context_wrapper) - - handoffs = cls._get_handoffs(agent) - model = cls._get_model(agent, run_config) - model_settings = agent.model_settings.resolve(run_config.model_settings) - model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings) - - final_response: ModelResponse | None = None - - input = ItemHelpers.input_to_new_input_list(streamed_result.input) - input.extend([item.to_input_item() for item in streamed_result.new_items]) - - # 1. Stream the output events - async for event in model.stream_response( - system_prompt, - input, - model_settings, - all_tools, - output_schema, - handoffs, - get_model_tracing_impl( - run_config.tracing_disabled, run_config.trace_include_sensitive_data - ), - ): - if isinstance(event, ResponseCompletedEvent): - usage = ( - Usage( - requests=1, - input_tokens=event.response.usage.input_tokens, - output_tokens=event.response.usage.output_tokens, - total_tokens=event.response.usage.total_tokens, - ) - if event.response.usage - else Usage() - ) - final_response = ModelResponse( - output=event.response.output, - usage=usage, - referenceable_id=event.response.id, - ) - - streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event)) - - # 2. At this point, the streaming is complete for this turn of the agent loop. - if not final_response: - raise ModelBehaviorError("Model did not produce a final response!") - - # 3. 
Now, we can process the turn as we do in the non-streaming case - single_step_result = await cls._get_single_step_result_from_response( - agent=agent, - original_input=streamed_result.input, - pre_step_items=streamed_result.new_items, - new_response=final_response, - output_schema=output_schema, - all_tools=all_tools, - handoffs=handoffs, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - tool_use_tracker=tool_use_tracker, - ) - - RunImpl.stream_step_result_to_queue(single_step_result, streamed_result._event_queue) - return single_step_result - - @classmethod - async def _run_single_turn( - cls, - *, - agent: Agent[TContext], - all_tools: list[Tool], - original_input: str | list[TResponseInputItem], - generated_items: list[RunItem], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - should_run_agent_start_hooks: bool, - tool_use_tracker: AgentToolUseTracker, - ) -> SingleStepResult: - # Ensure we run the hooks before anything else - if should_run_agent_start_hooks: - await asyncio.gather( - hooks.on_agent_start(context_wrapper, agent), - ( - agent.hooks.on_start(context_wrapper, agent) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - - system_prompt = await agent.get_system_prompt(context_wrapper) - - output_schema = cls._get_output_schema(agent) - handoffs = cls._get_handoffs(agent) - input = ItemHelpers.input_to_new_input_list(original_input) - input.extend([generated_item.to_input_item() for generated_item in generated_items]) - - new_response = await cls._get_new_response( - agent, - system_prompt, - input, - output_schema, - all_tools, - handoffs, - context_wrapper, - run_config, - tool_use_tracker, - ) - - return await cls._get_single_step_result_from_response( - agent=agent, - original_input=original_input, - pre_step_items=generated_items, - new_response=new_response, - output_schema=output_schema, - all_tools=all_tools, - handoffs=handoffs, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - tool_use_tracker=tool_use_tracker, - ) - - @classmethod - async def _get_single_step_result_from_response( - cls, - *, - agent: Agent[TContext], - all_tools: list[Tool], - original_input: str | list[TResponseInputItem], - pre_step_items: list[RunItem], - new_response: ModelResponse, - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - tool_use_tracker: AgentToolUseTracker, - ) -> SingleStepResult: - processed_response = RunImpl.process_model_response( - agent=agent, - all_tools=all_tools, - response=new_response, - output_schema=output_schema, - handoffs=handoffs, - ) - - tool_use_tracker.add_tool_use(agent, processed_response.tools_used) - - return await RunImpl.execute_tools_and_side_effects( - agent=agent, - original_input=original_input, - pre_step_items=pre_step_items, - new_response=new_response, - processed_response=processed_response, - output_schema=output_schema, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - ) - - @classmethod - async def _run_input_guardrails( - cls, - agent: Agent[Any], - guardrails: list[InputGuardrail[TContext]], - input: str | list[TResponseInputItem], - context: RunContextWrapper[TContext], - ) -> list[InputGuardrailResult]: - if not guardrails: - return [] - - guardrail_tasks = [ - asyncio.create_task( - RunImpl.run_single_input_guardrail(agent, guardrail, input, context) - ) - for guardrail in 
guardrails - ] - - guardrail_results = [] - - for done in asyncio.as_completed(guardrail_tasks): - result = await done - if result.output.tripwire_triggered: - # Cancel all guardrail tasks if a tripwire is triggered. - for t in guardrail_tasks: - t.cancel() - _error_tracing.attach_error_to_current_span( - SpanError( - message="Guardrail tripwire triggered", - data={"guardrail": result.guardrail.get_name()}, - ) - ) - raise InputGuardrailTripwireTriggered(result) - else: - guardrail_results.append(result) - - return guardrail_results - - @classmethod - async def _run_output_guardrails( - cls, - guardrails: list[OutputGuardrail[TContext]], - agent: Agent[TContext], - agent_output: Any, - context: RunContextWrapper[TContext], - ) -> list[OutputGuardrailResult]: - if not guardrails: - return [] - - guardrail_tasks = [ - asyncio.create_task( - RunImpl.run_single_output_guardrail(guardrail, agent, agent_output, context) - ) - for guardrail in guardrails - ] - - guardrail_results = [] - - for done in asyncio.as_completed(guardrail_tasks): - result = await done - if result.output.tripwire_triggered: - # Cancel all guardrail tasks if a tripwire is triggered. - for t in guardrail_tasks: - t.cancel() - _error_tracing.attach_error_to_current_span( - SpanError( - message="Guardrail tripwire triggered", - data={"guardrail": result.guardrail.get_name()}, - ) - ) - raise OutputGuardrailTripwireTriggered(result) - else: - guardrail_results.append(result) - - return guardrail_results - - @classmethod - async def _get_new_response( - cls, - agent: Agent[TContext], - system_prompt: str | None, - input: list[TResponseInputItem], - output_schema: AgentOutputSchema | None, - all_tools: list[Tool], - handoffs: list[Handoff], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - tool_use_tracker: AgentToolUseTracker, - ) -> ModelResponse: - model = cls._get_model(agent, run_config) - model_settings = agent.model_settings.resolve(run_config.model_settings) - model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings) - - new_response = await model.get_response( - system_instructions=system_prompt, - input=input, - model_settings=model_settings, - tools=all_tools, - output_schema=output_schema, - handoffs=handoffs, - tracing=get_model_tracing_impl( - run_config.tracing_disabled, run_config.trace_include_sensitive_data - ), - ) - - context_wrapper.usage.add(new_response.usage) - - return new_response - - @classmethod - def _get_output_schema(cls, agent: Agent[Any]) -> AgentOutputSchema | None: - if agent.output_type is None or agent.output_type is str: - return None - - return AgentOutputSchema(agent.output_type) - - @classmethod - def _get_handoffs(cls, agent: Agent[Any]) -> list[Handoff]: - handoffs = [] - for handoff_item in agent.handoffs: - if isinstance(handoff_item, Handoff): - handoffs.append(handoff_item) - elif isinstance(handoff_item, Agent): - handoffs.append(handoff(handoff_item)) - return handoffs - - @classmethod - async def _get_all_tools(cls, agent: Agent[Any]) -> list[Tool]: - return await agent.get_all_tools() - - @classmethod - def _get_model(cls, agent: Agent[Any], run_config: RunConfig) -> Model: - if isinstance(run_config.model, Model): - return run_config.model - elif isinstance(run_config.model, str): - return run_config.model_provider.get_model(run_config.model) - elif isinstance(agent.model, Model): - return agent.model - - return run_config.model_provider.get_model(agent.model) diff --git a/src/agents/run_context.py 
b/src/agents/run_context.py deleted file mode 100644 index 579a215f..00000000 --- a/src/agents/run_context.py +++ /dev/null @@ -1,26 +0,0 @@ -from dataclasses import dataclass, field -from typing import Any, Generic - -from typing_extensions import TypeVar - -from .usage import Usage - -TContext = TypeVar("TContext", default=Any) - - -@dataclass -class RunContextWrapper(Generic[TContext]): - """This wraps the context object that you passed to `Runner.run()`. It also contains - information about the usage of the agent run so far. - - NOTE: Contexts are not passed to the LLM. They're a way to pass dependencies and data to code - you implement, like tool functions, callbacks, hooks, etc. - """ - - context: TContext - """The context object (or None), passed by you to `Runner.run()`""" - - usage: Usage = field(default_factory=Usage) - """The usage of the agent run so far. For streamed responses, the usage will be stale until the - last chunk of the stream is processed. - """ diff --git a/src/agents/stream_events.py b/src/agents/stream_events.py deleted file mode 100644 index bd37d11f..00000000 --- a/src/agents/stream_events.py +++ /dev/null @@ -1,58 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from typing import Any, Literal, Union - -from typing_extensions import TypeAlias - -from .agent import Agent -from .items import RunItem, TResponseStreamEvent - - -@dataclass -class RawResponsesStreamEvent: - """Streaming event from the LLM. These are 'raw' events, i.e. they are directly passed through - from the LLM. - """ - - data: TResponseStreamEvent - """The raw responses streaming event from the LLM.""" - - type: Literal["raw_response_event"] = "raw_response_event" - """The type of the event.""" - - -@dataclass -class RunItemStreamEvent: - """Streaming events that wrap a `RunItem`. As the agent processes the LLM response, it will - generate these events for new messages, tool calls, tool outputs, handoffs, etc. - """ - - name: Literal[ - "message_output_created", - "handoff_requested", - "handoff_occured", - "tool_called", - "tool_output", - "reasoning_item_created", - ] - """The name of the event.""" - - item: RunItem - """The item that was created.""" - - type: Literal["run_item_stream_event"] = "run_item_stream_event" - - -@dataclass -class AgentUpdatedStreamEvent: - """Event that notifies that there is a new agent running.""" - - new_agent: Agent[Any] - """The new agent.""" - - type: Literal["agent_updated_stream_event"] = "agent_updated_stream_event" - - -StreamEvent: TypeAlias = Union[RawResponsesStreamEvent, RunItemStreamEvent, AgentUpdatedStreamEvent] -"""A streaming event from an agent.""" diff --git a/src/agents/strict_schema.py b/src/agents/strict_schema.py deleted file mode 100644 index 3f37660a..00000000 --- a/src/agents/strict_schema.py +++ /dev/null @@ -1,167 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from openai import NOT_GIVEN -from typing_extensions import TypeGuard - -from .exceptions import UserError - -_EMPTY_SCHEMA = { - "additionalProperties": False, - "type": "object", - "properties": {}, - "required": [], -} - - -def ensure_strict_json_schema( - schema: dict[str, Any], -) -> dict[str, Any]: - """Mutates the given JSON schema to ensure it conforms to the `strict` standard - that the OpenAI API expects. 
- """ - if schema == {}: - return _EMPTY_SCHEMA - return _ensure_strict_json_schema(schema, path=(), root=schema) - - -# Adapted from https://github.com/openai/openai-python/blob/main/src/openai/lib/_pydantic.py -def _ensure_strict_json_schema( - json_schema: object, - *, - path: tuple[str, ...], - root: dict[str, object], -) -> dict[str, Any]: - if not is_dict(json_schema): - raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}") - - defs = json_schema.get("$defs") - if is_dict(defs): - for def_name, def_schema in defs.items(): - _ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name), root=root) - - definitions = json_schema.get("definitions") - if is_dict(definitions): - for definition_name, definition_schema in definitions.items(): - _ensure_strict_json_schema( - definition_schema, path=(*path, "definitions", definition_name), root=root - ) - - typ = json_schema.get("type") - if typ == "object" and "additionalProperties" not in json_schema: - json_schema["additionalProperties"] = False - elif ( - typ == "object" - and "additionalProperties" in json_schema - and json_schema["additionalProperties"] - ): - raise UserError( - "additionalProperties should not be set for object types. This could be because " - "you're using an older version of Pydantic, or because you configured additional " - "properties to be allowed. If you really need this, update the function or output tool " - "to not use a strict schema." - ) - - # object types - # { 'type': 'object', 'properties': { 'a': {...} } } - properties = json_schema.get("properties") - if is_dict(properties): - json_schema["required"] = list(properties.keys()) - json_schema["properties"] = { - key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key), root=root) - for key, prop_schema in properties.items() - } - - # arrays - # { 'type': 'array', 'items': {...} } - items = json_schema.get("items") - if is_dict(items): - json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items"), root=root) - - # unions - any_of = json_schema.get("anyOf") - if is_list(any_of): - json_schema["anyOf"] = [ - _ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i)), root=root) - for i, variant in enumerate(any_of) - ] - - # intersections - all_of = json_schema.get("allOf") - if is_list(all_of): - if len(all_of) == 1: - json_schema.update( - _ensure_strict_json_schema(all_of[0], path=(*path, "allOf", "0"), root=root) - ) - json_schema.pop("allOf") - else: - json_schema["allOf"] = [ - _ensure_strict_json_schema(entry, path=(*path, "allOf", str(i)), root=root) - for i, entry in enumerate(all_of) - ] - - # strip `None` defaults as there's no meaningful distinction here - # the schema will still be `nullable` and the model will default - # to using `None` anyway - if json_schema.get("default", NOT_GIVEN) is None: - json_schema.pop("default") - - # we can't use `$ref`s if there are also other properties defined, e.g. 
- # `{"$ref": "...", "description": "my description"}` - # - # so we unravel the ref - # `{"type": "string", "description": "my description"}` - ref = json_schema.get("$ref") - if ref and has_more_than_n_keys(json_schema, 1): - assert isinstance(ref, str), f"Received non-string $ref - {ref}" - - resolved = resolve_ref(root=root, ref=ref) - if not is_dict(resolved): - raise ValueError( - f"Expected `$ref: {ref}` to resolved to a dictionary but got {resolved}" - ) - - # properties from the json schema take priority over the ones on the `$ref` - json_schema.update({**resolved, **json_schema}) - json_schema.pop("$ref") - # Since the schema expanded from `$ref` might not have `additionalProperties: false` applied - # we call `_ensure_strict_json_schema` again to fix the inlined schema and ensure it's valid - return _ensure_strict_json_schema(json_schema, path=path, root=root) - - return json_schema - - -def resolve_ref(*, root: dict[str, object], ref: str) -> object: - if not ref.startswith("#/"): - raise ValueError(f"Unexpected $ref format {ref!r}; Does not start with #/") - - path = ref[2:].split("/") - resolved = root - for key in path: - value = resolved[key] - assert is_dict(value), ( - f"encountered non-dictionary entry while resolving {ref} - {resolved}" - ) - resolved = value - - return resolved - - -def is_dict(obj: object) -> TypeGuard[dict[str, object]]: - # just pretend that we know there are only `str` keys - # as that check is not worth the performance cost - return isinstance(obj, dict) - - -def is_list(obj: object) -> TypeGuard[list[object]]: - return isinstance(obj, list) - - -def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool: - i = 0 - for _ in obj.keys(): - i += 1 - if i > n: - return True - return False diff --git a/src/agents/tool.py b/src/agents/tool.py deleted file mode 100644 index c1c16242..00000000 --- a/src/agents/tool.py +++ /dev/null @@ -1,310 +0,0 @@ -from __future__ import annotations - -import inspect -import json -from collections.abc import Awaitable -from dataclasses import dataclass -from typing import Any, Callable, Literal, Union, overload - -from openai.types.responses.file_search_tool_param import Filters, RankingOptions -from openai.types.responses.web_search_tool_param import UserLocation -from pydantic import ValidationError -from typing_extensions import Concatenate, ParamSpec - -from . import _debug -from .computer import AsyncComputer, Computer -from .exceptions import ModelBehaviorError -from .function_schema import DocstringStyle, function_schema -from .items import RunItem -from .logger import logger -from .run_context import RunContextWrapper -from .tracing import SpanError -from .util import _error_tracing -from .util._types import MaybeAwaitable - -ToolParams = ParamSpec("ToolParams") - -ToolFunctionWithoutContext = Callable[ToolParams, Any] -ToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any] - -ToolFunction = Union[ToolFunctionWithoutContext[ToolParams], ToolFunctionWithContext[ToolParams]] - - -@dataclass -class FunctionToolResult: - tool: FunctionTool - """The tool that was run.""" - - output: Any - """The output of the tool.""" - - run_item: RunItem - """The run item that was produced as a result of the tool call.""" - - -@dataclass -class FunctionTool: - """A tool that wraps a function. In most cases, you should use the `function_tool` helpers to - create a FunctionTool, as they let you easily wrap a Python function. - """ - - name: str - """The name of the tool, as shown to the LLM. 
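As an aside on the strict_schema.py helper removed just above: a small sketch of the transformation it performs, with a hypothetical input schema. The import path is assumed from the file location in this diff.

from agents.strict_schema import ensure_strict_json_schema

schema = {
    "type": "object",
    "properties": {"city": {"type": "string"}},
}
strict = ensure_strict_json_schema(schema)

# Per the implementation above: object types gain additionalProperties: False,
# and every declared property is marked as required.
assert strict["additionalProperties"] is False
assert strict["required"] == ["city"]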
Generally the name of the function.""" - - description: str - """A description of the tool, as shown to the LLM.""" - - params_json_schema: dict[str, Any] - """The JSON schema for the tool's parameters.""" - - on_invoke_tool: Callable[[RunContextWrapper[Any], str], Awaitable[Any]] - """A function that invokes the tool with the given context and parameters. The params passed - are: - 1. The tool run context. - 2. The arguments from the LLM, as a JSON string. - - You must return a string representation of the tool output, or something we can call `str()` on. - In case of errors, you can either raise an Exception (which will cause the run to fail) or - return a string error message (which will be sent back to the LLM). - """ - - strict_json_schema: bool = True - """Whether the JSON schema is in strict mode. We **strongly** recommend setting this to True, - as it increases the likelihood of correct JSON input.""" - - -@dataclass -class FileSearchTool: - """A hosted tool that lets the LLM search through a vector store. Currently only supported with - OpenAI models, using the Responses API. - """ - - vector_store_ids: list[str] - """The IDs of the vector stores to search.""" - - max_num_results: int | None = None - """The maximum number of results to return.""" - - include_search_results: bool = False - """Whether to include the search results in the output produced by the LLM.""" - - ranking_options: RankingOptions | None = None - """Ranking options for search.""" - - filters: Filters | None = None - """A filter to apply based on file attributes.""" - - @property - def name(self): - return "file_search" - - -@dataclass -class WebSearchTool: - """A hosted tool that lets the LLM search the web. Currently only supported with OpenAI models, - using the Responses API. - """ - - user_location: UserLocation | None = None - """Optional location for the search. Lets you customize results to be relevant to a location.""" - - search_context_size: Literal["low", "medium", "high"] = "medium" - """The amount of context to use for the search.""" - - @property - def name(self): - return "web_search_preview" - - -@dataclass -class ComputerTool: - """A hosted tool that lets the LLM control a computer.""" - - computer: Computer | AsyncComputer - """The computer implementation, which describes the environment and dimensions of the computer, - as well as implements the computer actions like click, screenshot, etc. - """ - - @property - def name(self): - return "computer_use_preview" - - -Tool = Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool] -"""A tool that can be used in an agent.""" - - -def default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str: - """The default tool error function, which just returns a generic error message.""" - return f"An error occurred while running the tool. Please try again. Error: {str(error)}" - - -ToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]] - - -@overload -def function_tool( - func: ToolFunction[...], - *, - name_override: str | None = None, - description_override: str | None = None, - docstring_style: DocstringStyle | None = None, - use_docstring_info: bool = True, - failure_error_function: ToolErrorFunction | None = None, - strict_mode: bool = True, -) -> FunctionTool: - """Overload for usage as @function_tool (no parentheses).""" - ... 
- - -@overload -def function_tool( - *, - name_override: str | None = None, - description_override: str | None = None, - docstring_style: DocstringStyle | None = None, - use_docstring_info: bool = True, - failure_error_function: ToolErrorFunction | None = None, - strict_mode: bool = True, -) -> Callable[[ToolFunction[...]], FunctionTool]: - """Overload for usage as @function_tool(...).""" - ... - - -def function_tool( - func: ToolFunction[...] | None = None, - *, - name_override: str | None = None, - description_override: str | None = None, - docstring_style: DocstringStyle | None = None, - use_docstring_info: bool = True, - failure_error_function: ToolErrorFunction | None = default_tool_error_function, - strict_mode: bool = True, -) -> FunctionTool | Callable[[ToolFunction[...]], FunctionTool]: - """ - Decorator to create a FunctionTool from a function. By default, we will: - 1. Parse the function signature to create a JSON schema for the tool's parameters. - 2. Use the function's docstring to populate the tool's description. - 3. Use the function's docstring to populate argument descriptions. - The docstring style is detected automatically, but you can override it. - - If the function takes a `RunContextWrapper` as the first argument, it *must* match the - context type of the agent that uses the tool. - - Args: - func: The function to wrap. - name_override: If provided, use this name for the tool instead of the function's name. - description_override: If provided, use this description for the tool instead of the - function's docstring. - docstring_style: If provided, use this style for the tool's docstring. If not provided, - we will attempt to auto-detect the style. - use_docstring_info: If True, use the function's docstring to populate the tool's - description and argument descriptions. - failure_error_function: If provided, use this function to generate an error message when - the tool call fails. The error message is sent to the LLM. If you pass None, then no - error message will be sent and instead an Exception will be raised. - strict_mode: Whether to enable strict mode for the tool's JSON schema. We *strongly* - recommend setting this to True, as it increases the likelihood of correct JSON input. - If False, it allows non-strict JSON schemas. For example, if a parameter has a default - value, it will be optional, additional properties are allowed, etc. 
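A brief illustration of the decorator documented here (its implementation follows below), assuming `function_tool` is re-exported from the top-level `agents` package; the weather function and its return value are made up for the example.

from agents import function_tool

@function_tool
def get_weather(city: str) -> str:
    """Return a short weather summary for a city."""
    return f"It is always sunny in {city}."

# Used bare (no parentheses), the decorator replaces the function with a
# FunctionTool whose name and parameter schema are derived from the signature
# and docstring, as described above.
print(get_weather.name)                                        # -> "get_weather"
print("city" in get_weather.params_json_schema["properties"])  # -> True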
See here for more: - https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#supported-schemas - """ - - def _create_function_tool(the_func: ToolFunction[...]) -> FunctionTool: - schema = function_schema( - func=the_func, - name_override=name_override, - description_override=description_override, - docstring_style=docstring_style, - use_docstring_info=use_docstring_info, - strict_json_schema=strict_mode, - ) - - async def _on_invoke_tool_impl(ctx: RunContextWrapper[Any], input: str) -> Any: - try: - json_data: dict[str, Any] = json.loads(input) if input else {} - except Exception as e: - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Invalid JSON input for tool {schema.name}") - else: - logger.debug(f"Invalid JSON input for tool {schema.name}: {input}") - raise ModelBehaviorError( - f"Invalid JSON input for tool {schema.name}: {input}" - ) from e - - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Invoking tool {schema.name}") - else: - logger.debug(f"Invoking tool {schema.name} with input {input}") - - try: - parsed = ( - schema.params_pydantic_model(**json_data) - if json_data - else schema.params_pydantic_model() - ) - except ValidationError as e: - raise ModelBehaviorError(f"Invalid JSON input for tool {schema.name}: {e}") from e - - args, kwargs_dict = schema.to_call_args(parsed) - - if not _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Tool call args: {args}, kwargs: {kwargs_dict}") - - if inspect.iscoroutinefunction(the_func): - if schema.takes_context: - result = await the_func(ctx, *args, **kwargs_dict) - else: - result = await the_func(*args, **kwargs_dict) - else: - if schema.takes_context: - result = the_func(ctx, *args, **kwargs_dict) - else: - result = the_func(*args, **kwargs_dict) - - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Tool {schema.name} completed.") - else: - logger.debug(f"Tool {schema.name} returned {result}") - - return result - - async def _on_invoke_tool(ctx: RunContextWrapper[Any], input: str) -> Any: - try: - return await _on_invoke_tool_impl(ctx, input) - except Exception as e: - if failure_error_function is None: - raise - - result = failure_error_function(ctx, e) - if inspect.isawaitable(result): - return await result - - _error_tracing.attach_error_to_current_span( - SpanError( - message="Error running tool (non-fatal)", - data={ - "tool_name": schema.name, - "error": str(e), - }, - ) - ) - return result - - return FunctionTool( - name=schema.name, - description=schema.description or "", - params_json_schema=schema.params_json_schema, - on_invoke_tool=_on_invoke_tool, - strict_json_schema=strict_mode, - ) - - # If func is actually a callable, we were used as @function_tool with no parentheses - if callable(func): - return _create_function_tool(func) - - # Otherwise, we were used as @function_tool(...), so return a decorator - def decorator(real_func: ToolFunction[...]) -> FunctionTool: - return _create_function_tool(real_func) - - return decorator diff --git a/src/agents/tracing/__init__.py b/src/agents/tracing/__init__.py deleted file mode 100644 index 9df94426..00000000 --- a/src/agents/tracing/__init__.py +++ /dev/null @@ -1,113 +0,0 @@ -import atexit - -from .create import ( - agent_span, - custom_span, - function_span, - generation_span, - get_current_span, - get_current_trace, - guardrail_span, - handoff_span, - mcp_tools_span, - response_span, - speech_group_span, - speech_span, - trace, - transcription_span, -) -from .processor_interface import TracingProcessor -from .processors import default_exporter, default_processor 
-from .setup import GLOBAL_TRACE_PROVIDER -from .span_data import ( - AgentSpanData, - CustomSpanData, - FunctionSpanData, - GenerationSpanData, - GuardrailSpanData, - HandoffSpanData, - MCPListToolsSpanData, - ResponseSpanData, - SpanData, - SpeechGroupSpanData, - SpeechSpanData, - TranscriptionSpanData, -) -from .spans import Span, SpanError -from .traces import Trace -from .util import gen_span_id, gen_trace_id - -__all__ = [ - "add_trace_processor", - "agent_span", - "custom_span", - "function_span", - "generation_span", - "get_current_span", - "get_current_trace", - "guardrail_span", - "handoff_span", - "response_span", - "set_trace_processors", - "set_tracing_disabled", - "trace", - "Trace", - "SpanError", - "Span", - "SpanData", - "AgentSpanData", - "CustomSpanData", - "FunctionSpanData", - "GenerationSpanData", - "GuardrailSpanData", - "HandoffSpanData", - "MCPListToolsSpanData", - "ResponseSpanData", - "SpeechGroupSpanData", - "SpeechSpanData", - "TranscriptionSpanData", - "TracingProcessor", - "gen_trace_id", - "gen_span_id", - "speech_group_span", - "speech_span", - "transcription_span", - "mcp_tools_span", -] - - -def add_trace_processor(span_processor: TracingProcessor) -> None: - """ - Adds a new trace processor. This processor will receive all traces/spans. - """ - GLOBAL_TRACE_PROVIDER.register_processor(span_processor) - - -def set_trace_processors(processors: list[TracingProcessor]) -> None: - """ - Set the list of trace processors. This will replace the current list of processors. - """ - GLOBAL_TRACE_PROVIDER.set_processors(processors) - - -def set_tracing_disabled(disabled: bool) -> None: - """ - Set whether tracing is globally disabled. - """ - GLOBAL_TRACE_PROVIDER.set_disabled(disabled) - - -def set_tracing_export_api_key(api_key: str) -> None: - """ - Set the OpenAI API key for the backend exporter. - """ - default_exporter().set_api_key(api_key) - - -# Add the default processor, which exports traces and spans to the backend in batches. You can -# change the default behavior by either: -# 1. calling add_trace_processor(), which adds additional processors, or -# 2. calling set_trace_processors(), which replaces the default processor. -add_trace_processor(default_processor()) - -atexit.register(GLOBAL_TRACE_PROVIDER.shutdown) diff --git a/src/agents/tracing/create.py b/src/agents/tracing/create.py deleted file mode 100644 index b6fe4610..00000000 --- a/src/agents/tracing/create.py +++ /dev/null @@ -1,455 +0,0 @@ -from __future__ import annotations - -from collections.abc import Mapping, Sequence -from typing import TYPE_CHECKING, Any - -from ..logger import logger -from .setup import GLOBAL_TRACE_PROVIDER -from .span_data import ( - AgentSpanData, - CustomSpanData, - FunctionSpanData, - GenerationSpanData, - GuardrailSpanData, - HandoffSpanData, - MCPListToolsSpanData, - ResponseSpanData, - SpeechGroupSpanData, - SpeechSpanData, - TranscriptionSpanData, -) -from .spans import Span -from .traces import Trace - -if TYPE_CHECKING: - from openai.types.responses import Response - - -def trace( - workflow_name: str, - trace_id: str | None = None, - group_id: str | None = None, - metadata: dict[str, Any] | None = None, - disabled: bool = False, -) -> Trace: - """ - Create a new trace. The trace will not be started automatically; you should either use - it as a context manager (`with trace(...):`) or call `trace.start()` + `trace.finish()` - manually. 
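A sketch of the context-manager usage mentioned above, grouping two runs under a single workflow trace. `trace` is imported from the tracing package shown in this diff; the agents and inputs are hypothetical, and `Agent`/`Runner` are assumed to be re-exported from the top-level `agents` package.

import asyncio

from agents import Agent, Runner
from agents.tracing import trace

async def graded_joke() -> None:
    joker = Agent(name="Joker", instructions="Tell a short joke.")    # illustrative
    grader = Agent(name="Grader", instructions="Rate the joke 1-5.")  # illustrative

    # Both runs below are recorded as spans under one trace, using the
    # context-manager form described in the docstring above.
    with trace("Joke workflow"):
        joke = await Runner.run(joker, input="Tell me a joke about tracing.")
        rating = await Runner.run(grader, input=joke.final_output)
        print(rating.final_output)

asyncio.run(graded_joke())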
- - In addition to the workflow name and optional grouping identifier, you can provide - an arbitrary metadata dictionary to attach additional user-defined information to - the trace. - - Args: - workflow_name: The name of the logical app or workflow. For example, you might provide - "code_bot" for a coding agent, or "customer_support_agent" for a customer support agent. - trace_id: The ID of the trace. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_trace_id()` to generate a trace ID, to guarantee that IDs are - correctly formatted. - group_id: Optional grouping identifier to link multiple traces from the same conversation - or process. For instance, you might use a chat thread ID. - metadata: Optional dictionary of additional metadata to attach to the trace. - disabled: If True, we will return a Trace but the Trace will not be recorded. This will - not be checked if there's an existing trace and `even_if_trace_running` is True. - - Returns: - The newly created trace object. - """ - current_trace = GLOBAL_TRACE_PROVIDER.get_current_trace() - if current_trace: - logger.warning( - "Trace already exists. Creating a new trace, but this is probably a mistake." - ) - - return GLOBAL_TRACE_PROVIDER.create_trace( - name=workflow_name, - trace_id=trace_id, - group_id=group_id, - metadata=metadata, - disabled=disabled, - ) - - -def get_current_trace() -> Trace | None: - """Returns the currently active trace, if present.""" - return GLOBAL_TRACE_PROVIDER.get_current_trace() - - -def get_current_span() -> Span[Any] | None: - """Returns the currently active span, if present.""" - return GLOBAL_TRACE_PROVIDER.get_current_span() - - -def agent_span( - name: str, - handoffs: list[str] | None = None, - tools: list[str] | None = None, - output_type: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[AgentSpanData]: - """Create a new agent span. The span will not be started automatically, you should either do - `with agent_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - name: The name of the agent. - handoffs: Optional list of agent names to which this agent could hand off control. - tools: Optional list of tool names available to this agent. - output_type: Optional name of the output type produced by the agent. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created agent span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=AgentSpanData(name=name, handoffs=handoffs, tools=tools, output_type=output_type), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def function_span( - name: str, - input: str | None = None, - output: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[FunctionSpanData]: - """Create a new function span. The span will not be started automatically, you should either do - `with function_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - name: The name of the function. - input: The input to the function. - output: The output of the function. 
- span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created function span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=FunctionSpanData(name=name, input=input, output=output), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def generation_span( - input: Sequence[Mapping[str, Any]] | None = None, - output: Sequence[Mapping[str, Any]] | None = None, - model: str | None = None, - model_config: Mapping[str, Any] | None = None, - usage: dict[str, Any] | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[GenerationSpanData]: - """Create a new generation span. The span will not be started automatically, you should either - do `with generation_span() ...` or call `span.start()` + `span.finish()` manually. - - This span captures the details of a model generation, including the - input message sequence, any generated outputs, the model name and - configuration, and usage data. If you only need to capture a model - response identifier, use `response_span()` instead. - - Args: - input: The sequence of input messages sent to the model. - output: The sequence of output messages received from the model. - model: The model identifier used for the generation. - model_config: The model configuration (hyperparameters) used. - usage: A dictionary of usage information (input tokens, output tokens, etc.). - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created generation span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=GenerationSpanData( - input=input, - output=output, - model=model, - model_config=model_config, - usage=usage, - ), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def response_span( - response: Response | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[ResponseSpanData]: - """Create a new response span. The span will not be started automatically, you should either do - `with response_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - response: The OpenAI Response object. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. 
- """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=ResponseSpanData(response=response), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def handoff_span( - from_agent: str | None = None, - to_agent: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[HandoffSpanData]: - """Create a new handoff span. The span will not be started automatically, you should either do - `with handoff_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - from_agent: The name of the agent that is handing off. - to_agent: The name of the agent that is receiving the handoff. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created handoff span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=HandoffSpanData(from_agent=from_agent, to_agent=to_agent), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def custom_span( - name: str, - data: dict[str, Any] | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[CustomSpanData]: - """Create a new custom span, to which you can add your own metadata. The span will not be - started automatically, you should either do `with custom_span() ...` or call - `span.start()` + `span.finish()` manually. - - Args: - name: The name of the custom span. - data: Arbitrary structured data to associate with the span. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created custom span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=CustomSpanData(name=name, data=data or {}), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def guardrail_span( - name: str, - triggered: bool = False, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[GuardrailSpanData]: - """Create a new guardrail span. The span will not be started automatically, you should either - do `with guardrail_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - name: The name of the guardrail. - triggered: Whether the guardrail was triggered. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. 
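A small sketch of the custom span helper defined above, used as a context manager inside an existing trace; the span name and data payload are hypothetical.

from agents.tracing import custom_span, trace

def index_documents() -> None:
    with trace("Indexing workflow"):
        # custom_span attaches arbitrary user-defined data to the current trace.
        with custom_span("load_documents", data={"source": "s3://example-bucket"}):
            documents = ["doc-1", "doc-2"]  # stand-in for real work
        print("indexed", len(documents), "documents")

index_documents()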
- """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=GuardrailSpanData(name=name, triggered=triggered), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def transcription_span( - model: str | None = None, - input: str | None = None, - input_format: str | None = "pcm", - output: str | None = None, - model_config: Mapping[str, Any] | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[TranscriptionSpanData]: - """Create a new transcription span. The span will not be started automatically, you should - either do `with transcription_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - model: The name of the model used for the speech-to-text. - input: The audio input of the speech-to-text transcription, as a base64 encoded string of - audio bytes. - input_format: The format of the audio input (defaults to "pcm"). - output: The output of the speech-to-text transcription. - model_config: The model configuration (hyperparameters) used. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created speech-to-text span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=TranscriptionSpanData( - input=input, - input_format=input_format, - output=output, - model=model, - model_config=model_config, - ), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def speech_span( - model: str | None = None, - input: str | None = None, - output: str | None = None, - output_format: str | None = "pcm", - model_config: Mapping[str, Any] | None = None, - first_content_at: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[SpeechSpanData]: - """Create a new speech span. The span will not be started automatically, you should either do - `with speech_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - model: The name of the model used for the text-to-speech. - input: The text input of the text-to-speech. - output: The audio output of the text-to-speech as base64 encoded string of PCM audio bytes. - output_format: The format of the audio output (defaults to "pcm"). - model_config: The model configuration (hyperparameters) used. - first_content_at: The time of the first byte of the audio output. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. 
- """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=SpeechSpanData( - model=model, - input=input, - output=output, - output_format=output_format, - model_config=model_config, - first_content_at=first_content_at, - ), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def speech_group_span( - input: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[SpeechGroupSpanData]: - """Create a new speech group span. The span will not be started automatically, you should - either do `with speech_group_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - input: The input text used for the speech request. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=SpeechGroupSpanData(input=input), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def mcp_tools_span( - server: str | None = None, - result: list[str] | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[MCPListToolsSpanData]: - """Create a new MCP list tools span. The span will not be started automatically, you should - either do `with mcp_tools_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - server: The name of the MCP server. - result: The result of the MCP list tools call. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=MCPListToolsSpanData(server=server, result=result), - span_id=span_id, - parent=parent, - disabled=disabled, - ) diff --git a/src/agents/tracing/logger.py b/src/agents/tracing/logger.py deleted file mode 100644 index 661d09b5..00000000 --- a/src/agents/tracing/logger.py +++ /dev/null @@ -1,3 +0,0 @@ -import logging - -logger = logging.getLogger("openai.agents.tracing") diff --git a/src/agents/tracing/processor_interface.py b/src/agents/tracing/processor_interface.py deleted file mode 100644 index 4dcd897c..00000000 --- a/src/agents/tracing/processor_interface.py +++ /dev/null @@ -1,69 +0,0 @@ -import abc -from typing import TYPE_CHECKING, Any - -if TYPE_CHECKING: - from .spans import Span - from .traces import Trace - - -class TracingProcessor(abc.ABC): - """Interface for processing spans.""" - - @abc.abstractmethod - def on_trace_start(self, trace: "Trace") -> None: - """Called when a trace is started. - - Args: - trace: The trace that started. - """ - pass - - @abc.abstractmethod - def on_trace_end(self, trace: "Trace") -> None: - """Called when a trace is finished. - - Args: - trace: The trace that started. - """ - pass - - @abc.abstractmethod - def on_span_start(self, span: "Span[Any]") -> None: - """Called when a span is started. - - Args: - span: The span that started. 
- """ - pass - - @abc.abstractmethod - def on_span_end(self, span: "Span[Any]") -> None: - """Called when a span is finished. Should not block or raise exceptions. - - Args: - span: The span that finished. - """ - pass - - @abc.abstractmethod - def shutdown(self) -> None: - """Called when the application stops.""" - pass - - @abc.abstractmethod - def force_flush(self) -> None: - """Forces an immediate flush of all queued spans/traces.""" - pass - - -class TracingExporter(abc.ABC): - """Exports traces and spans. For example, could log them or send them to a backend.""" - - @abc.abstractmethod - def export(self, items: list["Trace | Span[Any]"]) -> None: - """Exports a list of traces and spans. - - Args: - items: The items to export. - """ - pass diff --git a/src/agents/tracing/processors.py b/src/agents/tracing/processors.py deleted file mode 100644 index f929d05d..00000000 --- a/src/agents/tracing/processors.py +++ /dev/null @@ -1,276 +0,0 @@ -from __future__ import annotations - -import os -import queue -import random -import threading -import time -from functools import cached_property -from typing import Any - -import httpx - -from ..logger import logger -from .processor_interface import TracingExporter, TracingProcessor -from .spans import Span -from .traces import Trace - - -class ConsoleSpanExporter(TracingExporter): - """Prints the traces and spans to the console.""" - - def export(self, items: list[Trace | Span[Any]]) -> None: - for item in items: - if isinstance(item, Trace): - print(f"[Exporter] Export trace_id={item.trace_id}, name={item.name}, ") - else: - print(f"[Exporter] Export span: {item.export()}") - - -class BackendSpanExporter(TracingExporter): - def __init__( - self, - api_key: str | None = None, - organization: str | None = None, - project: str | None = None, - endpoint: str = "https://api.openai.com/v1/traces/ingest", - max_retries: int = 3, - base_delay: float = 1.0, - max_delay: float = 30.0, - ): - """ - Args: - api_key: The API key for the "Authorization" header. Defaults to - `os.environ["OPENAI_API_KEY"]` if not provided. - organization: The OpenAI organization to use. Defaults to - `os.environ["OPENAI_ORG_ID"]` if not provided. - project: The OpenAI project to use. Defaults to - `os.environ["OPENAI_PROJECT_ID"]` if not provided. - endpoint: The HTTP endpoint to which traces/spans are posted. - max_retries: Maximum number of retries upon failures. - base_delay: Base delay (in seconds) for the first backoff. - max_delay: Maximum delay (in seconds) for backoff growth. - """ - self._api_key = api_key - self._organization = organization - self._project = project - self.endpoint = endpoint - self.max_retries = max_retries - self.base_delay = base_delay - self.max_delay = max_delay - - # Keep a client open for connection pooling across multiple export calls - self._client = httpx.Client(timeout=httpx.Timeout(timeout=60, connect=5.0)) - - def set_api_key(self, api_key: str): - """Set the OpenAI API key for the exporter. - - Args: - api_key: The OpenAI API key to use. This is the same key used by the OpenAI Python - client. 
- """ - # We're specifically setting the underlying cached property as well - self._api_key = api_key - self.api_key = api_key - - @cached_property - def api_key(self): - return self._api_key or os.environ.get("OPENAI_API_KEY") - - @cached_property - def organization(self): - return self._organization or os.environ.get("OPENAI_ORG_ID") - - @cached_property - def project(self): - return self._project or os.environ.get("OPENAI_PROJECT_ID") - - def export(self, items: list[Trace | Span[Any]]) -> None: - if not items: - return - - if not self.api_key: - logger.warning("OPENAI_API_KEY is not set, skipping trace export") - return - - data = [item.export() for item in items if item.export()] - payload = {"data": data} - - headers = { - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json", - "OpenAI-Beta": "traces=v1", - } - - # Exponential backoff loop - attempt = 0 - delay = self.base_delay - while True: - attempt += 1 - try: - response = self._client.post(url=self.endpoint, headers=headers, json=payload) - - # If the response is successful, break out of the loop - if response.status_code < 300: - logger.debug(f"Exported {len(items)} items") - return - - # If the response is a client error (4xx), we wont retry - if 400 <= response.status_code < 500: - logger.error( - f"[non-fatal] Tracing client error {response.status_code}: {response.text}" - ) - return - - # For 5xx or other unexpected codes, treat it as transient and retry - logger.warning( - f"[non-fatal] Tracing: server error {response.status_code}, retrying." - ) - except httpx.RequestError as exc: - # Network or other I/O error, we'll retry - logger.warning(f"[non-fatal] Tracing: request failed: {exc}") - - # If we reach here, we need to retry or give up - if attempt >= self.max_retries: - logger.error("[non-fatal] Tracing: max retries reached, giving up on this batch.") - return - - # Exponential backoff + jitter - sleep_time = delay + random.uniform(0, 0.1 * delay) # 10% jitter - time.sleep(sleep_time) - delay = min(delay * 2, self.max_delay) - - def close(self): - """Close the underlying HTTP client.""" - self._client.close() - - -class BatchTraceProcessor(TracingProcessor): - """Some implementation notes: - 1. Using Queue, which is thread-safe. - 2. Using a background thread to export spans, to minimize any performance issues. - 3. Spans are stored in memory until they are exported. - """ - - def __init__( - self, - exporter: TracingExporter, - max_queue_size: int = 8192, - max_batch_size: int = 128, - schedule_delay: float = 5.0, - export_trigger_ratio: float = 0.7, - ): - """ - Args: - exporter: The exporter to use. - max_queue_size: The maximum number of spans to store in the queue. After this, we will - start dropping spans. - max_batch_size: The maximum number of spans to export in a single batch. - schedule_delay: The delay between checks for new spans to export. - export_trigger_ratio: The ratio of the queue size at which we will trigger an export. - """ - self._exporter = exporter - self._queue: queue.Queue[Trace | Span[Any]] = queue.Queue(maxsize=max_queue_size) - self._max_queue_size = max_queue_size - self._max_batch_size = max_batch_size - self._schedule_delay = schedule_delay - self._shutdown_event = threading.Event() - - # The queue size threshold at which we export immediately. 
- self._export_trigger_size = int(max_queue_size * export_trigger_ratio) - - # Track when we next *must* perform a scheduled export - self._next_export_time = time.time() + self._schedule_delay - - self._worker_thread = threading.Thread(target=self._run, daemon=True) - self._worker_thread.start() - - def on_trace_start(self, trace: Trace) -> None: - try: - self._queue.put_nowait(trace) - except queue.Full: - logger.warning("Queue is full, dropping trace.") - - def on_trace_end(self, trace: Trace) -> None: - # We send traces via on_trace_start, so we don't need to do anything here. - pass - - def on_span_start(self, span: Span[Any]) -> None: - # We send spans via on_span_end, so we don't need to do anything here. - pass - - def on_span_end(self, span: Span[Any]) -> None: - try: - self._queue.put_nowait(span) - except queue.Full: - logger.warning("Queue is full, dropping span.") - - def shutdown(self, timeout: float | None = None): - """ - Called when the application stops. We signal our thread to stop, then join it. - """ - self._shutdown_event.set() - self._worker_thread.join(timeout=timeout) - - def force_flush(self): - """ - Forces an immediate flush of all queued spans. - """ - self._export_batches(force=True) - - def _run(self): - while not self._shutdown_event.is_set(): - current_time = time.time() - queue_size = self._queue.qsize() - - # If it's time for a scheduled flush or queue is above the trigger threshold - if current_time >= self._next_export_time or queue_size >= self._export_trigger_size: - self._export_batches(force=False) - # Reset the next scheduled flush time - self._next_export_time = time.time() + self._schedule_delay - else: - # Sleep a short interval so we don't busy-wait. - time.sleep(0.2) - - # Final drain after shutdown - self._export_batches(force=True) - - def _export_batches(self, force: bool = False): - """Drains the queue and exports in batches. If force=True, export everything. - Otherwise, export up to `max_batch_size` repeatedly until the queue is empty or below a - certain threshold. 
- """ - while True: - items_to_export: list[Span[Any] | Trace] = [] - - # Gather a batch of spans up to max_batch_size - while not self._queue.empty() and ( - force or len(items_to_export) < self._max_batch_size - ): - try: - items_to_export.append(self._queue.get_nowait()) - except queue.Empty: - # Another thread might have emptied the queue between checks - break - - # If we collected nothing, we're done - if not items_to_export: - break - - # Export the batch - self._exporter.export(items_to_export) - - -# Create a shared global instance: -_global_exporter = BackendSpanExporter() -_global_processor = BatchTraceProcessor(_global_exporter) - - -def default_exporter() -> BackendSpanExporter: - """The default exporter, which exports traces and spans to the backend in batches.""" - return _global_exporter - - -def default_processor() -> BatchTraceProcessor: - """The default processor, which exports traces and spans to the backend in batches.""" - return _global_processor diff --git a/src/agents/tracing/scope.py b/src/agents/tracing/scope.py deleted file mode 100644 index 1d31c1bd..00000000 --- a/src/agents/tracing/scope.py +++ /dev/null @@ -1,49 +0,0 @@ -# Holds the current active span -import contextvars -from typing import TYPE_CHECKING, Any - -from ..logger import logger - -if TYPE_CHECKING: - from .spans import Span - from .traces import Trace - -_current_span: contextvars.ContextVar["Span[Any] | None"] = contextvars.ContextVar( - "current_span", default=None -) - -_current_trace: contextvars.ContextVar["Trace | None"] = contextvars.ContextVar( - "current_trace", default=None -) - - -class Scope: - """ - Manages the current span and trace in the context. - """ - - @classmethod - def get_current_span(cls) -> "Span[Any] | None": - return _current_span.get() - - @classmethod - def set_current_span(cls, span: "Span[Any] | None") -> "contextvars.Token[Span[Any] | None]": - return _current_span.set(span) - - @classmethod - def reset_current_span(cls, token: "contextvars.Token[Span[Any] | None]") -> None: - _current_span.reset(token) - - @classmethod - def get_current_trace(cls) -> "Trace | None": - return _current_trace.get() - - @classmethod - def set_current_trace(cls, trace: "Trace | None") -> "contextvars.Token[Trace | None]": - logger.debug(f"Setting current trace: {trace.trace_id if trace else None}") - return _current_trace.set(trace) - - @classmethod - def reset_current_trace(cls, token: "contextvars.Token[Trace | None]") -> None: - logger.debug("Resetting current trace") - _current_trace.reset(token) diff --git a/src/agents/tracing/setup.py b/src/agents/tracing/setup.py deleted file mode 100644 index 3a7c6ade..00000000 --- a/src/agents/tracing/setup.py +++ /dev/null @@ -1,211 +0,0 @@ -from __future__ import annotations - -import os -import threading -from typing import Any - -from ..logger import logger -from . import util -from .processor_interface import TracingProcessor -from .scope import Scope -from .spans import NoOpSpan, Span, SpanImpl, TSpanData -from .traces import NoOpTrace, Trace, TraceImpl - - -class SynchronousMultiTracingProcessor(TracingProcessor): - """ - Forwards all calls to a list of TracingProcessors, in order of registration. - """ - - def __init__(self): - # Using a tuple to avoid race conditions when iterating over processors - self._processors: tuple[TracingProcessor, ...] = () - self._lock = threading.Lock() - - def add_tracing_processor(self, tracing_processor: TracingProcessor): - """ - Add a processor to the list of processors. 
Each processor will receive all traces/spans. - """ - with self._lock: - self._processors += (tracing_processor,) - - def set_processors(self, processors: list[TracingProcessor]): - """ - Set the list of processors. This will replace the current list of processors. - """ - with self._lock: - self._processors = tuple(processors) - - def on_trace_start(self, trace: Trace) -> None: - """ - Called when a trace is started. - """ - for processor in self._processors: - processor.on_trace_start(trace) - - def on_trace_end(self, trace: Trace) -> None: - """ - Called when a trace is finished. - """ - for processor in self._processors: - processor.on_trace_end(trace) - - def on_span_start(self, span: Span[Any]) -> None: - """ - Called when a span is started. - """ - for processor in self._processors: - processor.on_span_start(span) - - def on_span_end(self, span: Span[Any]) -> None: - """ - Called when a span is finished. - """ - for processor in self._processors: - processor.on_span_end(span) - - def shutdown(self) -> None: - """ - Called when the application stops. - """ - for processor in self._processors: - logger.debug(f"Shutting down trace processor {processor}") - processor.shutdown() - - def force_flush(self): - """ - Force the processors to flush their buffers. - """ - for processor in self._processors: - processor.force_flush() - - -class TraceProvider: - def __init__(self): - self._multi_processor = SynchronousMultiTracingProcessor() - self._disabled = os.environ.get("OPENAI_AGENTS_DISABLE_TRACING", "false").lower() in ( - "true", - "1", - ) - - def register_processor(self, processor: TracingProcessor): - """ - Add a processor to the list of processors. Each processor will receive all traces/spans. - """ - self._multi_processor.add_tracing_processor(processor) - - def set_processors(self, processors: list[TracingProcessor]): - """ - Set the list of processors. This will replace the current list of processors. - """ - self._multi_processor.set_processors(processors) - - def get_current_trace(self) -> Trace | None: - """ - Returns the currently active trace, if any. - """ - return Scope.get_current_trace() - - def get_current_span(self) -> Span[Any] | None: - """ - Returns the currently active span, if any. - """ - return Scope.get_current_span() - - def set_disabled(self, disabled: bool) -> None: - """ - Set whether tracing is disabled. - """ - self._disabled = disabled - - def create_trace( - self, - name: str, - trace_id: str | None = None, - group_id: str | None = None, - metadata: dict[str, Any] | None = None, - disabled: bool = False, - ) -> Trace: - """ - Create a new trace. - """ - if self._disabled or disabled: - logger.debug(f"Tracing is disabled. Not creating trace {name}") - return NoOpTrace() - - trace_id = trace_id or util.gen_trace_id() - - logger.debug(f"Creating trace {name} with id {trace_id}") - - return TraceImpl( - name=name, - trace_id=trace_id, - group_id=group_id, - metadata=metadata, - processor=self._multi_processor, - ) - - def create_span( - self, - span_data: TSpanData, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, - ) -> Span[TSpanData]: - """ - Create a new span. - """ - if self._disabled or disabled: - logger.debug(f"Tracing is disabled. Not creating span {span_data}") - return NoOpSpan(span_data) - - if not parent: - current_span = Scope.get_current_span() - current_trace = Scope.get_current_trace() - if current_trace is None: - logger.error( - "No active trace. 
Make sure to start a trace with `trace()` first" - "Returning NoOpSpan." - ) - return NoOpSpan(span_data) - elif isinstance(current_trace, NoOpTrace) or isinstance(current_span, NoOpSpan): - logger.debug( - f"Parent {current_span} or {current_trace} is no-op, returning NoOpSpan" - ) - return NoOpSpan(span_data) - - parent_id = current_span.span_id if current_span else None - trace_id = current_trace.trace_id - - elif isinstance(parent, Trace): - if isinstance(parent, NoOpTrace): - logger.debug(f"Parent {parent} is no-op, returning NoOpSpan") - return NoOpSpan(span_data) - trace_id = parent.trace_id - parent_id = None - elif isinstance(parent, Span): - if isinstance(parent, NoOpSpan): - logger.debug(f"Parent {parent} is no-op, returning NoOpSpan") - return NoOpSpan(span_data) - parent_id = parent.span_id - trace_id = parent.trace_id - - logger.debug(f"Creating span {span_data} with id {span_id}") - - return SpanImpl( - trace_id=trace_id, - span_id=span_id, - parent_id=parent_id, - processor=self._multi_processor, - span_data=span_data, - ) - - def shutdown(self) -> None: - try: - logger.debug("Shutting down trace provider") - self._multi_processor.shutdown() - except Exception as e: - logger.error(f"Error shutting down trace provider: {e}") - - -GLOBAL_TRACE_PROVIDER = TraceProvider() diff --git a/src/agents/tracing/span_data.py b/src/agents/tracing/span_data.py deleted file mode 100644 index 260e4c45..00000000 --- a/src/agents/tracing/span_data.py +++ /dev/null @@ -1,374 +0,0 @@ -from __future__ import annotations - -import abc -from collections.abc import Mapping, Sequence -from typing import TYPE_CHECKING, Any - -if TYPE_CHECKING: - from openai.types.responses import Response, ResponseInputItemParam - - -class SpanData(abc.ABC): - """ - Represents span data in the trace. - """ - - @abc.abstractmethod - def export(self) -> dict[str, Any]: - """Export the span data as a dictionary.""" - pass - - @property - @abc.abstractmethod - def type(self) -> str: - """Return the type of the span.""" - pass - - -class AgentSpanData(SpanData): - """ - Represents an Agent Span in the trace. - Includes name, handoffs, tools, and output type. - """ - - __slots__ = ("name", "handoffs", "tools", "output_type") - - def __init__( - self, - name: str, - handoffs: list[str] | None = None, - tools: list[str] | None = None, - output_type: str | None = None, - ): - self.name = name - self.handoffs: list[str] | None = handoffs - self.tools: list[str] | None = tools - self.output_type: str | None = output_type - - @property - def type(self) -> str: - return "agent" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "name": self.name, - "handoffs": self.handoffs, - "tools": self.tools, - "output_type": self.output_type, - } - - -class FunctionSpanData(SpanData): - """ - Represents a Function Span in the trace. - Includes input, output and MCP data (if applicable). - """ - - __slots__ = ("name", "input", "output", "mcp_data") - - def __init__( - self, - name: str, - input: str | None, - output: Any | None, - mcp_data: dict[str, Any] | None = None, - ): - self.name = name - self.input = input - self.output = output - self.mcp_data = mcp_data - - @property - def type(self) -> str: - return "function" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "name": self.name, - "input": self.input, - "output": str(self.output) if self.output else None, - "mcp_data": self.mcp_data, - } - - -class GenerationSpanData(SpanData): - """ - Represents a Generation Span in the trace. 
- Includes input, output, model, model configuration, and usage. - """ - - __slots__ = ( - "input", - "output", - "model", - "model_config", - "usage", - ) - - def __init__( - self, - input: Sequence[Mapping[str, Any]] | None = None, - output: Sequence[Mapping[str, Any]] | None = None, - model: str | None = None, - model_config: Mapping[str, Any] | None = None, - usage: dict[str, Any] | None = None, - ): - self.input = input - self.output = output - self.model = model - self.model_config = model_config - self.usage = usage - - @property - def type(self) -> str: - return "generation" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "input": self.input, - "output": self.output, - "model": self.model, - "model_config": self.model_config, - "usage": self.usage, - } - - -class ResponseSpanData(SpanData): - """ - Represents a Response Span in the trace. - Includes response and input. - """ - - __slots__ = ("response", "input") - - def __init__( - self, - response: Response | None = None, - input: str | list[ResponseInputItemParam] | None = None, - ) -> None: - self.response = response - # This is not used by the OpenAI trace processors, but is useful for other tracing - # processor implementations - self.input = input - - @property - def type(self) -> str: - return "response" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "response_id": self.response.id if self.response else None, - } - - -class HandoffSpanData(SpanData): - """ - Represents a Handoff Span in the trace. - Includes source and desitnation agents. - """ - - __slots__ = ("from_agent", "to_agent") - - def __init__(self, from_agent: str | None, to_agent: str | None): - self.from_agent = from_agent - self.to_agent = to_agent - - @property - def type(self) -> str: - return "handoff" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "from_agent": self.from_agent, - "to_agent": self.to_agent, - } - - -class CustomSpanData(SpanData): - """ - Represents a Custom Span in the trace. - Includes name and data property bag. - """ - - __slots__ = ("name", "data") - - def __init__(self, name: str, data: dict[str, Any]): - self.name = name - self.data = data - - @property - def type(self) -> str: - return "custom" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "name": self.name, - "data": self.data, - } - - -class GuardrailSpanData(SpanData): - """ - Represents a Guardrail Span in the trace. - Includes name and triggered status. - """ - - __slots__ = ("name", "triggered") - - def __init__(self, name: str, triggered: bool = False): - self.name = name - self.triggered = triggered - - @property - def type(self) -> str: - return "guardrail" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "name": self.name, - "triggered": self.triggered, - } - - -class TranscriptionSpanData(SpanData): - """ - Represents a Transcription Span in the trace. - Includes input, output, model, and model configuration. 
- """ - - __slots__ = ( - "input", - "output", - "model", - "model_config", - ) - - def __init__( - self, - input: str | None = None, - input_format: str | None = "pcm", - output: str | None = None, - model: str | None = None, - model_config: Mapping[str, Any] | None = None, - ): - self.input = input - self.input_format = input_format - self.output = output - self.model = model - self.model_config = model_config - - @property - def type(self) -> str: - return "transcription" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "input": { - "data": self.input or "", - "format": self.input_format, - }, - "output": self.output, - "model": self.model, - "model_config": self.model_config, - } - - -class SpeechSpanData(SpanData): - """ - Represents a Speech Span in the trace. - Includes input, output, model, model configuration, and first content timestamp. - """ - - __slots__ = ("input", "output", "model", "model_config", "first_content_at") - - def __init__( - self, - input: str | None = None, - output: str | None = None, - output_format: str | None = "pcm", - model: str | None = None, - model_config: Mapping[str, Any] | None = None, - first_content_at: str | None = None, - ): - self.input = input - self.output = output - self.output_format = output_format - self.model = model - self.model_config = model_config - self.first_content_at = first_content_at - - @property - def type(self) -> str: - return "speech" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "input": self.input, - "output": { - "data": self.output or "", - "format": self.output_format, - }, - "model": self.model, - "model_config": self.model_config, - "first_content_at": self.first_content_at, - } - - -class SpeechGroupSpanData(SpanData): - """ - Represents a Speech Group Span in the trace. - """ - - __slots__ = "input" - - def __init__( - self, - input: str | None = None, - ): - self.input = input - - @property - def type(self) -> str: - return "speech-group" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "input": self.input, - } - - -class MCPListToolsSpanData(SpanData): - """ - Represents an MCP List Tools Span in the trace. - Includes server and result. - """ - - __slots__ = ( - "server", - "result", - ) - - def __init__(self, server: str | None = None, result: list[str] | None = None): - self.server = server - self.result = result - - @property - def type(self) -> str: - return "mcp_tools" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "server": self.server, - "result": self.result, - } diff --git a/src/agents/tracing/spans.py b/src/agents/tracing/spans.py deleted file mode 100644 index ee933e73..00000000 --- a/src/agents/tracing/spans.py +++ /dev/null @@ -1,264 +0,0 @@ -from __future__ import annotations - -import abc -import contextvars -from typing import Any, Generic, TypeVar - -from typing_extensions import TypedDict - -from ..logger import logger -from . 
import util -from .processor_interface import TracingProcessor -from .scope import Scope -from .span_data import SpanData - -TSpanData = TypeVar("TSpanData", bound=SpanData) - - -class SpanError(TypedDict): - message: str - data: dict[str, Any] | None - - -class Span(abc.ABC, Generic[TSpanData]): - @property - @abc.abstractmethod - def trace_id(self) -> str: - pass - - @property - @abc.abstractmethod - def span_id(self) -> str: - pass - - @property - @abc.abstractmethod - def span_data(self) -> TSpanData: - pass - - @abc.abstractmethod - def start(self, mark_as_current: bool = False): - """ - Start the span. - - Args: - mark_as_current: If true, the span will be marked as the current span. - """ - pass - - @abc.abstractmethod - def finish(self, reset_current: bool = False) -> None: - """ - Finish the span. - - Args: - reset_current: If true, the span will be reset as the current span. - """ - pass - - @abc.abstractmethod - def __enter__(self) -> Span[TSpanData]: - pass - - @abc.abstractmethod - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - @property - @abc.abstractmethod - def parent_id(self) -> str | None: - pass - - @abc.abstractmethod - def set_error(self, error: SpanError) -> None: - pass - - @property - @abc.abstractmethod - def error(self) -> SpanError | None: - pass - - @abc.abstractmethod - def export(self) -> dict[str, Any] | None: - pass - - @property - @abc.abstractmethod - def started_at(self) -> str | None: - pass - - @property - @abc.abstractmethod - def ended_at(self) -> str | None: - pass - - -class NoOpSpan(Span[TSpanData]): - __slots__ = ("_span_data", "_prev_span_token") - - def __init__(self, span_data: TSpanData): - self._span_data = span_data - self._prev_span_token: contextvars.Token[Span[TSpanData] | None] | None = None - - @property - def trace_id(self) -> str: - return "no-op" - - @property - def span_id(self) -> str: - return "no-op" - - @property - def span_data(self) -> TSpanData: - return self._span_data - - @property - def parent_id(self) -> str | None: - return None - - def start(self, mark_as_current: bool = False): - if mark_as_current: - self._prev_span_token = Scope.set_current_span(self) - - def finish(self, reset_current: bool = False) -> None: - if reset_current and self._prev_span_token is not None: - Scope.reset_current_span(self._prev_span_token) - self._prev_span_token = None - - def __enter__(self) -> Span[TSpanData]: - self.start(mark_as_current=True) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - reset_current = True - if exc_type is GeneratorExit: - logger.debug("GeneratorExit, skipping span reset") - reset_current = False - - self.finish(reset_current=reset_current) - - def set_error(self, error: SpanError) -> None: - pass - - @property - def error(self) -> SpanError | None: - return None - - def export(self) -> dict[str, Any] | None: - return None - - @property - def started_at(self) -> str | None: - return None - - @property - def ended_at(self) -> str | None: - return None - - -class SpanImpl(Span[TSpanData]): - __slots__ = ( - "_trace_id", - "_span_id", - "_parent_id", - "_started_at", - "_ended_at", - "_error", - "_prev_span_token", - "_processor", - "_span_data", - ) - - def __init__( - self, - trace_id: str, - span_id: str | None, - parent_id: str | None, - processor: TracingProcessor, - span_data: TSpanData, - ): - self._trace_id = trace_id - self._span_id = span_id or util.gen_span_id() - self._parent_id = parent_id - self._started_at: str | None = None - self._ended_at: str | None = None - 
self._processor = processor - self._error: SpanError | None = None - self._prev_span_token: contextvars.Token[Span[TSpanData] | None] | None = None - self._span_data = span_data - - @property - def trace_id(self) -> str: - return self._trace_id - - @property - def span_id(self) -> str: - return self._span_id - - @property - def span_data(self) -> TSpanData: - return self._span_data - - @property - def parent_id(self) -> str | None: - return self._parent_id - - def start(self, mark_as_current: bool = False): - if self.started_at is not None: - logger.warning("Span already started") - return - - self._started_at = util.time_iso() - self._processor.on_span_start(self) - if mark_as_current: - self._prev_span_token = Scope.set_current_span(self) - - def finish(self, reset_current: bool = False) -> None: - if self.ended_at is not None: - logger.warning("Span already finished") - return - - self._ended_at = util.time_iso() - self._processor.on_span_end(self) - if reset_current and self._prev_span_token is not None: - Scope.reset_current_span(self._prev_span_token) - self._prev_span_token = None - - def __enter__(self) -> Span[TSpanData]: - self.start(mark_as_current=True) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - reset_current = True - if exc_type is GeneratorExit: - logger.debug("GeneratorExit, skipping span reset") - reset_current = False - - self.finish(reset_current=reset_current) - - def set_error(self, error: SpanError) -> None: - self._error = error - - @property - def error(self) -> SpanError | None: - return self._error - - @property - def started_at(self) -> str | None: - return self._started_at - - @property - def ended_at(self) -> str | None: - return self._ended_at - - def export(self) -> dict[str, Any] | None: - return { - "object": "trace.span", - "id": self.span_id, - "trace_id": self.trace_id, - "parent_id": self._parent_id, - "started_at": self._started_at, - "ended_at": self._ended_at, - "span_data": self.span_data.export(), - "error": self._error, - } diff --git a/src/agents/tracing/traces.py b/src/agents/tracing/traces.py deleted file mode 100644 index 53d06284..00000000 --- a/src/agents/tracing/traces.py +++ /dev/null @@ -1,195 +0,0 @@ -from __future__ import annotations - -import abc -import contextvars -from typing import Any - -from ..logger import logger -from . import util -from .processor_interface import TracingProcessor -from .scope import Scope - - -class Trace: - """ - A trace is the root level object that tracing creates. It represents a logical "workflow". - """ - - @abc.abstractmethod - def __enter__(self) -> Trace: - pass - - @abc.abstractmethod - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - @abc.abstractmethod - def start(self, mark_as_current: bool = False): - """ - Start the trace. - - Args: - mark_as_current: If true, the trace will be marked as the current trace. - """ - pass - - @abc.abstractmethod - def finish(self, reset_current: bool = False): - """ - Finish the trace. - - Args: - reset_current: If true, the trace will be reset as the current trace. - """ - pass - - @property - @abc.abstractmethod - def trace_id(self) -> str: - """ - The trace ID. - """ - pass - - @property - @abc.abstractmethod - def name(self) -> str: - """ - The name of the workflow being traced. - """ - pass - - @abc.abstractmethod - def export(self) -> dict[str, Any] | None: - """ - Export the trace as a dictionary. - """ - pass - - -class NoOpTrace(Trace): - """ - A no-op trace that will not be recorded. 
- """ - - def __init__(self): - self._started = False - self._prev_context_token: contextvars.Token[Trace | None] | None = None - - def __enter__(self) -> Trace: - if self._started: - if not self._prev_context_token: - logger.error("Trace already started but no context token set") - return self - - self._started = True - self.start(mark_as_current=True) - - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.finish(reset_current=True) - - def start(self, mark_as_current: bool = False): - if mark_as_current: - self._prev_context_token = Scope.set_current_trace(self) - - def finish(self, reset_current: bool = False): - if reset_current and self._prev_context_token is not None: - Scope.reset_current_trace(self._prev_context_token) - self._prev_context_token = None - - @property - def trace_id(self) -> str: - return "no-op" - - @property - def name(self) -> str: - return "no-op" - - def export(self) -> dict[str, Any] | None: - return None - - -NO_OP_TRACE = NoOpTrace() - - -class TraceImpl(Trace): - """ - A trace that will be recorded by the tracing library. - """ - - __slots__ = ( - "_name", - "_trace_id", - "group_id", - "metadata", - "_prev_context_token", - "_processor", - "_started", - ) - - def __init__( - self, - name: str, - trace_id: str | None, - group_id: str | None, - metadata: dict[str, Any] | None, - processor: TracingProcessor, - ): - self._name = name - self._trace_id = trace_id or util.gen_trace_id() - self.group_id = group_id - self.metadata = metadata - self._prev_context_token: contextvars.Token[Trace | None] | None = None - self._processor = processor - self._started = False - - @property - def trace_id(self) -> str: - return self._trace_id - - @property - def name(self) -> str: - return self._name - - def start(self, mark_as_current: bool = False): - if self._started: - return - - self._started = True - self._processor.on_trace_start(self) - - if mark_as_current: - self._prev_context_token = Scope.set_current_trace(self) - - def finish(self, reset_current: bool = False): - if not self._started: - return - - self._processor.on_trace_end(self) - - if reset_current and self._prev_context_token is not None: - Scope.reset_current_trace(self._prev_context_token) - self._prev_context_token = None - - def __enter__(self) -> Trace: - if self._started: - if not self._prev_context_token: - logger.error("Trace already started but no context token set") - return self - - self.start(mark_as_current=True) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.finish(reset_current=exc_type is not GeneratorExit) - - def export(self) -> dict[str, Any] | None: - return { - "object": "trace", - "id": self.trace_id, - "workflow_name": self.name, - "group_id": self.group_id, - "metadata": self.metadata, - } diff --git a/src/agents/tracing/util.py b/src/agents/tracing/util.py deleted file mode 100644 index f546b4e5..00000000 --- a/src/agents/tracing/util.py +++ /dev/null @@ -1,22 +0,0 @@ -import uuid -from datetime import datetime, timezone - - -def time_iso() -> str: - """Returns the current time in ISO 8601 format.""" - return datetime.now(timezone.utc).isoformat() - - -def gen_trace_id() -> str: - """Generates a new trace ID.""" - return f"trace_{uuid.uuid4().hex}" - - -def gen_span_id() -> str: - """Generates a new span ID.""" - return f"span_{uuid.uuid4().hex[:24]}" - - -def gen_group_id() -> str: - """Generates a new group ID.""" - return f"group_{uuid.uuid4().hex[:24]}" diff --git a/src/agents/usage.py b/src/agents/usage.py deleted file mode 100644 
index 23d989b4..00000000 --- a/src/agents/usage.py +++ /dev/null @@ -1,22 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class Usage: - requests: int = 0 - """Total requests made to the LLM API.""" - - input_tokens: int = 0 - """Total input tokens sent, across all requests.""" - - output_tokens: int = 0 - """Total output tokens received, across all requests.""" - - total_tokens: int = 0 - """Total tokens sent and received, across all requests.""" - - def add(self, other: "Usage") -> None: - self.requests += other.requests if other.requests else 0 - self.input_tokens += other.input_tokens if other.input_tokens else 0 - self.output_tokens += other.output_tokens if other.output_tokens else 0 - self.total_tokens += other.total_tokens if other.total_tokens else 0 diff --git a/src/agents/util/__init__.py b/src/agents/util/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agents/util/_coro.py b/src/agents/util/_coro.py deleted file mode 100644 index 647ab86a..00000000 --- a/src/agents/util/_coro.py +++ /dev/null @@ -1,2 +0,0 @@ -async def noop_coroutine() -> None: - pass diff --git a/src/agents/util/_error_tracing.py b/src/agents/util/_error_tracing.py deleted file mode 100644 index 09dbb1de..00000000 --- a/src/agents/util/_error_tracing.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import Any - -from ..logger import logger -from ..tracing import Span, SpanError, get_current_span - - -def attach_error_to_span(span: Span[Any], error: SpanError) -> None: - span.set_error(error) - - -def attach_error_to_current_span(error: SpanError) -> None: - span = get_current_span() - if span: - attach_error_to_span(span, error) - else: - logger.warning(f"No span to add error {error} to") diff --git a/src/agents/util/_json.py b/src/agents/util/_json.py deleted file mode 100644 index 1e081f68..00000000 --- a/src/agents/util/_json.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import annotations - -from typing import Literal - -from pydantic import TypeAdapter, ValidationError -from typing_extensions import TypeVar - -from ..exceptions import ModelBehaviorError -from ..tracing import SpanError -from ._error_tracing import attach_error_to_current_span - -T = TypeVar("T") - - -def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T: - partial_setting: bool | Literal["off", "on", "trailing-strings"] = ( - "trailing-strings" if partial else False - ) - try: - validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting) - return validated - except ValidationError as e: - attach_error_to_current_span( - SpanError( - message="Invalid JSON provided", - data={}, - ) - ) - raise ModelBehaviorError( - f"Invalid JSON when parsing {json_str} for {type_adapter}; {e}" - ) from e diff --git a/src/agents/util/_pretty_print.py b/src/agents/util/_pretty_print.py deleted file mode 100644 index afd3e2b1..00000000 --- a/src/agents/util/_pretty_print.py +++ /dev/null @@ -1,56 +0,0 @@ -from typing import TYPE_CHECKING - -from pydantic import BaseModel - -if TYPE_CHECKING: - from ..result import RunResult, RunResultBase, RunResultStreaming - - -def _indent(text: str, indent_level: int) -> str: - indent_string = " " * indent_level - return "\n".join(f"{indent_string}{line}" for line in text.splitlines()) - - -def _final_output_str(result: "RunResultBase") -> str: - if result.final_output is None: - return "None" - elif isinstance(result.final_output, str): - return result.final_output - elif isinstance(result.final_output, BaseModel): 
- return result.final_output.model_dump_json(indent=2) - else: - return str(result.final_output) - - -def pretty_print_result(result: "RunResult") -> str: - output = "RunResult:" - output += f'\n- Last agent: Agent(name="{result.last_agent.name}", ...)' - output += ( - f"\n- Final output ({type(result.final_output).__name__}):\n" - f"{_indent(_final_output_str(result), 2)}" - ) - output += f"\n- {len(result.new_items)} new item(s)" - output += f"\n- {len(result.raw_responses)} raw response(s)" - output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)" - output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)" - output += "\n(See `RunResult` for more details)" - - return output - - -def pretty_print_run_result_streaming(result: "RunResultStreaming") -> str: - output = "RunResultStreaming:" - output += f'\n- Current agent: Agent(name="{result.current_agent.name}", ...)' - output += f"\n- Current turn: {result.current_turn}" - output += f"\n- Max turns: {result.max_turns}" - output += f"\n- Is complete: {result.is_complete}" - output += ( - f"\n- Final output ({type(result.final_output).__name__}):\n" - f"{_indent(_final_output_str(result), 2)}" - ) - output += f"\n- {len(result.new_items)} new item(s)" - output += f"\n- {len(result.raw_responses)} raw response(s)" - output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)" - output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)" - output += "\n(See `RunResultStreaming` for more details)" - return output diff --git a/src/agents/util/_transforms.py b/src/agents/util/_transforms.py deleted file mode 100644 index b303074d..00000000 --- a/src/agents/util/_transforms.py +++ /dev/null @@ -1,11 +0,0 @@ -import re - - -def transform_string_function_style(name: str) -> str: - # Replace spaces with underscores - name = name.replace(" ", "_") - - # Replace non-alphanumeric characters with underscores - name = re.sub(r"[^a-zA-Z0-9]", "_", name) - - return name.lower() diff --git a/src/agents/util/_types.py b/src/agents/util/_types.py deleted file mode 100644 index 8571a694..00000000 --- a/src/agents/util/_types.py +++ /dev/null @@ -1,7 +0,0 @@ -from collections.abc import Awaitable -from typing import Union - -from typing_extensions import TypeVar - -T = TypeVar("T") -MaybeAwaitable = Union[Awaitable[T], T] diff --git a/src/agents/util/schemas.py b/src/agents/util/schemas.py deleted file mode 100644 index c26c7ecb..00000000 --- a/src/agents/util/schemas.py +++ /dev/null @@ -1,17 +0,0 @@ -from typing import Literal, Optional, Dict, Union -from pydantic import BaseModel, Field - -class NewTask(BaseModel): - action: Literal["new_task"] - task_type: str - user_prompt: str - params: Dict = Field(default_factory=dict) - first_agent: Optional[str] = "auto" - -class NewMessage(BaseModel): - action: Literal["new_message"] - task_id: str - message: str - agent_session_id: Optional[str] = None - -Inbound = Union[NewTask, NewMessage] diff --git a/src/agents/util/services.py b/src/agents/util/services.py deleted file mode 100644 index 7e8f3142..00000000 --- a/src/agents/util/services.py +++ /dev/null @@ -1,34 +0,0 @@ -from your_orm_models import Task, AgentSession, Message # adapt import path -from agents.util.webhook import post_webhook, STRUCTURED_URL, CLARIFICATION_URL -from agents.runner import run_agent, decide_session # your existing runner - -async def handle_new_task(p: NewTask): - task = Task.create( - user_id=p.request_user.id, - title=p.user_prompt[:40], - 
type=p.task_type, - status="pending", - params=p.params, - ) - first_def = "manager" if p.first_agent == "auto" else p.first_agent - session = AgentSession.create( - task=task, agent_definition=first_def, status="running" - ) - Message.create(task=task, role="user", content=p.user_prompt) - await run_agent(session) - return {"task_id": task.id} - -async def handle_new_message(p: NewMessage): - Message.create( - task_id=p.task_id, - agent_session_id=p.agent_session_id, - role="user", - content=p.message, - ) - session = ( - AgentSession.get(p.agent_session_id) - if p.agent_session_id - else decide_session(p.task_id) - ) - await run_agent(session) - return {"ok": True} diff --git a/src/agents/util/webhook.py b/src/agents/util/webhook.py deleted file mode 100644 index 72e36954..00000000 --- a/src/agents/util/webhook.py +++ /dev/null @@ -1,51 +0,0 @@ -"""utils/webhook.py -A single, reusable helper for posting JSON payloads to Bubble‑workflow URLs. - -Usage in your FastAPI code: - - from agents.utils.webhook import send_webhook - - url = TASK_URL_MAP[task_type] # looked up from env‑vars - await send_webhook(url, flattened_payload) - -You keep *all* Bubble‑specific routing logic (task_type → URL) in your -FastAPI service, while this helper focuses solely on safe, idempotent -HTTP posting and basic allow‑list protection. -""" -from __future__ import annotations - -import os -import json -import httpx -from typing import Any, Mapping - -# ----------------------------------------------------------------------------- -# Configuration -# ----------------------------------------------------------------------------- -# Only allow POSTs to URLs that start with one of these roots (prevents exfiltration) -ALLOWED_ROOTS = os.getenv("BUBBLE_DOMAIN_ROOTS", "https://rgtnow.com").split(",") - -# Optional default timeout (seconds) for outbound webhook calls. -HTTP_TIMEOUT = float(os.getenv("WEBHOOK_TIMEOUT", "10")) - -# ----------------------------------------------------------------------------- -# Public helper -# ----------------------------------------------------------------------------- -async def send_webhook(target_url: str, payload: Mapping[str, Any]) -> None: - """POST *payload* as JSON to *target_url*. - - Raises: - ValueError: if *target_url* is outside the allowed Bubble domain roots. - httpx.HTTPStatusError: if Bubble responds with an error status code. 
- """ - if not any(target_url.startswith(root.strip()) for root in ALLOWED_ROOTS): - raise ValueError( - f"Refusing to POST to {target_url!r} — must begin with one of {ALLOWED_ROOTS!r}" - ) - - async with httpx.AsyncClient(timeout=HTTP_TIMEOUT) as client: - print("=== Webhook Dispatch →", target_url, "===\n", - json.dumps(payload, indent=2, default=str)) - resp = await client.post(target_url, json=payload) - resp.raise_for_status() - return None diff --git a/src/agents/version.py b/src/agents/version.py deleted file mode 100644 index 9b22499e..00000000 --- a/src/agents/version.py +++ /dev/null @@ -1,7 +0,0 @@ -import importlib.metadata - -try: - __version__ = importlib.metadata.version("openai-agents") -except importlib.metadata.PackageNotFoundError: - # Fallback if running from source without being installed - __version__ = "0.0.0" diff --git a/src/agents/voice/__init__.py b/src/agents/voice/__init__.py deleted file mode 100644 index 499c064c..00000000 --- a/src/agents/voice/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -from .events import VoiceStreamEvent, VoiceStreamEventAudio, VoiceStreamEventLifecycle -from .exceptions import STTWebsocketConnectionError -from .input import AudioInput, StreamedAudioInput -from .model import ( - StreamedTranscriptionSession, - STTModel, - STTModelSettings, - TTSModel, - TTSModelSettings, - VoiceModelProvider, -) -from .models.openai_model_provider import OpenAIVoiceModelProvider -from .models.openai_stt import OpenAISTTModel, OpenAISTTTranscriptionSession -from .models.openai_tts import OpenAITTSModel -from .pipeline import VoicePipeline -from .pipeline_config import VoicePipelineConfig -from .result import StreamedAudioResult -from .utils import get_sentence_based_splitter -from .workflow import ( - SingleAgentVoiceWorkflow, - SingleAgentWorkflowCallbacks, - VoiceWorkflowBase, - VoiceWorkflowHelper, -) - -__all__ = [ - "AudioInput", - "StreamedAudioInput", - "STTModel", - "STTModelSettings", - "TTSModel", - "TTSModelSettings", - "VoiceModelProvider", - "StreamedAudioResult", - "SingleAgentVoiceWorkflow", - "OpenAIVoiceModelProvider", - "OpenAISTTModel", - "OpenAITTSModel", - "VoiceStreamEventAudio", - "VoiceStreamEventLifecycle", - "VoiceStreamEvent", - "VoicePipeline", - "VoicePipelineConfig", - "get_sentence_based_splitter", - "VoiceWorkflowHelper", - "VoiceWorkflowBase", - "SingleAgentWorkflowCallbacks", - "StreamedTranscriptionSession", - "OpenAISTTTranscriptionSession", - "STTWebsocketConnectionError", -] diff --git a/src/agents/voice/events.py b/src/agents/voice/events.py deleted file mode 100644 index bdcd0815..00000000 --- a/src/agents/voice/events.py +++ /dev/null @@ -1,47 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from typing import Literal, Union - -from typing_extensions import TypeAlias - -from .imports import np, npt - - -@dataclass -class VoiceStreamEventAudio: - """Streaming event from the VoicePipeline""" - - data: npt.NDArray[np.int16 | np.float32] | None - """The audio data.""" - - type: Literal["voice_stream_event_audio"] = "voice_stream_event_audio" - """The type of event.""" - - -@dataclass -class VoiceStreamEventLifecycle: - """Streaming event from the VoicePipeline""" - - event: Literal["turn_started", "turn_ended", "session_ended"] - """The event that occurred.""" - - type: Literal["voice_stream_event_lifecycle"] = "voice_stream_event_lifecycle" - """The type of event.""" - - -@dataclass -class VoiceStreamEventError: - """Streaming event from the VoicePipeline""" - - error: Exception - 
"""The error that occurred.""" - - type: Literal["voice_stream_event_error"] = "voice_stream_event_error" - """The type of event.""" - - -VoiceStreamEvent: TypeAlias = Union[ - VoiceStreamEventAudio, VoiceStreamEventLifecycle, VoiceStreamEventError -] -"""An event from the `VoicePipeline`, streamed via `StreamedAudioResult.stream()`.""" diff --git a/src/agents/voice/exceptions.py b/src/agents/voice/exceptions.py deleted file mode 100644 index 97dccac8..00000000 --- a/src/agents/voice/exceptions.py +++ /dev/null @@ -1,8 +0,0 @@ -from ..exceptions import AgentsException - - -class STTWebsocketConnectionError(AgentsException): - """Exception raised when the STT websocket connection fails.""" - - def __init__(self, message: str): - self.message = message diff --git a/src/agents/voice/imports.py b/src/agents/voice/imports.py deleted file mode 100644 index b1c09508..00000000 --- a/src/agents/voice/imports.py +++ /dev/null @@ -1,11 +0,0 @@ -try: - import numpy as np - import numpy.typing as npt - import websockets -except ImportError as _e: - raise ImportError( - "`numpy` + `websockets` are required to use voice. You can install them via the optional " - "dependency group: `pip install 'openai-agents[voice]'`." - ) from _e - -__all__ = ["np", "npt", "websockets"] diff --git a/src/agents/voice/input.py b/src/agents/voice/input.py deleted file mode 100644 index 8613d27a..00000000 --- a/src/agents/voice/input.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import annotations - -import asyncio -import base64 -import io -import wave -from dataclasses import dataclass - -from ..exceptions import UserError -from .imports import np, npt - -DEFAULT_SAMPLE_RATE = 24000 - - -def _buffer_to_audio_file( - buffer: npt.NDArray[np.int16 | np.float32], - frame_rate: int = DEFAULT_SAMPLE_RATE, - sample_width: int = 2, - channels: int = 1, -) -> tuple[str, io.BytesIO, str]: - if buffer.dtype == np.float32: - # convert to int16 - buffer = np.clip(buffer, -1.0, 1.0) - buffer = (buffer * 32767).astype(np.int16) - elif buffer.dtype != np.int16: - raise UserError("Buffer must be a numpy array of int16 or float32") - - audio_file = io.BytesIO() - with wave.open(audio_file, "w") as wav_file: - wav_file.setnchannels(channels) - wav_file.setsampwidth(sample_width) - wav_file.setframerate(frame_rate) - wav_file.writeframes(buffer.tobytes()) - audio_file.seek(0) - - # (filename, bytes, content_type) - return ("audio.wav", audio_file, "audio/wav") - - -@dataclass -class AudioInput: - """Static audio to be used as input for the VoicePipeline.""" - - buffer: npt.NDArray[np.int16 | np.float32] - """ - A buffer containing the audio data for the agent. Must be a numpy array of int16 or float32. - """ - - frame_rate: int = DEFAULT_SAMPLE_RATE - """The sample rate of the audio data. Defaults to 24000.""" - - sample_width: int = 2 - """The sample width of the audio data. Defaults to 2.""" - - channels: int = 1 - """The number of channels in the audio data. 
Defaults to 1.""" - - def to_audio_file(self) -> tuple[str, io.BytesIO, str]: - """Returns a tuple of (filename, bytes, content_type)""" - return _buffer_to_audio_file(self.buffer, self.frame_rate, self.sample_width, self.channels) - - def to_base64(self) -> str: - """Returns the audio data as a base64 encoded string.""" - if self.buffer.dtype == np.float32: - # convert to int16 - self.buffer = np.clip(self.buffer, -1.0, 1.0) - self.buffer = (self.buffer * 32767).astype(np.int16) - elif self.buffer.dtype != np.int16: - raise UserError("Buffer must be a numpy array of int16 or float32") - - return base64.b64encode(self.buffer.tobytes()).decode("utf-8") - - -class StreamedAudioInput: - """Audio input represented as a stream of audio data. You can pass this to the `VoicePipeline` - and then push audio data into the queue using the `add_audio` method. - """ - - def __init__(self): - self.queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] = asyncio.Queue() - - async def add_audio(self, audio: npt.NDArray[np.int16 | np.float32]): - """Adds more audio data to the stream. - - Args: - audio: The audio data to add. Must be a numpy array of int16 or float32. - """ - await self.queue.put(audio) diff --git a/src/agents/voice/model.py b/src/agents/voice/model.py deleted file mode 100644 index 220d4b48..00000000 --- a/src/agents/voice/model.py +++ /dev/null @@ -1,193 +0,0 @@ -from __future__ import annotations - -import abc -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import Any, Callable, Literal - -from .imports import np, npt -from .input import AudioInput, StreamedAudioInput -from .utils import get_sentence_based_splitter - -DEFAULT_TTS_INSTRUCTIONS = ( - "You will receive partial sentences. Do not complete the sentence, just read out the text." -) -DEFAULT_TTS_BUFFER_SIZE = 120 - - -@dataclass -class TTSModelSettings: - """Settings for a TTS model.""" - - voice: ( - Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"] | None - ) = None - """ - The voice to use for the TTS model. If not provided, the default voice for the respective model - will be used. - """ - - buffer_size: int = 120 - """The minimal size of the chunks of audio data that are being streamed out.""" - - dtype: npt.DTypeLike = np.int16 - """The data type for the audio data to be returned in.""" - - transform_data: ( - Callable[[npt.NDArray[np.int16 | np.float32]], npt.NDArray[np.int16 | np.float32]] | None - ) = None - """ - A function to transform the data from the TTS model. This is useful if you want the resulting - audio stream to have the data in a specific shape already. - """ - - instructions: str = ( - "You will receive partial sentences. Do not complete the sentence just read out the text." - ) - """ - The instructions to use for the TTS model. This is useful if you want to control the tone of the - audio output. - """ - - text_splitter: Callable[[str], tuple[str, str]] = get_sentence_based_splitter() - """ - A function to split the text into chunks. This is useful if you want to split the text into - chunks before sending it to the TTS model rather than waiting for the whole text to be - processed. - """ - - speed: float | None = None - """The speed with which the TTS model will read the text. 
Between 0.25 and 4.0.""" - - -class TTSModel(abc.ABC): - """A text-to-speech model that can convert text into audio output.""" - - @property - @abc.abstractmethod - def model_name(self) -> str: - """The name of the TTS model.""" - pass - - @abc.abstractmethod - def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]: - """Given a text string, produces a stream of audio bytes, in PCM format. - - Args: - text: The text to convert to audio. - - Returns: - An async iterator of audio bytes, in PCM format. - """ - pass - - -class StreamedTranscriptionSession(abc.ABC): - """A streamed transcription of audio input.""" - - @abc.abstractmethod - def transcribe_turns(self) -> AsyncIterator[str]: - """Yields a stream of text transcriptions. Each transcription is a turn in the conversation. - - This method is expected to return only after `close()` is called. - """ - pass - - @abc.abstractmethod - async def close(self) -> None: - """Closes the session.""" - pass - - -@dataclass -class STTModelSettings: - """Settings for a speech-to-text model.""" - - prompt: str | None = None - """Instructions for the model to follow.""" - - language: str | None = None - """The language of the audio input.""" - - temperature: float | None = None - """The temperature of the model.""" - - turn_detection: dict[str, Any] | None = None - """The turn detection settings for the model when using streamed audio input.""" - - -class STTModel(abc.ABC): - """A speech-to-text model that can convert audio input into text.""" - - @property - @abc.abstractmethod - def model_name(self) -> str: - """The name of the STT model.""" - pass - - @abc.abstractmethod - async def transcribe( - self, - input: AudioInput, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ) -> str: - """Given an audio input, produces a text transcription. - - Args: - input: The audio input to transcribe. - settings: The settings to use for the transcription. - trace_include_sensitive_data: Whether to include sensitive data in traces. - trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces. - - Returns: - The text transcription of the audio input. - """ - pass - - @abc.abstractmethod - async def create_session( - self, - input: StreamedAudioInput, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ) -> StreamedTranscriptionSession: - """Creates a new transcription session, which you can push audio to, and receive a stream - of text transcriptions. - - Args: - input: The audio input to transcribe. - settings: The settings to use for the transcription. - trace_include_sensitive_data: Whether to include sensitive data in traces. - trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces. - - Returns: - A new transcription session. - """ - pass - - -class VoiceModelProvider(abc.ABC): - """The base interface for a voice model provider. - - A model provider is responsible for creating speech-to-text and text-to-speech models, given a - name. - """ - - @abc.abstractmethod - def get_stt_model(self, model_name: str | None) -> STTModel: - """Get a speech-to-text model by name. - - Args: - model_name: The name of the model to get. - - Returns: - The speech-to-text model. 
- """ - pass - - @abc.abstractmethod - def get_tts_model(self, model_name: str | None) -> TTSModel: - """Get a text-to-speech model by name.""" diff --git a/src/agents/voice/models/__init__.py b/src/agents/voice/models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agents/voice/models/openai_model_provider.py b/src/agents/voice/models/openai_model_provider.py deleted file mode 100644 index 094df4cc..00000000 --- a/src/agents/voice/models/openai_model_provider.py +++ /dev/null @@ -1,97 +0,0 @@ -from __future__ import annotations - -import httpx -from openai import AsyncOpenAI, DefaultAsyncHttpxClient - -from ...models import _openai_shared -from ..model import STTModel, TTSModel, VoiceModelProvider -from .openai_stt import OpenAISTTModel -from .openai_tts import OpenAITTSModel - -_http_client: httpx.AsyncClient | None = None - - -# If we create a new httpx client for each request, that would mean no sharing of connection pools, -# which would mean worse latency and resource usage. So, we share the client across requests. -def shared_http_client() -> httpx.AsyncClient: - global _http_client - if _http_client is None: - _http_client = DefaultAsyncHttpxClient() - return _http_client - - -DEFAULT_STT_MODEL = "gpt-4o-transcribe" -DEFAULT_TTS_MODEL = "gpt-4o-mini-tts" - - -class OpenAIVoiceModelProvider(VoiceModelProvider): - """A voice model provider that uses OpenAI models.""" - - def __init__( - self, - *, - api_key: str | None = None, - base_url: str | None = None, - openai_client: AsyncOpenAI | None = None, - organization: str | None = None, - project: str | None = None, - ) -> None: - """Create a new OpenAI voice model provider. - - Args: - api_key: The API key to use for the OpenAI client. If not provided, we will use the - default API key. - base_url: The base URL to use for the OpenAI client. If not provided, we will use the - default base URL. - openai_client: An optional OpenAI client to use. If not provided, we will create a new - OpenAI client using the api_key and base_url. - organization: The organization to use for the OpenAI client. - project: The project to use for the OpenAI client. - """ - if openai_client is not None: - assert api_key is None and base_url is None, ( - "Don't provide api_key or base_url if you provide openai_client" - ) - self._client: AsyncOpenAI | None = openai_client - else: - self._client = None - self._stored_api_key = api_key - self._stored_base_url = base_url - self._stored_organization = organization - self._stored_project = project - - # We lazy load the client in case you never actually use OpenAIProvider(). Otherwise - # AsyncOpenAI() raises an error if you don't have an API key set. - def _get_client(self) -> AsyncOpenAI: - if self._client is None: - self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI( - api_key=self._stored_api_key or _openai_shared.get_default_openai_key(), - base_url=self._stored_base_url, - organization=self._stored_organization, - project=self._stored_project, - http_client=shared_http_client(), - ) - - return self._client - - def get_stt_model(self, model_name: str | None) -> STTModel: - """Get a speech-to-text model by name. - - Args: - model_name: The name of the model to get. - - Returns: - The speech-to-text model. - """ - return OpenAISTTModel(model_name or DEFAULT_STT_MODEL, self._get_client()) - - def get_tts_model(self, model_name: str | None) -> TTSModel: - """Get a text-to-speech model by name. - - Args: - model_name: The name of the model to get. 
- - Returns: - The text-to-speech model. - """ - return OpenAITTSModel(model_name or DEFAULT_TTS_MODEL, self._get_client()) diff --git a/src/agents/voice/models/openai_stt.py b/src/agents/voice/models/openai_stt.py deleted file mode 100644 index 1ae4ea14..00000000 --- a/src/agents/voice/models/openai_stt.py +++ /dev/null @@ -1,456 +0,0 @@ -from __future__ import annotations - -import asyncio -import base64 -import json -import time -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import Any, cast - -from openai import AsyncOpenAI - -from ... import _debug -from ...exceptions import AgentsException -from ...logger import logger -from ...tracing import Span, SpanError, TranscriptionSpanData, transcription_span -from ..exceptions import STTWebsocketConnectionError -from ..imports import np, npt, websockets -from ..input import AudioInput, StreamedAudioInput -from ..model import StreamedTranscriptionSession, STTModel, STTModelSettings - -EVENT_INACTIVITY_TIMEOUT = 1000 # Timeout for inactivity in event processing -SESSION_CREATION_TIMEOUT = 10 # Timeout waiting for session.created event -SESSION_UPDATE_TIMEOUT = 10 # Timeout waiting for session.updated event - -DEFAULT_TURN_DETECTION = {"type": "semantic_vad"} - - -@dataclass -class ErrorSentinel: - error: Exception - - -class SessionCompleteSentinel: - pass - - -class WebsocketDoneSentinel: - pass - - -def _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str: - concatenated_audio = np.concatenate(audio_data) - if concatenated_audio.dtype == np.float32: - # convert to int16 - concatenated_audio = np.clip(concatenated_audio, -1.0, 1.0) - concatenated_audio = (concatenated_audio * 32767).astype(np.int16) - audio_bytes = concatenated_audio.tobytes() - return base64.b64encode(audio_bytes).decode("utf-8") - - -async def _wait_for_event( - event_queue: asyncio.Queue[dict[str, Any]], expected_types: list[str], timeout: float -): - """ - Wait for an event from event_queue whose type is in expected_types within the specified timeout. 
- """ - start_time = time.time() - while True: - remaining = timeout - (time.time() - start_time) - if remaining <= 0: - raise TimeoutError(f"Timeout waiting for event(s): {expected_types}") - evt = await asyncio.wait_for(event_queue.get(), timeout=remaining) - evt_type = evt.get("type", "") - if evt_type in expected_types: - return evt - elif evt_type == "error": - raise Exception(f"Error event: {evt.get('error')}") - - -class OpenAISTTTranscriptionSession(StreamedTranscriptionSession): - """A transcription session for OpenAI's STT model.""" - - def __init__( - self, - input: StreamedAudioInput, - client: AsyncOpenAI, - model: str, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ): - self.connected: bool = False - self._client = client - self._model = model - self._settings = settings - self._turn_detection = settings.turn_detection or DEFAULT_TURN_DETECTION - self._trace_include_sensitive_data = trace_include_sensitive_data - self._trace_include_sensitive_audio_data = trace_include_sensitive_audio_data - - self._input_queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] = input.queue - self._output_queue: asyncio.Queue[str | ErrorSentinel | SessionCompleteSentinel] = ( - asyncio.Queue() - ) - self._websocket: websockets.ClientConnection | None = None - self._event_queue: asyncio.Queue[dict[str, Any] | WebsocketDoneSentinel] = asyncio.Queue() - self._state_queue: asyncio.Queue[dict[str, Any]] = asyncio.Queue() - self._turn_audio_buffer: list[npt.NDArray[np.int16 | np.float32]] = [] - self._tracing_span: Span[TranscriptionSpanData] | None = None - - # tasks - self._listener_task: asyncio.Task[Any] | None = None - self._process_events_task: asyncio.Task[Any] | None = None - self._stream_audio_task: asyncio.Task[Any] | None = None - self._connection_task: asyncio.Task[Any] | None = None - self._stored_exception: Exception | None = None - - def _start_turn(self) -> None: - self._tracing_span = transcription_span( - model=self._model, - model_config={ - "temperature": self._settings.temperature, - "language": self._settings.language, - "prompt": self._settings.prompt, - "turn_detection": self._turn_detection, - }, - ) - self._tracing_span.start() - - def _end_turn(self, _transcript: str) -> None: - if len(_transcript) < 1: - return - - if self._tracing_span: - if self._trace_include_sensitive_audio_data: - self._tracing_span.span_data.input = _audio_to_base64(self._turn_audio_buffer) - - self._tracing_span.span_data.input_format = "pcm" - - if self._trace_include_sensitive_data: - self._tracing_span.span_data.output = _transcript - - self._tracing_span.finish() - self._turn_audio_buffer = [] - self._tracing_span = None - - async def _event_listener(self) -> None: - assert self._websocket is not None, "Websocket not initialized" - - async for message in self._websocket: - try: - event = json.loads(message) - - if event.get("type") == "error": - raise STTWebsocketConnectionError(f"Error event: {event.get('error')}") - - if event.get("type") in [ - "session.updated", - "transcription_session.updated", - "session.created", - "transcription_session.created", - ]: - await self._state_queue.put(event) - - await self._event_queue.put(event) - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise STTWebsocketConnectionError("Error parsing events") from e - await self._event_queue.put(WebsocketDoneSentinel()) - - async def _configure_session(self) -> None: - assert self._websocket is not None, "Websocket not 
initialized" - await self._websocket.send( - json.dumps( - { - "type": "transcription_session.update", - "session": { - "input_audio_format": "pcm16", - "input_audio_transcription": {"model": self._model}, - "turn_detection": self._turn_detection, - }, - } - ) - ) - - async def _setup_connection(self, ws: websockets.ClientConnection) -> None: - self._websocket = ws - self._listener_task = asyncio.create_task(self._event_listener()) - - try: - event = await _wait_for_event( - self._state_queue, - ["session.created", "transcription_session.created"], - SESSION_CREATION_TIMEOUT, - ) - except TimeoutError as e: - wrapped_err = STTWebsocketConnectionError( - "Timeout waiting for transcription_session.created event" - ) - await self._output_queue.put(ErrorSentinel(wrapped_err)) - raise wrapped_err from e - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise e - - await self._configure_session() - - try: - event = await _wait_for_event( - self._state_queue, - ["session.updated", "transcription_session.updated"], - SESSION_UPDATE_TIMEOUT, - ) - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("Session updated") - else: - logger.debug(f"Session updated: {event}") - except TimeoutError as e: - wrapped_err = STTWebsocketConnectionError( - "Timeout waiting for transcription_session.updated event" - ) - await self._output_queue.put(ErrorSentinel(wrapped_err)) - raise wrapped_err from e - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise - - async def _handle_events(self) -> None: - while True: - try: - event = await asyncio.wait_for( - self._event_queue.get(), timeout=EVENT_INACTIVITY_TIMEOUT - ) - if isinstance(event, WebsocketDoneSentinel): - # processed all events and websocket is done - break - - event_type = event.get("type", "unknown") - if event_type == "conversation.item.input_audio_transcription.completed": - transcript = cast(str, event.get("transcript", "")) - if len(transcript) > 0: - self._end_turn(transcript) - self._start_turn() - await self._output_queue.put(transcript) - await asyncio.sleep(0) # yield control - except asyncio.TimeoutError: - # No new events for a while. Assume the session is done. 
- break - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise e - await self._output_queue.put(SessionCompleteSentinel()) - - async def _stream_audio( - self, audio_queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] - ) -> None: - assert self._websocket is not None, "Websocket not initialized" - self._start_turn() - while True: - buffer = await audio_queue.get() - if buffer is None: - break - - self._turn_audio_buffer.append(buffer) - try: - await self._websocket.send( - json.dumps( - { - "type": "input_audio_buffer.append", - "audio": base64.b64encode(buffer.tobytes()).decode("utf-8"), - } - ) - ) - except websockets.ConnectionClosed: - break - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise e - - await asyncio.sleep(0) # yield control - - async def _process_websocket_connection(self) -> None: - try: - async with websockets.connect( - "wss://api.openai.com/v1/realtime?intent=transcription", - additional_headers={ - "Authorization": f"Bearer {self._client.api_key}", - "OpenAI-Beta": "realtime=v1", - "OpenAI-Log-Session": "1", - }, - ) as ws: - await self._setup_connection(ws) - self._process_events_task = asyncio.create_task(self._handle_events()) - self._stream_audio_task = asyncio.create_task(self._stream_audio(self._input_queue)) - self.connected = True - if self._listener_task: - await self._listener_task - else: - logger.error("Listener task not initialized") - raise AgentsException("Listener task not initialized") - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise e - - def _check_errors(self) -> None: - if self._connection_task and self._connection_task.done(): - exc = self._connection_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._process_events_task and self._process_events_task.done(): - exc = self._process_events_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._stream_audio_task and self._stream_audio_task.done(): - exc = self._stream_audio_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._listener_task and self._listener_task.done(): - exc = self._listener_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - def _cleanup_tasks(self) -> None: - if self._listener_task and not self._listener_task.done(): - self._listener_task.cancel() - - if self._process_events_task and not self._process_events_task.done(): - self._process_events_task.cancel() - - if self._stream_audio_task and not self._stream_audio_task.done(): - self._stream_audio_task.cancel() - - if self._connection_task and not self._connection_task.done(): - self._connection_task.cancel() - - async def transcribe_turns(self) -> AsyncIterator[str]: - self._connection_task = asyncio.create_task(self._process_websocket_connection()) - - while True: - try: - turn = await self._output_queue.get() - except asyncio.CancelledError: - break - - if ( - turn is None - or isinstance(turn, ErrorSentinel) - or isinstance(turn, SessionCompleteSentinel) - ): - self._output_queue.task_done() - break - yield turn - self._output_queue.task_done() - - if self._tracing_span: - self._end_turn("") - - if self._websocket: - await self._websocket.close() - - self._check_errors() - if self._stored_exception: - raise self._stored_exception - - async def close(self) -> None: - if self._websocket: - await self._websocket.close() - - self._cleanup_tasks() - - 
-class OpenAISTTModel(STTModel): - """A speech-to-text model for OpenAI.""" - - def __init__( - self, - model: str, - openai_client: AsyncOpenAI, - ): - """Create a new OpenAI speech-to-text model. - - Args: - model: The name of the model to use. - openai_client: The OpenAI client to use. - """ - self.model = model - self._client = openai_client - - @property - def model_name(self) -> str: - return self.model - - def _non_null_or_not_given(self, value: Any) -> Any: - return value if value is not None else None # NOT_GIVEN - - async def transcribe( - self, - input: AudioInput, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ) -> str: - """Transcribe an audio input. - - Args: - input: The audio input to transcribe. - settings: The settings to use for the transcription. - - Returns: - The transcribed text. - """ - with transcription_span( - model=self.model, - input=input.to_base64() if trace_include_sensitive_audio_data else "", - input_format="pcm", - model_config={ - "temperature": self._non_null_or_not_given(settings.temperature), - "language": self._non_null_or_not_given(settings.language), - "prompt": self._non_null_or_not_given(settings.prompt), - }, - ) as span: - try: - response = await self._client.audio.transcriptions.create( - model=self.model, - file=input.to_audio_file(), - prompt=self._non_null_or_not_given(settings.prompt), - language=self._non_null_or_not_given(settings.language), - temperature=self._non_null_or_not_given(settings.temperature), - ) - if trace_include_sensitive_data: - span.span_data.output = response.text - return response.text - except Exception as e: - span.span_data.output = "" - span.set_error(SpanError(message=str(e), data={})) - raise e - - async def create_session( - self, - input: StreamedAudioInput, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ) -> StreamedTranscriptionSession: - """Create a new transcription session. - - Args: - input: The audio input to transcribe. - settings: The settings to use for the transcription. - trace_include_sensitive_data: Whether to include sensitive data in traces. - trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces. - - Returns: - A new transcription session. - """ - return OpenAISTTTranscriptionSession( - input, - self._client, - self.model, - settings, - trace_include_sensitive_data, - trace_include_sensitive_audio_data, - ) diff --git a/src/agents/voice/models/openai_tts.py b/src/agents/voice/models/openai_tts.py deleted file mode 100644 index 3b7dcf15..00000000 --- a/src/agents/voice/models/openai_tts.py +++ /dev/null @@ -1,54 +0,0 @@ -from collections.abc import AsyncIterator -from typing import Literal - -from openai import AsyncOpenAI - -from ..model import TTSModel, TTSModelSettings - -DEFAULT_VOICE: Literal["ash"] = "ash" - - -class OpenAITTSModel(TTSModel): - """A text-to-speech model for OpenAI.""" - - def __init__( - self, - model: str, - openai_client: AsyncOpenAI, - ): - """Create a new OpenAI text-to-speech model. - - Args: - model: The name of the model to use. - openai_client: The OpenAI client to use. - """ - self.model = model - self._client = openai_client - - @property - def model_name(self) -> str: - return self.model - - async def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]: - """Run the text-to-speech model. - - Args: - text: The text to convert to speech. 
- settings: The settings to use for the text-to-speech model. - - Returns: - An iterator of audio chunks. - """ - response = self._client.audio.speech.with_streaming_response.create( - model=self.model, - voice=settings.voice or DEFAULT_VOICE, - input=text, - response_format="pcm", - extra_body={ - "instructions": settings.instructions, - }, - ) - - async with response as stream: - async for chunk in stream.iter_bytes(chunk_size=1024): - yield chunk diff --git a/src/agents/voice/pipeline.py b/src/agents/voice/pipeline.py deleted file mode 100644 index d1dac57c..00000000 --- a/src/agents/voice/pipeline.py +++ /dev/null @@ -1,151 +0,0 @@ -from __future__ import annotations - -import asyncio - -from .._run_impl import TraceCtxManager -from ..exceptions import UserError -from ..logger import logger -from .input import AudioInput, StreamedAudioInput -from .model import STTModel, TTSModel -from .pipeline_config import VoicePipelineConfig -from .result import StreamedAudioResult -from .workflow import VoiceWorkflowBase - - -class VoicePipeline: - """An opinionated voice agent pipeline. It works in three steps: - 1. Transcribe audio input into text. - 2. Run the provided `workflow`, which produces a sequence of text responses. - 3. Convert the text responses into streaming audio output. - """ - - def __init__( - self, - *, - workflow: VoiceWorkflowBase, - stt_model: STTModel | str | None = None, - tts_model: TTSModel | str | None = None, - config: VoicePipelineConfig | None = None, - ): - """Create a new voice pipeline. - - Args: - workflow: The workflow to run. See `VoiceWorkflowBase`. - stt_model: The speech-to-text model to use. If not provided, a default OpenAI - model will be used. - tts_model: The text-to-speech model to use. If not provided, a default OpenAI - model will be used. - config: The pipeline configuration. If not provided, a default configuration will be - used. - """ - self.workflow = workflow - self.stt_model = stt_model if isinstance(stt_model, STTModel) else None - self.tts_model = tts_model if isinstance(tts_model, TTSModel) else None - self._stt_model_name = stt_model if isinstance(stt_model, str) else None - self._tts_model_name = tts_model if isinstance(tts_model, str) else None - self.config = config or VoicePipelineConfig() - - async def run(self, audio_input: AudioInput | StreamedAudioInput) -> StreamedAudioResult: - """Run the voice pipeline. - - Args: - audio_input: The audio input to process. This can either be an `AudioInput` instance, - which is a single static buffer, or a `StreamedAudioInput` instance, which is a - stream of audio data that you can append to. - - Returns: - A `StreamedAudioResult` instance. You can use this object to stream audio events and - play them out. 
- """ - if isinstance(audio_input, AudioInput): - return await self._run_single_turn(audio_input) - elif isinstance(audio_input, StreamedAudioInput): - return await self._run_multi_turn(audio_input) - else: - raise UserError(f"Unsupported audio input type: {type(audio_input)}") - - def _get_tts_model(self) -> TTSModel: - if not self.tts_model: - self.tts_model = self.config.model_provider.get_tts_model(self._tts_model_name) - return self.tts_model - - def _get_stt_model(self) -> STTModel: - if not self.stt_model: - self.stt_model = self.config.model_provider.get_stt_model(self._stt_model_name) - return self.stt_model - - async def _process_audio_input(self, audio_input: AudioInput) -> str: - model = self._get_stt_model() - return await model.transcribe( - audio_input, - self.config.stt_settings, - self.config.trace_include_sensitive_data, - self.config.trace_include_sensitive_audio_data, - ) - - async def _run_single_turn(self, audio_input: AudioInput) -> StreamedAudioResult: - # Since this is single turn, we can use the TraceCtxManager to manage starting/ending the - # trace - with TraceCtxManager( - workflow_name=self.config.workflow_name or "Voice Agent", - trace_id=None, # Automatically generated - group_id=self.config.group_id, - metadata=self.config.trace_metadata, - disabled=self.config.tracing_disabled, - ): - input_text = await self._process_audio_input(audio_input) - - output = StreamedAudioResult( - self._get_tts_model(), self.config.tts_settings, self.config - ) - - async def stream_events(): - try: - async for text_event in self.workflow.run(input_text): - await output._add_text(text_event) - await output._turn_done() - await output._done() - except Exception as e: - logger.error(f"Error processing single turn: {e}") - await output._add_error(e) - raise e - - output._set_task(asyncio.create_task(stream_events())) - return output - - async def _run_multi_turn(self, audio_input: StreamedAudioInput) -> StreamedAudioResult: - with TraceCtxManager( - workflow_name=self.config.workflow_name or "Voice Agent", - trace_id=None, - group_id=self.config.group_id, - metadata=self.config.trace_metadata, - disabled=self.config.tracing_disabled, - ): - output = StreamedAudioResult( - self._get_tts_model(), self.config.tts_settings, self.config - ) - - transcription_session = await self._get_stt_model().create_session( - audio_input, - self.config.stt_settings, - self.config.trace_include_sensitive_data, - self.config.trace_include_sensitive_audio_data, - ) - - async def process_turns(): - try: - async for input_text in transcription_session.transcribe_turns(): - result = self.workflow.run(input_text) - async for text_event in result: - await output._add_text(text_event) - await output._turn_done() - except Exception as e: - logger.error(f"Error processing turns: {e}") - await output._add_error(e) - raise e - finally: - await transcription_session.close() - await output._done() - - output._set_task(asyncio.create_task(process_turns())) - return output diff --git a/src/agents/voice/pipeline_config.py b/src/agents/voice/pipeline_config.py deleted file mode 100644 index a4871612..00000000 --- a/src/agents/voice/pipeline_config.py +++ /dev/null @@ -1,46 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass, field -from typing import Any - -from ..tracing.util import gen_group_id -from .model import STTModelSettings, TTSModelSettings, VoiceModelProvider -from .models.openai_model_provider import OpenAIVoiceModelProvider - - -@dataclass -class VoicePipelineConfig: - 
"""Configuration for a `VoicePipeline`.""" - - model_provider: VoiceModelProvider = field(default_factory=OpenAIVoiceModelProvider) - """The voice model provider to use for the pipeline. Defaults to OpenAI.""" - - tracing_disabled: bool = False - """Whether to disable tracing of the pipeline. Defaults to `False`.""" - - trace_include_sensitive_data: bool = True - """Whether to include sensitive data in traces. Defaults to `True`. This is specifically for the - voice pipeline, and not for anything that goes on inside your Workflow.""" - - trace_include_sensitive_audio_data: bool = True - """Whether to include audio data in traces. Defaults to `True`.""" - - workflow_name: str = "Voice Agent" - """The name of the workflow to use for tracing. Defaults to `Voice Agent`.""" - - group_id: str = field(default_factory=gen_group_id) - """ - A grouping identifier to use for tracing, to link multiple traces from the same conversation - or process. If not provided, we will create a random group ID. - """ - - trace_metadata: dict[str, Any] | None = None - """ - An optional dictionary of additional metadata to include with the trace. - """ - - stt_settings: STTModelSettings = field(default_factory=STTModelSettings) - """The settings to use for the STT model.""" - - tts_settings: TTSModelSettings = field(default_factory=TTSModelSettings) - """The settings to use for the TTS model.""" diff --git a/src/agents/voice/result.py b/src/agents/voice/result.py deleted file mode 100644 index fea79902..00000000 --- a/src/agents/voice/result.py +++ /dev/null @@ -1,287 +0,0 @@ -from __future__ import annotations - -import asyncio -import base64 -from collections.abc import AsyncIterator -from typing import Any - -from ..exceptions import UserError -from ..logger import logger -from ..tracing import Span, SpeechGroupSpanData, speech_group_span, speech_span -from ..tracing.util import time_iso -from .events import ( - VoiceStreamEvent, - VoiceStreamEventAudio, - VoiceStreamEventError, - VoiceStreamEventLifecycle, -) -from .imports import np, npt -from .model import TTSModel, TTSModelSettings -from .pipeline_config import VoicePipelineConfig - - -def _audio_to_base64(audio_data: list[bytes]) -> str: - joined_audio_data = b"".join(audio_data) - return base64.b64encode(joined_audio_data).decode("utf-8") - - -class StreamedAudioResult: - """The output of a `VoicePipeline`. Streams events and audio data as they're generated.""" - - def __init__( - self, - tts_model: TTSModel, - tts_settings: TTSModelSettings, - voice_pipeline_config: VoicePipelineConfig, - ): - """Create a new `StreamedAudioResult` instance. - - Args: - tts_model: The TTS model to use. - tts_settings: The TTS settings to use. - voice_pipeline_config: The voice pipeline config to use. 
- """ - self.tts_model = tts_model - self.tts_settings = tts_settings - self.total_output_text = "" - self.instructions = tts_settings.instructions - self.text_generation_task: asyncio.Task[Any] | None = None - - self._voice_pipeline_config = voice_pipeline_config - self._text_buffer = "" - self._turn_text_buffer = "" - self._queue: asyncio.Queue[VoiceStreamEvent] = asyncio.Queue() - self._tasks: list[asyncio.Task[Any]] = [] - self._ordered_tasks: list[ - asyncio.Queue[VoiceStreamEvent | None] - ] = [] # New: list to hold local queues for each text segment - self._dispatcher_task: asyncio.Task[Any] | None = ( - None # Task to dispatch audio chunks in order - ) - - self._done_processing = False - self._buffer_size = tts_settings.buffer_size - self._started_processing_turn = False - self._first_byte_received = False - self._generation_start_time: str | None = None - self._completed_session = False - self._stored_exception: BaseException | None = None - self._tracing_span: Span[SpeechGroupSpanData] | None = None - - async def _start_turn(self): - if self._started_processing_turn: - return - - self._tracing_span = speech_group_span() - self._tracing_span.start() - self._started_processing_turn = True - self._first_byte_received = False - self._generation_start_time = time_iso() - await self._queue.put(VoiceStreamEventLifecycle(event="turn_started")) - - def _set_task(self, task: asyncio.Task[Any]): - self.text_generation_task = task - - async def _add_error(self, error: Exception): - await self._queue.put(VoiceStreamEventError(error)) - - def _transform_audio_buffer( - self, buffer: list[bytes], output_dtype: npt.DTypeLike - ) -> npt.NDArray[np.int16 | np.float32]: - np_array = np.frombuffer(b"".join(buffer), dtype=np.int16) - - if output_dtype == np.int16: - return np_array - elif output_dtype == np.float32: - return (np_array.astype(np.float32) / 32767.0).reshape(-1, 1) - else: - raise UserError("Invalid output dtype") - - async def _stream_audio( - self, - text: str, - local_queue: asyncio.Queue[VoiceStreamEvent | None], - finish_turn: bool = False, - ): - with speech_span( - model=self.tts_model.model_name, - input=text if self._voice_pipeline_config.trace_include_sensitive_data else "", - model_config={ - "voice": self.tts_settings.voice, - "instructions": self.instructions, - "speed": self.tts_settings.speed, - }, - output_format="pcm", - parent=self._tracing_span, - ) as tts_span: - try: - first_byte_received = False - buffer: list[bytes] = [] - full_audio_data: list[bytes] = [] - - async for chunk in self.tts_model.run(text, self.tts_settings): - if not first_byte_received: - first_byte_received = True - tts_span.span_data.first_content_at = time_iso() - - if chunk: - buffer.append(chunk) - full_audio_data.append(chunk) - if len(buffer) >= self._buffer_size: - audio_np = self._transform_audio_buffer(buffer, self.tts_settings.dtype) - if self.tts_settings.transform_data: - audio_np = self.tts_settings.transform_data(audio_np) - await local_queue.put( - VoiceStreamEventAudio(data=audio_np) - ) # Use local queue - buffer = [] - if buffer: - audio_np = self._transform_audio_buffer(buffer, self.tts_settings.dtype) - if self.tts_settings.transform_data: - audio_np = self.tts_settings.transform_data(audio_np) - await local_queue.put(VoiceStreamEventAudio(data=audio_np)) # Use local queue - - if self._voice_pipeline_config.trace_include_sensitive_audio_data: - tts_span.span_data.output = _audio_to_base64(full_audio_data) - else: - tts_span.span_data.output = "" - - if finish_turn: - await 
local_queue.put(VoiceStreamEventLifecycle(event="turn_ended")) - else: - await local_queue.put(None) # Signal completion for this segment - except Exception as e: - tts_span.set_error( - { - "message": str(e), - "data": { - "text": text - if self._voice_pipeline_config.trace_include_sensitive_data - else "", - }, - } - ) - logger.error(f"Error streaming audio: {e}") - - # Signal completion for whole session because of error - await local_queue.put(VoiceStreamEventLifecycle(event="session_ended")) - raise e - - async def _add_text(self, text: str): - await self._start_turn() - - self._text_buffer += text - self.total_output_text += text - self._turn_text_buffer += text - - combined_sentences, self._text_buffer = self.tts_settings.text_splitter(self._text_buffer) - - if len(combined_sentences) >= 20: - local_queue: asyncio.Queue[VoiceStreamEvent | None] = asyncio.Queue() - self._ordered_tasks.append(local_queue) - self._tasks.append( - asyncio.create_task(self._stream_audio(combined_sentences, local_queue)) - ) - if self._dispatcher_task is None: - self._dispatcher_task = asyncio.create_task(self._dispatch_audio()) - - async def _turn_done(self): - if self._text_buffer: - local_queue: asyncio.Queue[VoiceStreamEvent | None] = asyncio.Queue() - self._ordered_tasks.append(local_queue) # Append the local queue for the final segment - self._tasks.append( - asyncio.create_task( - self._stream_audio(self._text_buffer, local_queue, finish_turn=True) - ) - ) - self._text_buffer = "" - self._done_processing = True - if self._dispatcher_task is None: - self._dispatcher_task = asyncio.create_task(self._dispatch_audio()) - await asyncio.gather(*self._tasks) - - def _finish_turn(self): - if self._tracing_span: - if self._voice_pipeline_config.trace_include_sensitive_data: - self._tracing_span.span_data.input = self._turn_text_buffer - else: - self._tracing_span.span_data.input = "" - - self._tracing_span.finish() - self._tracing_span = None - self._turn_text_buffer = "" - self._started_processing_turn = False - - async def _done(self): - self._completed_session = True - await self._wait_for_completion() - - async def _dispatch_audio(self): - # Dispatch audio chunks from each segment in the order they were added - while True: - if len(self._ordered_tasks) == 0: - if self._completed_session: - break - await asyncio.sleep(0) - continue - local_queue = self._ordered_tasks.pop(0) - while True: - chunk = await local_queue.get() - if chunk is None: - break - await self._queue.put(chunk) - if isinstance(chunk, VoiceStreamEventLifecycle): - local_queue.task_done() - if chunk.event == "turn_ended": - self._finish_turn() - break - await self._queue.put(VoiceStreamEventLifecycle(event="session_ended")) - - async def _wait_for_completion(self): - tasks: list[asyncio.Task[Any]] = self._tasks - if self._dispatcher_task is not None: - tasks.append(self._dispatcher_task) - await asyncio.gather(*tasks) - - def _cleanup_tasks(self): - self._finish_turn() - - for task in self._tasks: - if not task.done(): - task.cancel() - - if self._dispatcher_task and not self._dispatcher_task.done(): - self._dispatcher_task.cancel() - - if self.text_generation_task and not self.text_generation_task.done(): - self.text_generation_task.cancel() - - def _check_errors(self): - for task in self._tasks: - if task.done(): - if task.exception(): - self._stored_exception = task.exception() - break - - async def stream(self) -> AsyncIterator[VoiceStreamEvent]: - """Stream the events and audio data as they're generated.""" - while True: - try: - 
event = await self._queue.get() - except asyncio.CancelledError: - break - if isinstance(event, VoiceStreamEventError): - self._stored_exception = event.error - logger.error(f"Error processing output: {event.error}") - break - if event is None: - break - yield event - if event.type == "voice_stream_event_lifecycle" and event.event == "session_ended": - break - - self._check_errors() - self._cleanup_tasks() - - if self._stored_exception: - raise self._stored_exception diff --git a/src/agents/voice/utils.py b/src/agents/voice/utils.py deleted file mode 100644 index 1535bd0d..00000000 --- a/src/agents/voice/utils.py +++ /dev/null @@ -1,37 +0,0 @@ -import re -from typing import Callable - - -def get_sentence_based_splitter( - min_sentence_length: int = 20, -) -> Callable[[str], tuple[str, str]]: - """Returns a function that splits text into chunks based on sentence boundaries. - - Args: - min_sentence_length: The minimum length of a sentence to be included in a chunk. - - Returns: - A function that splits text into chunks based on sentence boundaries. - """ - - def sentence_based_text_splitter(text_buffer: str) -> tuple[str, str]: - """ - A function to split the text into chunks. This is useful if you want to split the text into - chunks before sending it to the TTS model rather than waiting for the whole text to be - processed. - - Args: - text_buffer: The text to split. - - Returns: - A tuple of the text to process and the remaining text buffer. - """ - sentences = re.split(r"(?<=[.!?])\s+", text_buffer.strip()) - if len(sentences) >= 1: - combined_sentences = " ".join(sentences[:-1]) - if len(combined_sentences) >= min_sentence_length: - remaining_text_buffer = sentences[-1] - return combined_sentences, remaining_text_buffer - return "", text_buffer - - return sentence_based_text_splitter diff --git a/src/agents/voice/workflow.py b/src/agents/voice/workflow.py deleted file mode 100644 index c706ec41..00000000 --- a/src/agents/voice/workflow.py +++ /dev/null @@ -1,93 +0,0 @@ -from __future__ import annotations - -import abc -from collections.abc import AsyncIterator -from typing import Any - -from ..agent import Agent -from ..items import TResponseInputItem -from ..result import RunResultStreaming -from ..run import Runner - - -class VoiceWorkflowBase(abc.ABC): - """ - A base class for a voice workflow. You must implement the `run` method. A "workflow" is any - code you want, that receives a transcription and yields text that will be turned into speech - by a text-to-speech model. - In most cases, you'll create `Agent`s and use `Runner.run_streamed()` to run them, returning - some or all of the text events from the stream. You can use the `VoiceWorkflowHelper` class to - help with extracting text events from the stream. - If you have a simple workflow that has a single starting agent and no custom logic, you can - use `SingleAgentVoiceWorkflow` directly. - """ - - @abc.abstractmethod - def run(self, transcription: str) -> AsyncIterator[str]: - """ - Run the voice workflow. You will receive an input transcription, and must yield text that - will be spoken to the user. You can run whatever logic you want here. In most cases, the - final logic will involve calling `Runner.run_streamed()` and yielding any text events from - the stream. 
- """ - pass - - -class VoiceWorkflowHelper: - @classmethod - async def stream_text_from(cls, result: RunResultStreaming) -> AsyncIterator[str]: - """Wraps a `RunResultStreaming` object and yields text events from the stream.""" - async for event in result.stream_events(): - if ( - event.type == "raw_response_event" - and event.data.type == "response.output_text.delta" - ): - yield event.data.delta - - -class SingleAgentWorkflowCallbacks: - def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None: - """Called when the workflow is run.""" - pass - - -class SingleAgentVoiceWorkflow(VoiceWorkflowBase): - """A simple voice workflow that runs a single agent. Each transcription and result is added to - the input history. - For more complex workflows (e.g. multiple Runner calls, custom message history, custom logic, - custom configs), subclass `VoiceWorkflowBase` and implement your own logic. - """ - - def __init__(self, agent: Agent[Any], callbacks: SingleAgentWorkflowCallbacks | None = None): - """Create a new single agent voice workflow. - - Args: - agent: The agent to run. - callbacks: Optional callbacks to call during the workflow. - """ - self._input_history: list[TResponseInputItem] = [] - self._current_agent = agent - self._callbacks = callbacks - - async def run(self, transcription: str) -> AsyncIterator[str]: - if self._callbacks: - self._callbacks.on_run(self, transcription) - - # Add the transcription to the input history - self._input_history.append( - { - "role": "user", - "content": transcription, - } - ) - - # Run the agent - result = Runner.run_streamed(self._current_agent, self._input_history) - - # Stream the text from the result - async for chunk in VoiceWorkflowHelper.stream_text_from(result): - yield chunk - - # Update the input history and current agent - self._input_history = result.to_input_list() - self._current_agent = result.last_agent diff --git a/src/agents/agent_onboarding.py b/src/app/agent_onboarding.py similarity index 98% rename from src/agents/agent_onboarding.py rename to src/app/agent_onboarding.py index 18eb7aa3..cf65515d 100644 --- a/src/agents/agent_onboarding.py +++ b/src/app/agent_onboarding.py @@ -3,7 +3,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from fastapi import APIRouter, Request -from agents import Agent, Runner +from openai_agents.agent import Agent, Runner from datetime import datetime import json import httpx diff --git a/src/agents/agent_output.py b/src/app/agent_output.py similarity index 100% rename from src/agents/agent_output.py rename to src/app/agent_output.py diff --git a/src/agents/agent_server.py b/src/app/agent_server.py similarity index 97% rename from src/agents/agent_server.py rename to src/app/agent_server.py index 61a92706..17f856ae 100644 --- a/src/agents/agent_server.py +++ b/src/app/agent_server.py @@ -16,10 +16,8 @@ from .tool import WebSearchTool # ── SDK setup ─────────────────────────────────────────────────────────────── -load_dotenv() -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../openai-agents-python"))) -from agents import Agent, Runner, handoff, RunContextWrapper -from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +from openai_agents.agent import Agent, Runner, handoff, RunContextWrapper +from openai_agents.agent.extensions.handoff_prompt import prompt_with_handoff_instructions # ── Environment variable for Bubble webhook URL CHAT_URL = os.getenv("BUBBLE_CHAT_URL") diff --git 
a/src/agents/profilebuilder.py b/src/app/profilebuilder.py similarity index 97% rename from src/agents/profilebuilder.py rename to src/app/profilebuilder.py index 8166e71e..64529b73 100644 --- a/src/agents/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -1,8 +1,8 @@ # src/agents/profilebuilder.py from agents.profilebuilder_agent import profilebuilder_agent -from agents.util.webhook import send_webhook -from agents.run import Runner +from app.util.webhook import send_webhook +from openai_agents.agent.run import Runner from fastapi import APIRouter, Request, HTTPException import os import json diff --git a/src/agents/profilebuilder_agent.py b/src/app/profilebuilder_agent.py similarity index 100% rename from src/agents/profilebuilder_agent.py rename to src/app/profilebuilder_agent.py From 4a640db772fe9bbc9d035a0daa1617153edaee62 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 04:04:12 +0000 Subject: [PATCH 198/230] fix: point agent_server to app.profilebuilder_agent --- src/app/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/app/__init__.py diff --git a/src/app/__init__.py b/src/app/__init__.py new file mode 100644 index 00000000..e69de29b From c8cb1171877e397f6b14b5f409aea831aebc79da Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 04:08:06 +0000 Subject: [PATCH 199/230] updating rerouting post sdk dependency --- ".\"git grep -n -- '*.py' \"from agents.\"" | 127 ++++++++++++++++++++ src/app/agent_server.py | 4 +- 2 files changed, 129 insertions(+), 2 deletions(-) create mode 100644 ".\"git grep -n -- '*.py' \"from agents.\"" diff --git "a/.\"git grep -n -- '*.py' \"from agents.\"" "b/.\"git grep -n -- '*.py' \"from agents.\"" new file mode 100644 index 00000000..d92b872e --- /dev/null +++ "b/.\"git grep -n -- '*.py' \"from agents.\"" @@ -0,0 +1,127 @@ +docs/handoffs.md:88:from agents.extensions import handoff_filters +docs/handoffs.md:106:from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX +docs/ja/handoffs.md:88:from agents.extensions import handoff_filters +docs/ja/handoffs.md:106:from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX +docs/ja/visualization.md:25:from agents.extensions.visualization import draw_graph +docs/ja/voice/quickstart.md:57:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +docs/ja/voice/quickstart.md:94:from agents.voice import SingleAgentVoiceWorkflow, VoicePipeline +docs/ja/voice/quickstart.md:103:from agents.voice import AudioInput +docs/ja/voice/quickstart.md:137:from agents.voice import ( +docs/ja/voice/quickstart.md:142:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +docs/visualization.md:25:from agents.extensions.visualization import draw_graph +docs/voice/quickstart.md:57:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +docs/voice/quickstart.md:94:from agents.voice import SingleAgentVoiceWorkflow, VoicePipeline +docs/voice/quickstart.md:103:from agents.voice import AudioInput +docs/voice/quickstart.md:137:from agents.voice import ( +docs/voice/quickstart.md:142:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +examples/customer_service/main.py:23:from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX +examples/financial_research_agent/agents/search_agent.py:2:from agents.model_settings import ModelSettings 
+examples/handoffs/message_filter.py:7:from agents.extensions import handoff_filters +examples/handoffs/message_filter_streaming.py:7:from agents.extensions import handoff_filters +examples/mcp/filesystem_example/main.py:6:from agents.mcp import MCPServer, MCPServerStdio +examples/mcp/git_example/main.py:5:from agents.mcp import MCPServer, MCPServerStdio +examples/mcp/sse_example/main.py:9:from agents.mcp import MCPServer, MCPServerSse +examples/mcp/sse_example/main.py:10:from agents.model_settings import ModelSettings +examples/research_bot/agents/search_agent.py:2:from agents.model_settings import ModelSettings +examples/voice/static/main.py:7:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +examples/voice/static/main.py:8:from agents.voice import ( +examples/voice/streamed/main.py:15:from agents.voice import StreamedAudioInput, VoicePipeline +examples/voice/streamed/my_workflow.py:6:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +examples/voice/streamed/my_workflow.py:7:from agents.voice import VoiceWorkflowBase, VoiceWorkflowHelper +src/app/agent_server.py:13:from agents.profilebuilder_agent import profilebuilder_agent +src/app/agent_server.py:14:from agents.profilebuilder import router as profilebuilder_router +src/app/profilebuilder.py:3:from agents.profilebuilder_agent import profilebuilder_agent +tests/conftest.py:5:from agents.models import _openai_shared +tests/conftest.py:6:from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel +tests/conftest.py:7:from agents.models.openai_responses import OpenAIResponsesModel +tests/conftest.py:8:from agents.tracing import set_trace_processors +tests/conftest.py:9:from agents.tracing.setup import GLOBAL_TRACE_PROVIDER +tests/fake_model.py:8:from agents.agent_output import AgentOutputSchema +tests/fake_model.py:9:from agents.handoffs import Handoff +tests/fake_model.py:10:from agents.items import ( +tests/fake_model.py:16:from agents.model_settings import ModelSettings +tests/fake_model.py:17:from agents.models.interface import Model, ModelTracing +tests/fake_model.py:18:from agents.tool import Tool +tests/fake_model.py:19:from agents.tracing import SpanError, generation_span +tests/fake_model.py:20:from agents.usage import Usage +tests/mcp/helpers.py:8:from agents.mcp import MCPServer +tests/mcp/test_caching.py:6:from agents.mcp import MCPServerStdio +tests/mcp/test_connect_disconnect.py:6:from agents.mcp import MCPServerStdio +tests/mcp/test_mcp_util.py:10:from agents.exceptions import AgentsException, ModelBehaviorError +tests/mcp/test_mcp_util.py:11:from agents.mcp import MCPServer, MCPUtil +tests/mcp/test_server_errors.py:3:from agents.exceptions import UserError +tests/mcp/test_server_errors.py:4:from agents.mcp.server import _MCPServerWithClientSession +tests/test_agent_hooks.py:10:from agents.agent import Agent +tests/test_agent_hooks.py:11:from agents.lifecycle import AgentHooks +tests/test_agent_hooks.py:12:from agents.run import Runner +tests/test_agent_hooks.py:13:from agents.run_context import RunContextWrapper, TContext +tests/test_agent_hooks.py:14:from agents.tool import Tool +tests/test_agent_runner.py:26:from agents.agent import ToolsToFinalOutputResult +tests/test_agent_runner.py:27:from agents.tool import FunctionToolResult, function_tool +tests/test_agent_runner_streamed.py:23:from agents.items import RunItem +tests/test_agent_runner_streamed.py:24:from agents.run import RunConfig +tests/test_agent_runner_streamed.py:25:from 
agents.stream_events import AgentUpdatedStreamEvent +tests/test_computer_action.py:34:from agents._run_impl import ComputerAction, ToolRunComputerAction +tests/test_computer_action.py:35:from agents.items import ToolCallOutputItem +tests/test_config.py:7:from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel +tests/test_config.py:8:from agents.models.openai_provider import OpenAIProvider +tests/test_config.py:9:from agents.models.openai_responses import OpenAIResponsesModel +tests/test_doc_parsing.py:1:from agents.function_schema import generate_func_documentation +tests/test_extension_filters.py:4:from agents.extensions.handoff_filters import remove_all_tools +tests/test_extension_filters.py:5:from agents.items import ( +tests/test_function_schema.py:10:from agents.exceptions import UserError +tests/test_function_schema.py:11:from agents.function_schema import function_schema +tests/test_function_tool.py:9:from agents.tool import default_tool_error_function +tests/test_function_tool_decorator.py:9:from agents.run_context import RunContextWrapper +tests/test_guardrails.py:16:from agents.guardrail import input_guardrail, output_guardrail +tests/test_openai_chatcompletions.py:33:from agents.models.fake_id import FAKE_RESPONSES_ID +tests/test_openai_chatcompletions.py:34:from agents.models.openai_chatcompletions import _Converter +tests/test_openai_chatcompletions_converter.py:41:from agents.agent_output import AgentOutputSchema +tests/test_openai_chatcompletions_converter.py:42:from agents.exceptions import UserError +tests/test_openai_chatcompletions_converter.py:43:from agents.items import TResponseInputItem +tests/test_openai_chatcompletions_converter.py:44:from agents.models.fake_id import FAKE_RESPONSES_ID +tests/test_openai_chatcompletions_converter.py:45:from agents.models.openai_chatcompletions import _Converter +tests/test_openai_chatcompletions_stream.py:20:from agents.model_settings import ModelSettings +tests/test_openai_chatcompletions_stream.py:21:from agents.models.interface import ModelTracing +tests/test_openai_chatcompletions_stream.py:22:from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel +tests/test_openai_chatcompletions_stream.py:23:from agents.models.openai_provider import OpenAIProvider +tests/test_openai_responses_converter.py:43:from agents.models.openai_responses import Converter +tests/test_output_tool.py:8:from agents.agent_output import _WRAPPER_DICT_KEY +tests/test_output_tool.py:9:from agents.util import _json +tests/test_pretty_print.py:8:from agents.agent_output import _WRAPPER_DICT_KEY +tests/test_pretty_print.py:9:from agents.util._pretty_print import pretty_print_result, pretty_print_run_result_streaming +tests/test_responses_tracing.py:7:from agents.tracing.span_data import ResponseSpanData +tests/test_run_config.py:6:from agents.models.interface import Model, ModelProvider +tests/test_run_step_execution.py:22:from agents._run_impl import ( +tests/test_run_step_processing.py:26:from agents._run_impl import RunImpl +tests/test_strict_schema.py:3:from agents.exceptions import UserError +tests/test_strict_schema.py:4:from agents.strict_schema import ensure_strict_json_schema +tests/test_tool_choice_reset.py:4:from agents._run_impl import AgentToolUseTracker, RunImpl +tests/test_tool_converter.py:5:from agents.exceptions import UserError +tests/test_tool_converter.py:6:from agents.models.openai_chatcompletions import ToolConverter +tests/test_tool_converter.py:7:from agents.tool import FileSearchTool, 
WebSearchTool +tests/test_tool_use_behavior.py:19:from agents._run_impl import RunImpl +tests/test_trace_processor.py:8:from agents.tracing.processor_interface import TracingProcessor +tests/test_trace_processor.py:9:from agents.tracing.processors import BackendSpanExporter, BatchTraceProcessor +tests/test_trace_processor.py:10:from agents.tracing.span_data import AgentSpanData +tests/test_trace_processor.py:11:from agents.tracing.spans import SpanImpl +tests/test_trace_processor.py:12:from agents.tracing.traces import TraceImpl +tests/test_tracing.py:9:from agents.tracing import ( +tests/test_tracing.py:19:from agents.tracing.spans import SpanError +tests/test_visualization.py:7:from agents.extensions.visualization import ( +tests/test_visualization.py:13:from agents.handoffs import Handoff +tests/testing_processor.py:7:from agents.tracing import Span, Trace, TracingProcessor +tests/tracing/test_processor_api_key.py:3:from agents.tracing.processors import BackendSpanExporter +tests/voice/fake_models.py:10: from agents.voice import ( +tests/voice/helpers.py:2: from agents.voice import StreamedAudioResult +tests/voice/test_input.py:9: from agents.voice import AudioInput, StreamedAudioInput +tests/voice/test_input.py:10: from agents.voice.input import DEFAULT_SAMPLE_RATE, _buffer_to_audio_file +tests/voice/test_openai_stt.py:12: from agents.voice import OpenAISTTTranscriptionSession, StreamedAudioInput, STTModelSettings +tests/voice/test_openai_stt.py:13: from agents.voice.exceptions import STTWebsocketConnectionError +tests/voice/test_openai_stt.py:14: from agents.voice.models.openai_stt import EVENT_INACTIVITY_TIMEOUT +tests/voice/test_openai_tts.py:9: from agents.voice import OpenAITTSModel, TTSModelSettings +tests/voice/test_pipeline.py:8: from agents.voice import AudioInput, TTSModelSettings, VoicePipeline, VoicePipelineConfig +tests/voice/test_workflow.py:12:from agents.agent_output import AgentOutputSchema +tests/voice/test_workflow.py:13:from agents.handoffs import Handoff +tests/voice/test_workflow.py:14:from agents.items import ( +tests/voice/test_workflow.py:22: from agents.voice import SingleAgentVoiceWorkflow diff --git a/src/app/agent_server.py b/src/app/agent_server.py index 17f856ae..61cce209 100644 --- a/src/app/agent_server.py +++ b/src/app/agent_server.py @@ -10,8 +10,8 @@ from fastapi import FastAPI, Request, HTTPException from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel -from agents.profilebuilder_agent import profilebuilder_agent -from agents.profilebuilder import router as profilebuilder_router +from app.profilebuilder_agent import profilebuilder_agent +from app.profilebuilder import router as profilebuilder_router from .tool import WebSearchTool From 9684338b309c089588e79859585a6371b79e00fd Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 04:16:23 +0000 Subject: [PATCH 200/230] deps: use PyPI openai-agents & update paths --- pyproject.toml | 84 ++++++++++++++++++++++---------------------------- 1 file changed, 37 insertions(+), 47 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2f1e7cc0..38b711b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,25 +1,27 @@ [project] name = "rightnow-agent-app" version = "0.0.9" -description = "OpenAI Agents SDK" +description = "OpenAI Agents SDK demo app" readme = "README.md" requires-python = ">=3.9" license = "MIT" authors = [{ name = "OpenAI", email = "support@openai.com" }] + dependencies = [ + # ---------- 
External deps ---------- "openai>=1.66.5", - "openai-agents @ git+https://github.com/openai/openai-agents-python.git", - "pydantic>=2.10, <3", - "griffe>=1.5.6, <2", - "typing-extensions>=4.12.2, <5", - "requests>=2.0, <3", - "types-requests>=2.0, <3", - "mcp>=1.6.0, <2; python_version >= '3.10'", + "openai-agents>=0.0.15", # ← from PyPI, no more git install + "pydantic>=2.10,<3", + "griffe>=1.5.6,<2", + "typing-extensions>=4.12.2,<5", + "requests>=2.0,<3", + "types-requests>=2.0,<3", + "mcp>=1.6.0,<2; python_version >= '3.10'", "mkdocs-static-i18n>=1.3.0", "fastapi>=0.110.0", "uvicorn>=0.34.0", - ] + classifiers = [ "Typing :: Typed", "Intended Audience :: Developers", @@ -28,20 +30,22 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", - "Intended Audience :: Developers", "Operating System :: OS Independent", "Topic :: Software Development :: Libraries :: Python Modules", "License :: OSI Approved :: MIT License", ] [project.urls] -Homepage = "https://github.com/openai/openai-agents-python" +Homepage = "https://github.com/openai/openai-agents-python" Repository = "https://github.com/openai/openai-agents-python" [project.optional-dependencies] -voice = ["numpy>=2.2.0, <3; python_version>='3.10'", "websockets>=15.0, <16"] -viz = ["graphviz>=0.17"] +voice = ["numpy>=2.2.0,<3; python_version>='3.10'", "websockets>=15.0,<16"] +viz = ["graphviz>=0.17"] +# ----------------------------------------------------------------- +# Dev / tooling +# ----------------------------------------------------------------- [dependency-groups] dev = [ "mypy", @@ -65,35 +69,26 @@ dev = [ "graphviz", ] -[tool.uv.workspace] -members = ["agents"] - -[tool.uv.sources] -agents = { workspace = true } - +# ----------------------------------------------------------------- +# Build configuration +# ----------------------------------------------------------------- [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] -packages = ["src/agents"] - +packages = ["src/app"] # ← path updated +# ----------------------------------------------------------------- +# Tool configs +# ----------------------------------------------------------------- [tool.ruff] -line-length = 100 -target-version = "py39" +line-length = 100 +target-version = "py39" [tool.ruff.lint] -select = [ - "E", # pycodestyle errors - "W", # pycodestyle warnings - "F", # pyflakes - "I", # isort - "B", # flake8-bugbear - "C4", # flake8-comprehensions - "UP", # pyupgrade -] -isort = { combine-as-imports = true, known-first-party = ["agents"] } +select = ["E", "W", "F", "I", "B", "C4", "UP"] +isort = { combine-as-imports = true, known-first-party = ["app"] } # ← updated [tool.ruff.lint.pydocstyle] convention = "google" @@ -102,23 +97,22 @@ convention = "google" "examples/**/*.py" = ["E501"] [tool.mypy] -strict = true -disallow_incomplete_defs = false -disallow_untyped_defs = false -disallow_untyped_calls = false +strict = true +disallow_incomplete_defs = false +disallow_untyped_defs = false +disallow_untyped_calls = false [[tool.mypy.overrides]] module = "sounddevice.*" ignore_missing_imports = true [tool.coverage.run] -source = ["tests", "src/agents"] +source = ["tests", "src/app"] # ← updated [tool.coverage.report] show_missing = true -sort = "-Cover" -exclude_also = [ - # This is only executed while typechecking +sort = "-Cover" +exclude_also = [ "if TYPE_CHECKING:", "@abc.abstractmethod", "raise NotImplementedError", @@ 
-126,11 +120,10 @@ exclude_also = [ ] [tool.pytest.ini_options] -asyncio_mode = "auto" +asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "session" filterwarnings = [ - # This is a warning that is expected to happen: we have an async filter that raises an exception - "ignore:coroutine 'test_async_input_filter_fails..invalid_input_filter' was never awaited:RuntimeWarning", + "ignore:coroutine .* was never awaited:RuntimeWarning", ] markers = [ "allow_call_model_methods: mark test as allowing calls to real model implementations", @@ -138,6 +131,3 @@ markers = [ [tool.inline-snapshot] format-command = "ruff format --stdin-filename {filename}" - -[tool.hatch.metadata] -allow-direct-references = true From 085d44818e6e69c1971633ee48e9aa7a67b1f3e4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 04:21:19 +0000 Subject: [PATCH 201/230] deps: use PyPI openai-agents & update paths --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 38b711b8..109da6d3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ authors = [{ name = "OpenAI", email = "support@openai.com" }] dependencies = [ # ---------- External deps ---------- "openai>=1.66.5", - "openai-agents>=0.0.15", # ← from PyPI, no more git install + "openai-agents>=0.0.14,<0.1.0", # ← from PyPI, no more git install "pydantic>=2.10,<3", "griffe>=1.5.6,<2", "typing-extensions>=4.12.2,<5", From dbb3b20dd2fe8ee9e91e7c626c031af987b492f9 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 04:29:07 +0000 Subject: [PATCH 202/230] fix: pin openai-agents and correct import path --- pyproject.toml | 2 +- src/app/agent_onboarding.py | 2 +- src/app/agent_server.py | 4 ++-- src/app/profilebuilder.py | 2 +- src/app/profilebuilder_agent.py | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 109da6d3..f94287ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ authors = [{ name = "OpenAI", email = "support@openai.com" }] dependencies = [ # ---------- External deps ---------- "openai>=1.66.5", - "openai-agents>=0.0.14,<0.1.0", # ← from PyPI, no more git install + "openai-agents>=0.0.14", # ← from PyPI, no more git install "pydantic>=2.10,<3", "griffe>=1.5.6,<2", "typing-extensions>=4.12.2,<5", diff --git a/src/app/agent_onboarding.py b/src/app/agent_onboarding.py index cf65515d..230d17c4 100644 --- a/src/app/agent_onboarding.py +++ b/src/app/agent_onboarding.py @@ -3,7 +3,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from fastapi import APIRouter, Request -from openai_agents.agent import Agent, Runner +from openai_agents import Agent, Runner from datetime import datetime import json import httpx diff --git a/src/app/agent_server.py b/src/app/agent_server.py index 61cce209..f13b48b0 100644 --- a/src/app/agent_server.py +++ b/src/app/agent_server.py @@ -16,8 +16,8 @@ from .tool import WebSearchTool # ── SDK setup ─────────────────────────────────────────────────────────────── -from openai_agents.agent import Agent, Runner, handoff, RunContextWrapper -from openai_agents.agent.extensions.handoff_prompt import prompt_with_handoff_instructions +from openai_agents import Agent, Runner, handoff, RunContextWrapper +from openai_agents.extensions.handoff_prompt import prompt_with_handoff_instructions # ── Environment variable for Bubble webhook 
URL CHAT_URL = os.getenv("BUBBLE_CHAT_URL") diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index 64529b73..6be5d87e 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -2,7 +2,7 @@ from agents.profilebuilder_agent import profilebuilder_agent from app.util.webhook import send_webhook -from openai_agents.agent.run import Runner +from openai_agents.run import Runner from fastapi import APIRouter, Request, HTTPException import os import json diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py index fb590707..5645cd1b 100644 --- a/src/app/profilebuilder_agent.py +++ b/src/app/profilebuilder_agent.py @@ -1,7 +1,7 @@ # src/agents/profilebuilder_agent.py # ---------------------------------- -from openai_agents.agent import Agent +from openai_agents import Agent from openai_agents.guardrails import ( output_guardrail, GuardrailFunctionOutput, From e3e1575245e30fb4b76fb58c261fe32fdc30fb6a Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 04:32:34 +0000 Subject: [PATCH 203/230] fix: pin openai-agents and correct import path --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f94287ae..cbb184c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ authors = [{ name = "OpenAI", email = "support@openai.com" }] dependencies = [ # ---------- External deps ---------- "openai>=1.66.5", - "openai-agents>=0.0.14", # ← from PyPI, no more git install + "openai-agents==0.0.14", # ← from PyPI, no more git install "pydantic>=2.10,<3", "griffe>=1.5.6,<2", "typing-extensions>=4.12.2,<5", From 9e97058419516fe086260efe4345f7e5b8bceb07 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 04:37:12 +0000 Subject: [PATCH 204/230] chore: remove local openai_agents shadow package --- .gitignore | 2 +- .gitmodules | 3 --- openai_agents | 1 - 3 files changed, 1 insertion(+), 5 deletions(-) delete mode 160000 openai_agents diff --git a/.gitignore b/.gitignore index 7dd22b88..dc79c72b 100644 --- a/.gitignore +++ b/.gitignore @@ -141,4 +141,4 @@ cython_debug/ .ruff_cache/ # PyPI configuration file -.pypirc \ No newline at end of file +.pypircopenai_agents/ diff --git a/.gitmodules b/.gitmodules index 83e509ef..d1f194a8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,3 @@ -[submodule "openai_agents"] - path = openai_agents - url = https://github.com/openai/openai-agents-python.git [submodule "vendor/openai-agents-python"] path = vendor/openai-agents-python url = https://github.com/openai/openai-agents-python.git diff --git a/openai_agents b/openai_agents deleted file mode 160000 index f9763495..00000000 --- a/openai_agents +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f9763495b86afcf0c421451a92200e1141fa8dcb From c75dd6014a7ca18e07640b66492c443d07f8ec92 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 04:44:10 +0000 Subject: [PATCH 205/230] chore: remove local openai_agents shadow dir --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index dc79c72b..34252901 100644 --- a/.gitignore +++ b/.gitignore @@ -142,3 +142,4 @@ cython_debug/ # PyPI configuration file .pypircopenai_agents/ +openai_agents/ From ec2e6ac5b72784412b4f7b51d81aa3bcdc289d8b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> 
Date: Thu, 8 May 2025 05:00:16 +0000
Subject: [PATCH 206/230] chore: use correct 'agents' import path

---
 src/app/agent_onboarding.py     | 2 +-
 src/app/agent_server.py         | 4 ++--
 src/app/profilebuilder.py       | 2 +-
 src/app/profilebuilder_agent.py | 4 ++--
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/app/agent_onboarding.py b/src/app/agent_onboarding.py
index 230d17c4..18eb7aa3 100644
--- a/src/app/agent_onboarding.py
+++ b/src/app/agent_onboarding.py
@@ -3,7 +3,7 @@
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

 from fastapi import APIRouter, Request
-from openai_agents import Agent, Runner
+from agents import Agent, Runner
 from datetime import datetime
 import json
 import httpx
diff --git a/src/app/agent_server.py b/src/app/agent_server.py
index f13b48b0..2b9f4db3 100644
--- a/src/app/agent_server.py
+++ b/src/app/agent_server.py
@@ -16,8 +16,8 @@ from app.profilebuilder_agent import profilebuilder_agent
 from app.profilebuilder import router as profilebuilder_router

-from openai_agents import Agent, Runner, handoff, RunContextWrapper
-from openai_agents.extensions.handoff_prompt import prompt_with_handoff_instructions
+from agents import Agent, Runner, handoff, RunContextWrapper
+from agents.extensions.handoff_prompt import prompt_with_handoff_instructions

 # ── Environment variable for Bubble webhook URL
 CHAT_URL = os.getenv("BUBBLE_CHAT_URL")
diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py
index 6be5d87e..780ff162 100644
--- a/src/app/profilebuilder.py
+++ b/src/app/profilebuilder.py
@@ -2,7 +2,7 @@

 from agents.profilebuilder_agent import profilebuilder_agent
 from app.util.webhook import send_webhook
-from openai_agents.run import Runner
+from agents.run import Runner
 from fastapi import APIRouter, Request, HTTPException
 import os
 import json
diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py
index 5645cd1b..56a8bc08 100644
--- a/src/app/profilebuilder_agent.py
+++ b/src/app/profilebuilder_agent.py
@@ -1,8 +1,8 @@
 # src/agents/profilebuilder_agent.py
 # ----------------------------------

-from openai_agents import Agent
-from openai_agents.guardrails import (
+from agents import Agent
+from agents.guardrails import (
     output_guardrail,
     GuardrailFunctionOutput,
 )

From 433a18ead2af820c7ec6f3e06c154236ef29fc26 Mon Sep 17 00:00:00 2001
From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com>
Date: Thu, 8 May 2025 05:07:54 +0000
Subject: [PATCH 207/230] chore: use correct 'agents' import path

---
 src/app/profilebuilder.py       | 2 +-
 src/app/profilebuilder_agent.py | 5 +----
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py
index 780ff162..7ed9f205 100644
--- a/src/app/profilebuilder.py
+++ b/src/app/profilebuilder.py
@@ -1,6 +1,6 @@
 # src/agents/profilebuilder.py
-from agents.profilebuilder_agent import profilebuilder_agent
+from app.profilebuilder_agent import profilebuilder_agent
 from app.util.webhook import send_webhook
 from agents.run import Runner
 from fastapi import APIRouter, Request, HTTPException
 import os
diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py
index 56a8bc08..02a40009 100644
--- a/src/app/profilebuilder_agent.py
+++ b/src/app/profilebuilder_agent.py
@@ -2,10 +2,7 @@
 # ----------------------------------

 from agents import Agent
-from agents.guardrails import (
-    output_guardrail,
-    GuardrailFunctionOutput,
-)
+from agents import output_guardrail, GuardrailFunctionOutput

 from .agent_output import 
ProfileFieldOut, ClarificationOut From 3c35dc7e08b73c35035dd3d442f3a7afed973bd3 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 05:15:41 +0000 Subject: [PATCH 208/230] chore: use correct 'agents' import path --- src/app/exceptions.py | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 src/app/exceptions.py diff --git a/src/app/exceptions.py b/src/app/exceptions.py new file mode 100644 index 00000000..ded6b392 --- /dev/null +++ b/src/app/exceptions.py @@ -0,0 +1,2 @@ +# src/app/exceptions.py +from openai_agents.exceptions import ModelBehaviorError, UserError From 73d9f7b8117bc32b78e7c171608accdc71898e76 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 05:24:10 +0000 Subject: [PATCH 209/230] add exceptions shim for ModelBehaviorError/UserError --- src/app/{__init__.py => __init__.py.py} | 0 src/app/exceptions.py | 10 ++++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) rename src/app/{__init__.py => __init__.py.py} (100%) diff --git a/src/app/__init__.py b/src/app/__init__.py.py similarity index 100% rename from src/app/__init__.py rename to src/app/__init__.py.py diff --git a/src/app/exceptions.py b/src/app/exceptions.py index ded6b392..211a5c48 100644 --- a/src/app/exceptions.py +++ b/src/app/exceptions.py @@ -1,2 +1,8 @@ -# src/app/exceptions.py -from openai_agents.exceptions import ModelBehaviorError, UserError +""" +Thin re-export layer so the rest of our code can just do + from .exceptions import ModelBehaviorError, UserError +without depending on the SDK’s namespace directly. +""" +from openai_agents.exceptions import ModelBehaviorError, UserError # type: ignore + +__all__ = ["ModelBehaviorError", "UserError"] From 14476899bc59c81c4a6fbe625b83329609743323 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 05:34:03 +0000 Subject: [PATCH 210/230] add exceptions shim for ModelBehaviorError/UserError --- src/app/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/app/exceptions.py b/src/app/exceptions.py index 211a5c48..2020ea2d 100644 --- a/src/app/exceptions.py +++ b/src/app/exceptions.py @@ -3,6 +3,6 @@ from .exceptions import ModelBehaviorError, UserError without depending on the SDK’s namespace directly. 
""" -from openai_agents.exceptions import ModelBehaviorError, UserError # type: ignore +from agents.exceptions import ModelBehaviorError, UserError # type: ignore __all__ = ["ModelBehaviorError", "UserError"] From e1b2a5c062fcfd77ddceb64c992e157c8b903f18 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 05:39:12 +0000 Subject: [PATCH 211/230] add exceptions shim for ModelBehaviorError/UserError --- src/app/agent_output.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/app/agent_output.py b/src/app/agent_output.py index 0b104d7c..d3becd1c 100644 --- a/src/app/agent_output.py +++ b/src/app/agent_output.py @@ -5,7 +5,7 @@ from typing_extensions import TypedDict, get_args, get_origin from .exceptions import ModelBehaviorError, UserError -from .strict_schema import ensure_strict_json_schema +from agents.strict_schema import ensure_strict_json_schema from .tracing import SpanError from .util import _error_tracing, _json From da0bfbe3ea66ffbadf98bd3d04079c589e3d34bf Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 05:42:21 +0000 Subject: [PATCH 212/230] add exceptions shim for ModelBehaviorError/UserError --- src/app/agent_output.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/app/agent_output.py b/src/app/agent_output.py index d3becd1c..8a27fcf9 100644 --- a/src/app/agent_output.py +++ b/src/app/agent_output.py @@ -6,8 +6,8 @@ from .exceptions import ModelBehaviorError, UserError from agents.strict_schema import ensure_strict_json_schema -from .tracing import SpanError -from .util import _error_tracing, _json +from agents.tracing import SpanError +from agents.util import _error_tracing, _json _WRAPPER_DICT_KEY = "response" From 98d76e01fff3bf32a6766d87f3779b5d4378f52b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 05:53:32 +0000 Subject: [PATCH 213/230] add exceptions shim for ModelBehaviorError/UserError --- src/app/profilebuilder_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py index 02a40009..ac323788 100644 --- a/src/app/profilebuilder_agent.py +++ b/src/app/profilebuilder_agent.py @@ -13,7 +13,7 @@ "Return ONLY a JSON object matching ProfileFieldOut OR ClarificationOut." 
), output_type=ProfileFieldOut, - alternate_output_types=[ClarificationOut], + alternate_output_schemas=[ClarificationOut], ) @output_guardrail From ef37752791680f1c31fc9a3e8ac08fb8368367c4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 06:01:04 +0000 Subject: [PATCH 214/230] add exceptions shim for ModelBehaviorError/UserError --- src/app/profilebuilder_agent.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py index ac323788..215fb7d6 100644 --- a/src/app/profilebuilder_agent.py +++ b/src/app/profilebuilder_agent.py @@ -1,24 +1,23 @@ -# src/agents/profilebuilder_agent.py -# ---------------------------------- +# src/app/profilebuilder_agent.py +# ------------------------------- -from agents import Agent -from agents import output_guardrail, GuardrailFunctionOutput +from openai_agents import Agent +from openai_agents.guardrails import output_guardrail, GuardrailFunctionOutput -from .agent_output import ProfileFieldOut, ClarificationOut +from .agent_output import ProfileFieldOut profile_builder = Agent( - name="Profile‑builder", + name="Profile-builder", instructions=( "Collect ONE profile field at a time from the user.\n" - "Return ONLY a JSON object matching ProfileFieldOut OR ClarificationOut." + "Return ONLY a JSON object matching the ProfileFieldOut schema." ), output_type=ProfileFieldOut, - alternate_output_schemas=[ClarificationOut], ) @output_guardrail async def schema_guardrail(ctx, agent, llm_output): - # If JSON parsed into one of the declared types, we're good. + # If the JSON parsed into ProfileFieldOut we’re good. return GuardrailFunctionOutput("schema_ok", tripwire_triggered=False) profile_builder.output_guardrails = [schema_guardrail] From 042ce780056949142cf5c95ecd9910f858e72a0b Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 06:06:51 +0000 Subject: [PATCH 215/230] update profilebuilder --- src/app/profilebuilder_agent.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py index 215fb7d6..d999e097 100644 --- a/src/app/profilebuilder_agent.py +++ b/src/app/profilebuilder_agent.py @@ -1,12 +1,13 @@ # src/app/profilebuilder_agent.py # ------------------------------- -from openai_agents import Agent -from openai_agents.guardrails import output_guardrail, GuardrailFunctionOutput +from agents import Agent # ← correct package name +from agents.guardrails import output_guardrail, GuardrailFunctionOutput from .agent_output import ProfileFieldOut -profile_builder = Agent( + +profilebuilder_agent = Agent( # exported under this name name="Profile-builder", instructions=( "Collect ONE profile field at a time from the user.\n" @@ -15,9 +16,11 @@ output_type=ProfileFieldOut, ) + @output_guardrail async def schema_guardrail(ctx, agent, llm_output): # If the JSON parsed into ProfileFieldOut we’re good. 
return GuardrailFunctionOutput("schema_ok", tripwire_triggered=False) -profile_builder.output_guardrails = [schema_guardrail] + +profilebuilder_agent.output_guardrails = [schema_guardrail] From 486b4c85dfbfa4905169bb22b598c795d5a103b4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 06:12:33 +0000 Subject: [PATCH 216/230] update profilebuilder --- src/app/profilebuilder_agent.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py index d999e097..92a517cb 100644 --- a/src/app/profilebuilder_agent.py +++ b/src/app/profilebuilder_agent.py @@ -1,8 +1,7 @@ # src/app/profilebuilder_agent.py # ------------------------------- -from agents import Agent # ← correct package name -from agents.guardrails import output_guardrail, GuardrailFunctionOutput +from agents import Agent, output_guardrail, GuardrailFunctionOutput # ← single import line from .agent_output import ProfileFieldOut From abb03faaa2aefba1c886863d67278cf1e7a2471c Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 06:18:38 +0000 Subject: [PATCH 217/230] update profilebuilder --- src/app/profilebuilder.py | 2 +- src/app/util/__init__.py | 0 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 src/app/util/__init__.py diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index 7ed9f205..06c1700d 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -1,7 +1,7 @@ # src/agents/profilebuilder.py from app.profilebuilder_agent import profilebuilder_agent -from app.util.webhook import send_webhook +from .util.webhook import send_webhook from agents.run import Runner from fastapi import APIRouter, Request, HTTPException import os diff --git a/src/app/util/__init__.py b/src/app/util/__init__.py new file mode 100644 index 00000000..e69de29b From de53e4182f7072cc5ce3dd3b60612b2b5ec2ed73 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 06:34:33 +0000 Subject: [PATCH 218/230] added webhook.py under app,util --- src/app/util/webhook.py | 51 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 src/app/util/webhook.py diff --git a/src/app/util/webhook.py b/src/app/util/webhook.py new file mode 100644 index 00000000..72e36954 --- /dev/null +++ b/src/app/util/webhook.py @@ -0,0 +1,51 @@ +"""utils/webhook.py +A single, reusable helper for posting JSON payloads to Bubble‑workflow URLs. + +Usage in your FastAPI code: + + from agents.utils.webhook import send_webhook + + url = TASK_URL_MAP[task_type] # looked up from env‑vars + await send_webhook(url, flattened_payload) + +You keep *all* Bubble‑specific routing logic (task_type → URL) in your +FastAPI service, while this helper focuses solely on safe, idempotent +HTTP posting and basic allow‑list protection. +""" +from __future__ import annotations + +import os +import json +import httpx +from typing import Any, Mapping + +# ----------------------------------------------------------------------------- +# Configuration +# ----------------------------------------------------------------------------- +# Only allow POSTs to URLs that start with one of these roots (prevents exfiltration) +ALLOWED_ROOTS = os.getenv("BUBBLE_DOMAIN_ROOTS", "https://rgtnow.com").split(",") + +# Optional default timeout (seconds) for outbound webhook calls. 
+HTTP_TIMEOUT = float(os.getenv("WEBHOOK_TIMEOUT", "10")) + +# ----------------------------------------------------------------------------- +# Public helper +# ----------------------------------------------------------------------------- +async def send_webhook(target_url: str, payload: Mapping[str, Any]) -> None: + """POST *payload* as JSON to *target_url*. + + Raises: + ValueError: if *target_url* is outside the allowed Bubble domain roots. + httpx.HTTPStatusError: if Bubble responds with an error status code. + """ + if not any(target_url.startswith(root.strip()) for root in ALLOWED_ROOTS): + raise ValueError( + f"Refusing to POST to {target_url!r} — must begin with one of {ALLOWED_ROOTS!r}" + ) + + async with httpx.AsyncClient(timeout=HTTP_TIMEOUT) as client: + print("=== Webhook Dispatch →", target_url, "===\n", + json.dumps(payload, indent=2, default=str)) + resp = await client.post(target_url, json=payload) + resp.raise_for_status() + return None From 3202d3cc29d22b3003c1c1d087f5f52925f0d4da Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 06:37:54 +0000 Subject: [PATCH 219/230] update agent_server tool import --- src/app/agent_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/app/agent_server.py b/src/app/agent_server.py index 2b9f4db3..312574b6 100644 --- a/src/app/agent_server.py +++ b/src/app/agent_server.py @@ -13,7 +13,7 @@ from app.profilebuilder_agent import profilebuilder_agent from app.profilebuilder import router as profilebuilder_router -from .tool import WebSearchTool +from agents.tool import WebSearchTool # ── SDK setup ─────────────────────────────────────────────────────────────── from agents import Agent, Runner, handoff, RunContextWrapper From 6946c719bb04582b5c171f8033ab884277e27ba4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 16:36:53 +0900 Subject: [PATCH 220/230] Update profilebuilder.py --- src/app/profilebuilder.py | 125 +++++++++++++++++++------------------- 1 file changed, 62 insertions(+), 63 deletions(-) diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index 06c1700d..a78b851e 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -1,87 +1,86 @@ -# src/agents/profilebuilder.py +# ────────────────────────────────────────────────────────────── +# src/app/profilebuilder.py (← keep the same path as before) +# ────────────────────────────────────────────────────────────── -from app.profilebuilder_agent import profilebuilder_agent -from .util.webhook import send_webhook -from agents.run import Runner from fastapi import APIRouter, Request, HTTPException -import os -import json +from pydantic import BaseModel from datetime import datetime +import json +import os + +from app.profilebuilder_agent import profilebuilder_agent # the Agent +from app.util.webhook import send_webhook # Bubble helper router = APIRouter() PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") -CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") +CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") + @router.post("/profilebuilder") async def profilebuilder_handler(req: Request): - data = await req.json() - + # ------------------------------------------------------------------ # + # 1. 
Validate inbound payload from Bubble # + # ------------------------------------------------------------------ # + data = await req.json() task_id = data.get("task_id") user_id = data.get("user_id") - prompt = data.get("prompt") or data.get("user_prompt") or data.get("message") + prompt = data.get("prompt") or data.get("user_prompt") or data.get("message") if not (task_id and user_id and prompt): raise HTTPException(422, "Missing task_id, user_id, or prompt") - # 1. Run the ProfileBuilder agent - result = await Runner.run( - profilebuilder_agent, - input=prompt, - context={"task_id": task_id, "user_id": user_id}, - max_turns=3, - ) - - raw_output = result.final_output.strip() - - # 2. Parse JSON output from agent - try: - if not raw_output or not raw_output.startswith("{"): - raise ValueError("Agent output is not valid JSON or is empty") - - field_update = json.loads(raw_output) - - if not isinstance(field_update, dict) or len(field_update) != 1: - raise ValueError("Agent must output a single-field JSON object") - - reason = "profile_partial" - except (json.JSONDecodeError, ValueError) as e: - raise HTTPException(500, f"Agent output invalid: {e}") - - # 3. Webhook to Profile DB only if not a clarification prompt - if "clarification_prompt" not in field_update: - profile_payload = { - "task_id": task_id, - "user_id": user_id, - "agent_type": "profilebuilder", - "message_type": "profile_partial", - "message_content": field_update, - "metadata_reason": reason, - "created_at": datetime.utcnow().isoformat(), - } - - if not PROFILE_WEBHOOK_URL: - raise RuntimeError("Missing PROFILE_WEBHOOK_URL") - - await send_webhook(PROFILE_WEBHOOK_URL, profile_payload) - - # 4. Webhook to Chat UI for the next clarification prompt - next_prompt = result.next_prompt if hasattr(result, "next_prompt") else None + # ------------------------------------------------------------------ # + # 2. Run the agent and normalise its output # + # ------------------------------------------------------------------ # + result = await profilebuilder_agent.run(prompt) + + # `result` can be a Pydantic model (ProfileFieldOut / ClarificationOut) + # or – if something odd happens – a raw string. Convert to dict ↓ + if isinstance(result, BaseModel): + result_dict = result.model_dump() + else: + try: + result_dict = json.loads(result.strip()) + except Exception: + raise HTTPException(500, "Agent returned unparsable output") + + # ------------------------------------------------------------------ # + # 3. 
Decide what kind of message it is # + # ------------------------------------------------------------------ # + is_clarification = "clarification_prompt" in result_dict + created_at = datetime.utcnow().isoformat() + + if is_clarification: + # ----- send follow-up prompt to chat UI ----------------------- # + if not CHAT_WEBHOOK_URL: + raise RuntimeError("Missing CLARIFICATION_WEBHOOK_URL env var") - if next_prompt: chat_payload = { - "task_id": task_id, - "user_id": user_id, - "agent_type": "profilebuilder", - "message_type": "text", - "message_content": next_prompt, + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "text", + "message_content": result_dict["clarification_prompt"], "metadata_reason": "follow_up", - "created_at": datetime.utcnow().isoformat(), + "created_at": created_at, } + await send_webhook(CHAT_WEBHOOK_URL, chat_payload) - if not CHAT_WEBHOOK_URL: - raise RuntimeError("Missing CLARIFICATION_WEBHOOK_URL") + else: + # ----- send partial profile field to DB ----------------------- # + if not PROFILE_WEBHOOK_URL: + raise RuntimeError("Missing PROFILE_WEBHOOK_URL env var") - await send_webhook(CHAT_WEBHOOK_URL, chat_payload) + profile_payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "profile_partial", + "message_content": result_dict, # the single-field dict + "metadata_reason": "profile_partial", + "created_at": created_at, + } + await send_webhook(PROFILE_WEBHOOK_URL, profile_payload) return {"ok": True} From 721d8af0286c6f2c637082e5b91c3e3236affd47 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 16:44:10 +0900 Subject: [PATCH 221/230] Update profilebuilder.py --- src/app/profilebuilder.py | 108 ++++++++++++++++++++------------------ 1 file changed, 58 insertions(+), 50 deletions(-) diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index a78b851e..1eaaa62b 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -1,74 +1,63 @@ # ────────────────────────────────────────────────────────────── -# src/app/profilebuilder.py (← keep the same path as before) +# src/app/profilebuilder.py # ────────────────────────────────────────────────────────────── from fastapi import APIRouter, Request, HTTPException from pydantic import BaseModel from datetime import datetime -import json -import os +import json, os -from app.profilebuilder_agent import profilebuilder_agent # the Agent -from app.util.webhook import send_webhook # Bubble helper +from app.profilebuilder_agent import profilebuilder_agent +from app.util.webhook import send_webhook router = APIRouter() PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") +# ------------------------------------------------------------------ # +# Helper: convert any agent output to a plain dict # +# ------------------------------------------------------------------ # +def to_dict(agent_output): + if isinstance(agent_output, BaseModel): + return agent_output.model_dump() + if isinstance(agent_output, (bytes, bytearray)): + agent_output = agent_output.decode() + if isinstance(agent_output, str): + agent_output = agent_output.strip() + if agent_output.startswith("{"): + return json.loads(agent_output) + raise ValueError("Unable to parse agent output") +# ------------------------------------------------------------------ # +# POST /profilebuilder # +# 
------------------------------------------------------------------ # @router.post("/profilebuilder") async def profilebuilder_handler(req: Request): - # ------------------------------------------------------------------ # - # 1. Validate inbound payload from Bubble # - # ------------------------------------------------------------------ # - data = await req.json() - task_id = data.get("task_id") - user_id = data.get("user_id") - prompt = data.get("prompt") or data.get("user_prompt") or data.get("message") + body = await req.json() + task_id = body.get("task_id") + user_id = body.get("user_id") + prompt = body.get("prompt") or body.get("user_prompt") or body.get("message") if not (task_id and user_id and prompt): raise HTTPException(422, "Missing task_id, user_id, or prompt") - # ------------------------------------------------------------------ # - # 2. Run the agent and normalise its output # - # ------------------------------------------------------------------ # - result = await profilebuilder_agent.run(prompt) - - # `result` can be a Pydantic model (ProfileFieldOut / ClarificationOut) - # or – if something odd happens – a raw string. Convert to dict ↓ - if isinstance(result, BaseModel): - result_dict = result.model_dump() - else: - try: - result_dict = json.loads(result.strip()) - except Exception: - raise HTTPException(500, "Agent returned unparsable output") - - # ------------------------------------------------------------------ # - # 3. Decide what kind of message it is # - # ------------------------------------------------------------------ # - is_clarification = "clarification_prompt" in result_dict - created_at = datetime.utcnow().isoformat() - - if is_clarification: - # ----- send follow-up prompt to chat UI ----------------------- # - if not CHAT_WEBHOOK_URL: - raise RuntimeError("Missing CLARIFICATION_WEBHOOK_URL env var") + # 1. ── Run the agent ──────────────────────────────────────────── + agent_raw = await profilebuilder_agent.run(prompt) + try: + agent_out = to_dict(agent_raw) + except Exception as e: + raise HTTPException(500, f"Agent returned unparsable output: {e}") - chat_payload = { - "task_id": task_id, - "user_id": user_id, - "agent_type": "profilebuilder", - "message_type": "text", - "message_content": result_dict["clarification_prompt"], - "metadata_reason": "follow_up", - "created_at": created_at, - } - await send_webhook(CHAT_WEBHOOK_URL, chat_payload) + # 2. ── Split into “profile fields” vs “prompt to ask” ─────────── + clarification_prompt = agent_out.pop("clarification_prompt", None) + # Any keys left in agent_out are profile fields + has_profile_update = bool(agent_out) + + created_at = datetime.utcnow().isoformat() - else: - # ----- send partial profile field to DB ----------------------- # + # 3-A. ── Send profile-partial webhook (if we have one) ────────── + if has_profile_update: if not PROFILE_WEBHOOK_URL: raise RuntimeError("Missing PROFILE_WEBHOOK_URL env var") @@ -77,10 +66,29 @@ async def profilebuilder_handler(req: Request): "user_id": user_id, "agent_type": "profilebuilder", "message_type": "profile_partial", - "message_content": result_dict, # the single-field dict + "message_content": agent_out, # ← the single-field dict "metadata_reason": "profile_partial", "created_at": created_at, } await send_webhook(PROFILE_WEBHOOK_URL, profile_payload) + # 3-B. 
── Figure out what prompt (if any) to send back to UI ───── + if not clarification_prompt and hasattr(agent_raw, "next_prompt"): + clarification_prompt = getattr(agent_raw, "next_prompt") + + if clarification_prompt: + if not CHAT_WEBHOOK_URL: + raise RuntimeError("Missing CLARIFICATION_WEBHOOK_URL env var") + + chat_payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "text", + "message_content": clarification_prompt, + "metadata_reason": "follow_up", + "created_at": created_at, + } + await send_webhook(CHAT_WEBHOOK_URL, chat_payload) + return {"ok": True} From 09c798396bf446d3d23deb8589e923bf6635204c Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 08:39:19 +0000 Subject: [PATCH 222/230] feat: simplify ProfileFieldOut + prompt enforcement --- src/app/agent_output.py | 7 +++---- src/app/profilebuilder_agent.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/app/agent_output.py b/src/app/agent_output.py index 8a27fcf9..f01e2054 100644 --- a/src/app/agent_output.py +++ b/src/app/agent_output.py @@ -149,10 +149,9 @@ def _type_to_str(t: type[Any]) -> str: from typing import List, Union from pydantic import BaseModel -class ProfileFieldOut(BaseModel): - """One field‑value pair collected by Profile‑builder.""" - field_name: str - value: Union[str, List[str]] +class ProfileFieldOut(BaseModel): # ← keep this exact name + field_name: str # e.g. "niche" + field_value: str | list[str] | int | bool class ClarificationOut(BaseModel): """Prompt asking the user for missing info.""" diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py index 92a517cb..e2a85d14 100644 --- a/src/app/profilebuilder_agent.py +++ b/src/app/profilebuilder_agent.py @@ -10,7 +10,7 @@ name="Profile-builder", instructions=( "Collect ONE profile field at a time from the user.\n" - "Return ONLY a JSON object matching the ProfileFieldOut schema." + "After each answer, respond **only** with valid JSON matching the ProfileFieldOut schema above." ), output_type=ProfileFieldOut, ) From 21558cef7985fb690f50f222c9caedfb7dd82220 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 08:49:11 +0000 Subject: [PATCH 223/230] fix: use agent.invoke() instead of deprecated run() --- src/app/profilebuilder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index 1eaaa62b..cc1f3644 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -43,7 +43,7 @@ async def profilebuilder_handler(req: Request): raise HTTPException(422, "Missing task_id, user_id, or prompt") # 1. 
── Run the agent ──────────────────────────────────────────── - agent_raw = await profilebuilder_agent.run(prompt) + agent_raw = await profilebuilder_agent.invoke(prompt) try: agent_out = to_dict(agent_raw) except Exception as e: From 32768eb59ef23d51ca20f0601eab2f77f0e169fb Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 09:01:48 +0000 Subject: [PATCH 224/230] fix(profilebuilder): await agent directly (__call__) --- src/app/profilebuilder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index cc1f3644..9e270f26 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -43,7 +43,7 @@ async def profilebuilder_handler(req: Request): raise HTTPException(422, "Missing task_id, user_id, or prompt") # 1. ── Run the agent ──────────────────────────────────────────── - agent_raw = await profilebuilder_agent.invoke(prompt) + agent_raw = await profilebuilder_agent(prompt) try: agent_out = to_dict(agent_raw) except Exception as e: From 03b54e7ce5d1641fa9f8ffa2bab7e414070693ab Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 21:02:53 +0900 Subject: [PATCH 225/230] Update profilebuilder.py --- src/app/profilebuilder.py | 95 +++++++++++++++------------------------ 1 file changed, 37 insertions(+), 58 deletions(-) diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index 9e270f26..5966dedf 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -1,40 +1,25 @@ # ────────────────────────────────────────────────────────────── # src/app/profilebuilder.py # ────────────────────────────────────────────────────────────── - from fastapi import APIRouter, Request, HTTPException -from pydantic import BaseModel from datetime import datetime -import json, os +import os from app.profilebuilder_agent import profilebuilder_agent -from app.util.webhook import send_webhook +from agents.run import Runner +from app.util.webhook import send_webhook # ← adjust import path if needed router = APIRouter() PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") -# ------------------------------------------------------------------ # -# Helper: convert any agent output to a plain dict # -# ------------------------------------------------------------------ # -def to_dict(agent_output): - if isinstance(agent_output, BaseModel): - return agent_output.model_dump() - if isinstance(agent_output, (bytes, bytearray)): - agent_output = agent_output.decode() - if isinstance(agent_output, str): - agent_output = agent_output.strip() - if agent_output.startswith("{"): - return json.loads(agent_output) - raise ValueError("Unable to parse agent output") - # ------------------------------------------------------------------ # # POST /profilebuilder # # ------------------------------------------------------------------ # @router.post("/profilebuilder") async def profilebuilder_handler(req: Request): - body = await req.json() + body = await req.json() task_id = body.get("task_id") user_id = body.get("user_id") prompt = body.get("prompt") or body.get("user_prompt") or body.get("message") @@ -42,53 +27,47 @@ async def profilebuilder_handler(req: Request): if not (task_id and user_id and prompt): raise HTTPException(422, "Missing task_id, user_id, or prompt") - # 1. 
── Run the agent ──────────────────────────────────────────── - agent_raw = await profilebuilder_agent(prompt) - try: - agent_out = to_dict(agent_raw) - except Exception as e: - raise HTTPException(500, f"Agent returned unparsable output: {e}") + # 1. ── Run the agent via Runner ───────────────────────────────── + result = await Runner.run(profilebuilder_agent, prompt) + agent_out = result.final_output # ProfileFieldOut instance - # 2. ── Split into “profile fields” vs “prompt to ask” ─────────── - clarification_prompt = agent_out.pop("clarification_prompt", None) - # Any keys left in agent_out are profile fields - has_profile_update = bool(agent_out) + # convert to simple {field_name: field_value} + field_name = agent_out.field_name + field_value = agent_out.field_value + profile_fragment = {field_name: field_value} - created_at = datetime.utcnow().isoformat() + created_at = datetime.utcnow().isoformat() - # 3-A. ── Send profile-partial webhook (if we have one) ────────── - if has_profile_update: - if not PROFILE_WEBHOOK_URL: - raise RuntimeError("Missing PROFILE_WEBHOOK_URL env var") + # 2. ── Send profile-partial webhook ───────────────────────────── + if not PROFILE_WEBHOOK_URL: + raise RuntimeError("Missing PROFILE_WEBHOOK_URL env var") - profile_payload = { + await send_webhook( + PROFILE_WEBHOOK_URL, + { "task_id": task_id, "user_id": user_id, "agent_type": "profilebuilder", "message_type": "profile_partial", - "message_content": agent_out, # ← the single-field dict - "metadata_reason": "profile_partial", - "created_at": created_at, - } - await send_webhook(PROFILE_WEBHOOK_URL, profile_payload) - - # 3-B. ── Figure out what prompt (if any) to send back to UI ───── - if not clarification_prompt and hasattr(agent_raw, "next_prompt"): - clarification_prompt = getattr(agent_raw, "next_prompt") - - if clarification_prompt: - if not CHAT_WEBHOOK_URL: - raise RuntimeError("Missing CLARIFICATION_WEBHOOK_URL env var") - - chat_payload = { - "task_id": task_id, - "user_id": user_id, - "agent_type": "profilebuilder", - "message_type": "text", - "message_content": clarification_prompt, - "metadata_reason": "follow_up", + "message_content": profile_fragment, "created_at": created_at, - } - await send_webhook(CHAT_WEBHOOK_URL, chat_payload) + }, + ) + + # 3. 
── Send the agent’s follow-up question as a chat message ─── + # (the agent’s textual reply is in result.chat_response) + follow_up = result.chat_response + if follow_up and CHAT_WEBHOOK_URL: + await send_webhook( + CHAT_WEBHOOK_URL, + { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "text", + "message_content": follow_up, + "created_at": created_at, + }, + ) return {"ok": True} From 5446bd3204f410edf07ec70c7ae15b79f4147c06 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 21:03:36 +0900 Subject: [PATCH 226/230] Update profilebuilder.py From 34e9f67e131ee3aa35cae76ce544c14e540b37c2 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 12:16:56 +0000 Subject: [PATCH 227/230] feat: lean profilebuilder handler + clarification_prompt field --- src/app/agent_output.py | 9 ++-- src/app/profilebuilder.py | 83 +++++++++++++++++++++------------ src/app/profilebuilder_agent.py | 3 +- 3 files changed, 59 insertions(+), 36 deletions(-) diff --git a/src/app/agent_output.py b/src/app/agent_output.py index f01e2054..32cef797 100644 --- a/src/app/agent_output.py +++ b/src/app/agent_output.py @@ -149,10 +149,11 @@ def _type_to_str(t: type[Any]) -> str: from typing import List, Union from pydantic import BaseModel -class ProfileFieldOut(BaseModel): # ← keep this exact name - field_name: str # e.g. "niche" - field_value: str | list[str] | int | bool - +class ProfileFieldOut(BaseModel): + field_name: str + field_value: str | list[str] | int | bool + clarification_prompt: str | None = None # ← NEW, optional + class ClarificationOut(BaseModel): """Prompt asking the user for missing info.""" prompt: str diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index 5966dedf..0467f11e 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -1,19 +1,33 @@ # ────────────────────────────────────────────────────────────── # src/app/profilebuilder.py # ────────────────────────────────────────────────────────────── + from fastapi import APIRouter, Request, HTTPException +from pydantic import BaseModel from datetime import datetime -import os +import json, os from app.profilebuilder_agent import profilebuilder_agent -from agents.run import Runner -from app.util.webhook import send_webhook # ← adjust import path if needed +from app.util.webhook import send_webhook router = APIRouter() - PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") +# ------------------------------------------------------------------ # +# Helper: convert any agent output to a plain dict # +# ------------------------------------------------------------------ # +def to_dict(agent_output): + if isinstance(agent_output, BaseModel): + return agent_output.model_dump() + if isinstance(agent_output, (bytes, bytearray)): + agent_output = agent_output.decode() + if isinstance(agent_output, str): + agent_output = agent_output.strip() + if agent_output.startswith("{"): + return json.loads(agent_output) + raise ValueError("Unable to parse agent output") + # ------------------------------------------------------------------ # # POST /profilebuilder # # ------------------------------------------------------------------ # @@ -27,20 +41,24 @@ async def profilebuilder_handler(req: Request): if not (task_id and user_id and prompt): raise HTTPException(422, "Missing task_id, user_id, or prompt") - # 1. 
── Run the agent via Runner ───────────────────────────────── - result = await Runner.run(profilebuilder_agent, prompt) - agent_out = result.final_output # ProfileFieldOut instance + # 1. ── Run the agent ──────────────────────────────────────────── + agent_raw = await profilebuilder_agent(prompt) + try: + agent_out = to_dict(agent_raw) + except Exception as e: + raise HTTPException(500, f"Agent returned unparsable output: {e}") - # convert to simple {field_name: field_value} - field_name = agent_out.field_name - field_value = agent_out.field_value - profile_fragment = {field_name: field_value} + # 2. ── Split into “profile fields” vs “prompt to ask” ─────────── + clarification_prompt = agent_out.pop("clarification_prompt", None) + # Any keys left in agent_out are profile fields + has_profile_update = bool(agent_out) - created_at = datetime.utcnow().isoformat() + created_at = datetime.utcnow().isoformat() - # 2. ── Send profile-partial webhook ───────────────────────────── - if not PROFILE_WEBHOOK_URL: - raise RuntimeError("Missing PROFILE_WEBHOOK_URL env var") + # 3-A. ── Send profile-partial webhook (if we have one) ────────── + if has_profile_update: + if not PROFILE_WEBHOOK_URL: + raise RuntimeError("Missing PROFILE_WEBHOOK_URL env var") await send_webhook( PROFILE_WEBHOOK_URL, @@ -54,20 +72,23 @@ async def profilebuilder_handler(req: Request): }, ) - # 3. ── Send the agent’s follow-up question as a chat message ─── - # (the agent’s textual reply is in result.chat_response) - follow_up = result.chat_response - if follow_up and CHAT_WEBHOOK_URL: - await send_webhook( - CHAT_WEBHOOK_URL, - { - "task_id": task_id, - "user_id": user_id, - "agent_type": "profilebuilder", - "message_type": "text", - "message_content": follow_up, - "created_at": created_at, - }, - ) + # 3-B. ── Figure out what prompt (if any) to send back to UI ───── + if not clarification_prompt and hasattr(agent_raw, "next_prompt"): + clarification_prompt = getattr(agent_raw, "next_prompt") + + if clarification_prompt: + if not CHAT_WEBHOOK_URL: + raise RuntimeError("Missing CLARIFICATION_WEBHOOK_URL env var") + + chat_payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "text", + "message_content": clarification_prompt, + "metadata_reason": "follow_up", + "created_at": created_at, + } + await send_webhook(CHAT_WEBHOOK_URL, chat_payload) - return {"ok": True} + return {"ok": True} \ No newline at end of file diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py index e2a85d14..c2a0ff57 100644 --- a/src/app/profilebuilder_agent.py +++ b/src/app/profilebuilder_agent.py @@ -10,7 +10,8 @@ name="Profile-builder", instructions=( "Collect ONE profile field at a time from the user.\n" - "After each answer, respond **only** with valid JSON matching the ProfileFieldOut schema above." + "After each answer, respond only with valid JSON matching the ProfileFieldOut schema above.\n" + "Use the field clarification_prompt to hold the **next question you want to ask the user** (or null if done with this turn)." 
), output_type=ProfileFieldOut, ) From 66b8b50e4e01a877ef7a023abaec112a45722434 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Thu, 8 May 2025 12:19:08 +0000 Subject: [PATCH 228/230] updated to resolver api profilebuilder --- src/app/profilebuilder.py | 84 ++++++++++++--------------------------- 1 file changed, 26 insertions(+), 58 deletions(-) diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index 0467f11e..9cd04a8c 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -1,36 +1,15 @@ -# ────────────────────────────────────────────────────────────── -# src/app/profilebuilder.py -# ────────────────────────────────────────────────────────────── - -from fastapi import APIRouter, Request, HTTPException -from pydantic import BaseModel -from datetime import datetime -import json, os - from app.profilebuilder_agent import profilebuilder_agent -from app.util.webhook import send_webhook +from agents.run import Runner +from app.util.webhook import send_webhook # make sure this import path is right +from datetime import datetime +from fastapi import APIRouter, Request, HTTPException +import os router = APIRouter() PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") -# ------------------------------------------------------------------ # -# Helper: convert any agent output to a plain dict # -# ------------------------------------------------------------------ # -def to_dict(agent_output): - if isinstance(agent_output, BaseModel): - return agent_output.model_dump() - if isinstance(agent_output, (bytes, bytearray)): - agent_output = agent_output.decode() - if isinstance(agent_output, str): - agent_output = agent_output.strip() - if agent_output.startswith("{"): - return json.loads(agent_output) - raise ValueError("Unable to parse agent output") -# ------------------------------------------------------------------ # -# POST /profilebuilder # -# ------------------------------------------------------------------ # @router.post("/profilebuilder") async def profilebuilder_handler(req: Request): body = await req.json() @@ -41,24 +20,18 @@ async def profilebuilder_handler(req: Request): if not (task_id and user_id and prompt): raise HTTPException(422, "Missing task_id, user_id, or prompt") - # 1. ── Run the agent ──────────────────────────────────────────── - agent_raw = await profilebuilder_agent(prompt) - try: - agent_out = to_dict(agent_raw) - except Exception as e: - raise HTTPException(500, f"Agent returned unparsable output: {e}") + # 1. Run the agent ------------------------------------------------------------------- + result = await Runner.run(profilebuilder_agent, prompt) + out = result.final_output # this is a ProfileFieldOut - # 2. ── Split into “profile fields” vs “prompt to ask” ─────────── - clarification_prompt = agent_out.pop("clarification_prompt", None) - # Any keys left in agent_out are profile fields - has_profile_update = bool(agent_out) + profile_fragment = {out.field_name: out.field_value} + follow_up = out.clarification_prompt created_at = datetime.utcnow().isoformat() - # 3-A. ── Send profile-partial webhook (if we have one) ────────── - if has_profile_update: - if not PROFILE_WEBHOOK_URL: - raise RuntimeError("Missing PROFILE_WEBHOOK_URL env var") + # 2. 
Send profile-partial webhook ----------------------------------------------------- + if not PROFILE_WEBHOOK_URL: + raise RuntimeError("PROFILE_WEBHOOK_URL env var is missing") await send_webhook( PROFILE_WEBHOOK_URL, @@ -72,23 +45,18 @@ async def profilebuilder_handler(req: Request): }, ) - # 3-B. ── Figure out what prompt (if any) to send back to UI ───── - if not clarification_prompt and hasattr(agent_raw, "next_prompt"): - clarification_prompt = getattr(agent_raw, "next_prompt") - - if clarification_prompt: - if not CHAT_WEBHOOK_URL: - raise RuntimeError("Missing CLARIFICATION_WEBHOOK_URL env var") - - chat_payload = { - "task_id": task_id, - "user_id": user_id, - "agent_type": "profilebuilder", - "message_type": "text", - "message_content": clarification_prompt, - "metadata_reason": "follow_up", - "created_at": created_at, - } - await send_webhook(CHAT_WEBHOOK_URL, chat_payload) + # 3. Send follow-up chat webhook (if any) --------------------------------------------- + if follow_up and CHAT_WEBHOOK_URL: + await send_webhook( + CHAT_WEBHOOK_URL, + { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "text", + "message_content": follow_up, + "created_at": created_at, + }, + ) return {"ok": True} \ No newline at end of file From ac5536608b22e0ae74b0c9966a6f0a0ae6cf7eae Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 9 May 2025 04:17:26 +0000 Subject: [PATCH 229/230] feat: introduce StorageBackend interface with Bubble/Supabase implementations --- src/app/profilebuilder.py | 70 +++++++++------------------------ src/app/storage.py | 83 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 51 deletions(-) create mode 100644 src/app/storage.py diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py index 9cd04a8c..e156317c 100644 --- a/src/app/profilebuilder.py +++ b/src/app/profilebuilder.py @@ -1,62 +1,30 @@ -from app.profilebuilder_agent import profilebuilder_agent -from agents.run import Runner -from app.util.webhook import send_webhook # make sure this import path is right -from datetime import datetime +# src/app/profilebuilder.py from fastapi import APIRouter, Request, HTTPException -import os +from datetime import datetime +from app.storage import get_storage +from agents.run import Runner +from app.profilebuilder_agent import profilebuilder_agent router = APIRouter() -PROFILE_WEBHOOK_URL = os.getenv("PROFILE_WEBHOOK_URL") -CHAT_WEBHOOK_URL = os.getenv("CLARIFICATION_WEBHOOK_URL") - +storage = get_storage() @router.post("/profilebuilder") async def profilebuilder_handler(req: Request): - body = await req.json() - task_id = body.get("task_id") - user_id = body.get("user_id") - prompt = body.get("prompt") or body.get("user_prompt") or body.get("message") - - if not (task_id and user_id and prompt): + data = await req.json() + t, u, p = data.get("task_id"), data.get("user_id"), data.get("prompt") + if not (t and u and p): raise HTTPException(422, "Missing task_id, user_id, or prompt") - # 1. Run the agent ------------------------------------------------------------------- - result = await Runner.run(profilebuilder_agent, prompt) - out = result.final_output # this is a ProfileFieldOut - - profile_fragment = {out.field_name: out.field_value} - follow_up = out.clarification_prompt - - created_at = datetime.utcnow().isoformat() - - # 2. 
Send profile-partial webhook ----------------------------------------------------- - if not PROFILE_WEBHOOK_URL: - raise RuntimeError("PROFILE_WEBHOOK_URL env var is missing") + # 1) Get the agent’s output + result = await Runner.run(profilebuilder_agent, p) + out = result.final_output + ts = datetime.utcnow().isoformat() - await send_webhook( - PROFILE_WEBHOOK_URL, - { - "task_id": task_id, - "user_id": user_id, - "agent_type": "profilebuilder", - "message_type": "profile_partial", - "message_content": profile_fragment, - "created_at": created_at, - }, - ) + # 2) Save profile field (calls Bubble webhook or Supabase upsert) + await storage.save_profile_field(t, u, out.field_name, out.field_value, ts) - # 3. Send follow-up chat webhook (if any) --------------------------------------------- - if follow_up and CHAT_WEBHOOK_URL: - await send_webhook( - CHAT_WEBHOOK_URL, - { - "task_id": task_id, - "user_id": user_id, - "agent_type": "profilebuilder", - "message_type": "text", - "message_content": follow_up, - "created_at": created_at, - }, - ) + # 3) Send follow-up chat if needed + if out.clarification_prompt: + await storage.send_chat_message(t, u, out.clarification_prompt, ts) - return {"ok": True} \ No newline at end of file + return {"ok": True} diff --git a/src/app/storage.py b/src/app/storage.py new file mode 100644 index 00000000..ffa9c40a --- /dev/null +++ b/src/app/storage.py @@ -0,0 +1,83 @@ +# src/app/storage.py +import os +from abc import ABC, abstractmethod + +# ENV var to pick backend: "bubble" (default) or "supabase" +STORAGE_BACKEND = os.getenv("STORAGE_BACKEND", "bubble").lower() + +class StorageBackend(ABC): + @abstractmethod + async def save_profile_field(self, task_id, user_id, field_name, field_value, created_at): + ... + + @abstractmethod + async def send_chat_message(self, task_id, user_id, message, created_at): + ... 
+ +# will be replaced below +_storage: StorageBackend + +# src/app/storage.py (continued, bubble section) +from app.util.webhook import send_webhook + +class BubbleStorage(StorageBackend): + def __init__(self): + self.profile_url = os.getenv("PROFILE_WEBHOOK_URL") + self.chat_url = os.getenv("CLARIFICATION_WEBHOOK_URL") + + async def save_profile_field(self, task_id, user_id, field_name, field_value, created_at): + payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "profile_partial", + "message_content": {field_name: field_value}, + "created_at": created_at, + } + await send_webhook(self.profile_url, payload) + + async def send_chat_message(self, task_id, user_id, message, created_at): + payload = { + "task_id": task_id, + "user_id": user_id, + "agent_type": "profilebuilder", + "message_type": "text", + "message_content": message, + "created_at": created_at, + } + await send_webhook(self.chat_url, payload) + +# src/app/storage.py (continued, supabase section) +from supabase import create_client + +class SupabaseStorage(StorageBackend): + def __init__(self): + url = os.getenv("SUPABASE_URL") + key = os.getenv("SUPABASE_SERVICE_KEY") + self.sb = create_client(url, key) + + async def save_profile_field(self, task_id, user_id, field_name, field_value, created_at): + await self.sb.table("profiles").upsert({ + "task_id": task_id, + "user_id": user_id, + field_name: field_value, + "updated_at": created_at, + }).execute() + + async def send_chat_message(self, task_id, user_id, message, created_at): + # If you want to store chat messages too: + await self.sb.table("chat_messages").insert({ + "task_id": task_id, + "user_id": user_id, + "content": message, + "created_at": created_at, + }).execute() + +# src/app/storage.py (continued) +if STORAGE_BACKEND == "supabase": + _storage = SupabaseStorage() +else: + _storage = BubbleStorage() + +# export the single instance +get_storage = lambda: _storage From f8702cb9e6bc1c4443f3609aa176815218b821c4 Mon Sep 17 00:00:00 2001 From: Kvkthecreator <134989386+Kvkthecreator@users.noreply.github.com> Date: Fri, 9 May 2025 04:49:45 +0000 Subject: [PATCH 230/230] fix: implement full upsert payload in SupabaseStorage.save_profile_field --- src/app/storage.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/app/storage.py b/src/app/storage.py index ffa9c40a..ccb3b0f9 100644 --- a/src/app/storage.py +++ b/src/app/storage.py @@ -48,6 +48,7 @@ async def send_chat_message(self, task_id, user_id, message, created_at): await send_webhook(self.chat_url, payload) # src/app/storage.py (continued, supabase section) +from datetime import datetime from supabase import create_client class SupabaseStorage(StorageBackend): @@ -57,15 +58,18 @@ def __init__(self): self.sb = create_client(url, key) async def save_profile_field(self, task_id, user_id, field_name, field_value, created_at): - await self.sb.table("profiles").upsert({ + # Build base payload + payload = { "task_id": task_id, "user_id": user_id, - field_name: field_value, "updated_at": created_at, - }).execute() + field_name: field_value + } + # Upsert into profiles table + await self.sb.table("profiles").upsert(payload).execute() async def send_chat_message(self, task_id, user_id, message, created_at): - # If you want to store chat messages too: + # (optional) if you want to log chat messages await self.sb.table("chat_messages").insert({ "task_id": task_id, "user_id": user_id, @@ -73,6 +77,7 @@ async def send_chat_message(self, 
task_id, user_id, message, created_at): "created_at": created_at, }).execute() + # src/app/storage.py (continued) if STORAGE_BACKEND == "supabase": _storage = SupabaseStorage()
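
The three patches above leave the service with a single POST /profilebuilder endpoint whose side effects (the profile-field upsert and any follow-up chat message) are routed through whichever StorageBackend the STORAGE_BACKEND environment variable selects -- "bubble" by default (webhook delivery), "supabase" for direct table writes. The snippet below is a minimal smoke test of that flow and is not part of the patch series itself: it assumes the APIRouter is included in the running FastAPI app, that the server is listening on localhost:8000 (uvicorn's default), and that the matching webhook URLs or Supabase credentials are exported; the task and user IDs are placeholders.

    # Hypothetical smoke test for the /profilebuilder flow introduced in PATCH 228-230.
    # Assumes the server is already running locally and httpx is available.
    import asyncio
    import httpx

    async def main():
        payload = {
            "task_id": "task-123",   # placeholder IDs for illustration only
            "user_id": "user-456",
            "prompt": "My brand sells handmade candles",
        }
        async with httpx.AsyncClient() as client:
            resp = await client.post("http://localhost:8000/profilebuilder", json=payload)
            resp.raise_for_status()
            # The handler returns {"ok": True} once the storage backend has been called.
            print(resp.json())

    if __name__ == "__main__":
        asyncio.run(main())

Run against a server started with STORAGE_BACKEND=supabase, one request like this should upsert a row into the profiles table and, when the agent returns a clarification_prompt, insert a row into chat_messages; with the default backend the same request fires the profile and clarification webhooks instead.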