add muagen sdk v0.1.0 #98

Merged: 1 commit, Jan 6, 2025
4 changes: 2 additions & 2 deletions .github/workflows/docker-image-pull.yml
@@ -11,8 +11,8 @@ jobs:
       architecture: [amd64, arm64]
       os: [linux]
       service:
-        - name: runtime:0.1.0
-        - name: muagent:0.1.0
+        - name: runtime:0.1.1
+        - name: muagent:0.1.1
         - name: ekgfrontend:0.1.0
 
     steps:
4 changes: 2 additions & 2 deletions .github/workflows/docker-image.yml
@@ -12,7 +12,7 @@ jobs:
       - name: runtime
         context: ./runtime
         dockerfile: ./runtime/Dockerfile.no-package
-        tag: ghcr.io/codefuse-ai/runtime:0.1.0
+        tag: ghcr.io/codefuse-ai/runtime:0.1.1
         tag_latest: ghcr.io/codefuse-ai/runtime:latest
       - name: ekgfrontend
         context: .
@@ -22,7 +22,7 @@ jobs:
       - name: ekgservice
         context: .
         dockerfile: ./Dockerfile_gh
-        tag: ghcr.io/codefuse-ai/muagent:0.1.0
+        tag: ghcr.io/codefuse-ai/muagent:0.1.1
         tag_latest: ghcr.io/codefuse-ai/muagent:latest
 
     steps:
2 changes: 1 addition & 1 deletion docker-compose.yaml
@@ -190,7 +190,7 @@ services:
       context: .
       dockerfile: Dockerfile
     container_name: ekgservice
-    image: muagent:0.1.0
+    image: muagent:0.1.1
     environment:
       USER: root
       TZ: "${TZ}"
8 changes: 4 additions & 4 deletions docker_pull_images.sh
@@ -17,11 +17,11 @@ docker pull redis/redis-stack:7.4.0-v0
 docker pull ollama/ollama:0.3.6
 
 # pull images from github ghcr.io by nju
-docker pull ghcr.nju.edu.cn/runtime:0.1.0
-docker pull ghcr.nju.edu.cn/muagent:0.1.0
+docker pull ghcr.nju.edu.cn/runtime:0.1.1
+docker pull ghcr.nju.edu.cn/muagent:0.1.1
 docker pull ghcr.nju.edu.cn/ekgfrontend:0.1.0
 
 # # pull images from github ghcr.io
-# docker pull ghcr.io/runtime:0.1.0
-# docker pull ghcr.io/muagent:0.1.0
+# docker pull ghcr.io/runtime:0.1.1
+# docker pull ghcr.io/muagent:0.1.1
 # docker pull ghcr.io/ekgfrontend:0.1.0
103 changes: 68 additions & 35 deletions examples/ekg_examples/start.py
@@ -37,6 +37,7 @@
 import test_config
 
 from muagent.schemas.db import *
+from muagent.schemas.apis.ekg_api_schema import LLMFCRequest
 from muagent.db_handler import *
 from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
 from muagent.service.ekg_construct.ekg_construct_base import EKGConstructService
@@ -46,7 +46,8 @@
 
 from pydantic import BaseModel
 
-
+from muagent.schemas.models import ModelConfig
+from muagent.models import get_model
 
 
 cur_dir = os.path.dirname(__file__)
@@ -92,56 +94,75 @@ def update_params(self, **kwargs):
 
     def _llm_type(self, *args):
         return ""
 
-    def predict(self, prompt: str, stop = None) -> str:
-        return self._call(prompt, stop)
-
-    def _call(self, prompt: str,
-              stop = None) -> str:
-
+    def _get_model(self):
         """_call
         """
-        return_str = ""
-        stop = stop or self.stop
-
-        if self.model_type == "ollama":
-            stream = ollama.chat(
-                model=self.model_name,
-                messages=[{'role': 'user', 'content': prompt}],
-                stream=True,
-            )
-            answer = ""
-            for chunk in stream:
-                answer += chunk['message']['content']
-
-            return answer
-        elif self.model_type == "openai":
+        if self.model_type in [
+            "ollama", "qwen", "openai", "lingyiwanwu",
+            "kimi", "moonshot",
+        ]:
             from muagent.llm_models.openai_model import getChatModelFromConfig
             llm_config = LLMConfig(
                 model_name=self.model_name,
-                model_engine="openai",
+                model_engine=self.model_type,
                 api_key=self.api_key,
                 api_base_url=self.url,
                 temperature=self.temperature,
                 stop=self.stop
             )
             model = getChatModelFromConfig(llm_config)
-            return model.predict(prompt, stop=self.stop)
-        elif self.model_type in ["lingyiwanwu", "kimi", "moonshot", "qwen"]:
-            from muagent.llm_models.openai_model import getChatModelFromConfig
-            llm_config = LLMConfig(
+        else:
+            model_config = ModelConfig(
+                model_type=self.model_type,
                 model_name=self.model_name,
-                model_engine=self.model_type,
                 api_key=self.api_key,
-                api_base_url=self.url,
+                api_url=self.url,
                 temperature=self.temperature,
-                stop=self.stop
             )
-            model = getChatModelFromConfig(llm_config)
-            return model.predict(prompt, stop=self.stop)
-        else:
-            pass
+            model = get_model(model_config)
+        return model
+
+    def predict(self, prompt: str, stop = None) -> str:
+        return self._call(prompt, stop)
+
-        return return_str
+    def fc(self, request: LLMFCRequest) -> str:
+        """_function_call
+        """
+        if self.model_type not in [
+            "openai", "ollama", "lingyiwanwu", "kimi", "moonshot", "qwen"
+        ]:
+            return f"{self.model_type} not in valid model range"
+
+        model = self._get_model()
+        return model.fc(
+            messages=request.messages,
+            tools=request.tools,
+            tool_choice=request.tool_choice,
+            parallel_tool_calls=request.parallel_tool_calls,
+        )
+
+    def _call(self, prompt: str,
+              stop = None) -> str:
+        """_call
+        """
+        return_str = ""
+        stop = stop or self.stop
+        if self.model_type not in [
+            "openai", "ollama", "lingyiwanwu", "kimi", "moonshot", "qwen"
+        ]:
+            pass
+        elif self.model_type not in [
+            "dashscope_chat", "moonshot_chat", "ollama_chat",
+            "openai_chat", "qwen_chat", "yi_chat",
+            "dashscope_text_embedding", "ollama_embedding", "openai_embedding", "qwen_text_embedding"
+        ]:
+            pass
+        else:
+            return f"{self.model_type} not in valid model range"
+
+        model = self._get_model()
+        return model.predict(prompt, stop=self.stop)
+

 class CustomEmbeddings(Embeddings):
@@ -185,6 +206,17 @@ def _get_sentence_emb(self, sentence: str) -> dict:
             )
             text2vector_dict = get_embedding("openai", [sentence], embed_config=embed_config)
             return text2vector_dict[sentence]
+        elif self.embedding_type in [
+            "dashscope_text_embedding", "ollama_embedding", "openai_embedding", "qwen_text_embedding"
+        ]:
+            model_config = ModelConfig(
+                model_type=self.embedding_type,
+                model_name=self.model_name,
+                api_key=self.api_key,
+                api_url=self.url,
+            )
+            model = get_model(model_config)
+            return model.embed_query(sentence)
         else:
             pass
 
@@ -280,6 +312,7 @@ def embed_query(self, text: str) -> List[float]:
         llm_config=llm_config,
         tb_config=tb_config,
         gb_config=gb_config,
+        initialize_space=True,
         clear_history_data=clear_history_data
     )
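The refactor above collapses the old per-provider if/elif chains into a single `_get_model()` helper: legacy provider names ("openai", "qwen", "kimi", ...) still go through `LLMConfig` and `getChatModelFromConfig`, while everything else is routed to the new `ModelConfig`/`get_model` factory, which also backs the new `fc` function-call entry point and the embedding path. A minimal sketch of the new factory in use (model names, keys, and URLs are placeholders; any `ModelConfig` behavior beyond what is visible in this diff is an assumption):

```python
# Sketch only: exercises the ModelConfig/get_model path added in this PR.
from muagent.schemas.models import ModelConfig
from muagent.models import get_model

# A chat model through the unified factory; "qwen_chat" is one of the
# new model_type values accepted by the rewritten _call above.
chat_config = ModelConfig(
    model_type="qwen_chat",
    model_name="qwen2.5-72b-instruct",
    api_key="sk-...",  # placeholder
    api_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    temperature=0.0,
)
llm = get_model(chat_config)
print(llm.predict("Hello!"))

# An embedding model through the same factory, mirroring the new
# CustomEmbeddings branch above.
embed_config = ModelConfig(
    model_type="qwen_text_embedding",
    model_name="text-embedding-v3",
    api_key="sk-...",  # placeholder
)
vector = get_model(embed_config).embed_query("knowledge graph")
```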
3 changes: 2 additions & 1 deletion examples/muagent_examples/docchat_example.py
@@ -60,7 +60,8 @@
 # create your knowledge base
 from muagent.service.kb_api import create_kb, upload_files2kb
 from muagent.utils.server_utils import run_async
-from muagent.orm import create_tables
+# from muagent.orm import create_tables
+from muagent.db_handler import create_tables
 
 
 # use to test, don't create some directory
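For downstream code that still imports `create_tables` from `muagent.orm`, the migration is a one-line import change, as in this sketch (it assumes the function keeps its zero-argument signature):

```python
# Old import path, replaced in this release:
# from muagent.orm import create_tables
# New import path:
from muagent.db_handler import create_tables

create_tables()  # create the ORM tables backing the knowledge-base service
```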
106 changes: 104 additions & 2 deletions examples/test_config.py.example
@@ -1,6 +1,8 @@
 import os, openai, base64
 from loguru import logger
 
+os.environ["DM_llm_name"] = 'Qwen2_72B_Instruct_OpsGPT' #or gpt_4
+
 # fallback LLM config
 OPENAI_API_BASE = "https://api.openai.com/v1"
 os.environ["API_BASE_URL"] = OPENAI_API_BASE
@@ -19,6 +21,78 @@ os.environ["gpt4-llm_temperature"] = "0.0"



+MODEL_CONFIGS = {
+    # old llm config
+    "default": {
+        "model_name": "gpt-3.5-turbo",
+        "model_engine": "qwen",
+        "temperature": "0",
+        "api_key": "",
+        "api_base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
+    },
+    "codefuser": {
+        "model_name": "gpt-4",
+        "model_engine": "openai",
+        "temperature": "0",
+        "api_key": "",
+        "api_base_url": OPENAI_API_BASE,
+    },
+    # new llm config
+    "dashscope_chat": {
+        "model_type": "dashscope_chat",
+        "model_name": "qwen2.5-72b-instruct",
+        "api_key": "",
+    },
+    "moonshot_chat": {
+        "model_type": "moonshot_chat",
+        "model_name": "moonshot-v1-8k",
+        "api_key": "",
+    },
+    "ollama_chat": {
+        "model_type": "ollama_chat",
+        "model_name": "qwen2.5-0.5b",
+        "api_key": "",
+    },
+    "openai_chat": {
+        "model_type": "openai_chat",
+        "model_name": "gpt-4",
+        "api_key": "",
+    },
+    "qwen_chat": {
+        "model_type": "qwen_chat",
+        "model_name": "qwen2.5-72b-instruct",
+        "api_key": "",
+    },
+    "yi_chat": {
+        "model_type": "yi_chat",
+        "model_name": "yi-lightning",
+        "api_key": "",
+    },
+    # embedding configs
+    "dashscope_text_embedding": {
+        "model_type": "dashscope_text_embedding",
+        "model_name": "text-embedding-v3",
+        "api_key": "",
+    },
+    "ollama_embedding": {
+        "model_type": "ollama_embedding",
+        "model_name": "qwen2.5-0.5b",
+        "api_key": "",
+    },
+    "openai_embedding": {
+        "model_type": "openai_embedding",
+        "model_name": "text-embedding-ada-002",
+        "api_key": "",
+    },
+    "qwen_text_embedding": {
+        "model_type": "dashscope_text_embedding",
+        "model_name": "text-embedding-v3",
+        "api_key": "",
+    },
+}
+
+os.environ["MODEL_CONFIGS"] = json.dumps(MODEL_CONFIGS)

#### NebulaHandler ####
os.environ['nb_host'] = 'graphd'
os.environ['nb_port'] = '9669'
@@ -41,8 +115,36 @@ os.environ["tb_index_name"] = "ekg_migration_new"
 os.environ['tb_definition_value'] = 'message_test_new'
 os.environ['tb_expire_time'] = '604800' #86400*7
 
+# clear history data in tb and gb
+os.environ['clear_history_data'] = 'True'
+
+#################
+##  DB_CONFIGS ##
+#################
+DB_CONFIGS = {
+    "gb_config": {
+        "gb_type": "NebulaHandler",
+        "extra_kwargs": {
+            'host': 'graphd',
+            'port': '9669',
+            'username': os.environ['nb_username'],
+            'password': os.environ['nb_password'],
+            'space': "client"
+        }
+    },
+    "tb_config": {
+        "tb_type": 'TBaseHandler',
+        "index_name": "opsgptkg",
+        "host": 'redis-stack',
+        "port": '6379',
+        "username": os.environ['tb_username'],
+        "password": os.environ['tb_password'],
+        "extra_kwargs": {
+            "definition_value": "opsgptkg",
+            "memory_definition_value": "opsgptkg_message"
+        }
+    }
+}
+os.environ["DB_CONFIGS"] = json.dumps(DB_CONFIGS)



########################################
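One caveat: both new blocks call `json.dumps`, but the example's first line imports only `os, openai, base64`, so a working config also needs `import json` (unless json is imported elsewhere in the file). Consumers read the serialized configs back symmetrically, as in this sketch (it assumes the environment variable names above):

```python
import json
import os

# test_config.py serializes MODEL_CONFIGS and DB_CONFIGS into the
# environment; downstream services recover them with json.loads.
model_configs = json.loads(os.environ["MODEL_CONFIGS"])
db_configs = json.loads(os.environ["DB_CONFIGS"])

print(model_configs["qwen_chat"]["model_name"])  # qwen2.5-72b-instruct
print(db_configs["gb_config"]["gb_type"])        # NebulaHandler
```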
18 changes: 11 additions & 7 deletions muagent/__init__.py
@@ -1,7 +1,11 @@
-# encoding: utf-8
-'''
-@author: 温进
-@file: __init__.py.py
-@time: 2023/11/9 4:01 PM
-@desc:
-'''
+from .ekg_project import EKG, get_ekg_project_config_from_env
+from .project_manager import get_project_config_from_env
+from .models import get_model
+from .agents import get_agent
+from .tools import get_tool
+
+__all__ = [
+    "EKG", "get_model", "get_agent", "get_tool",
+    "get_ekg_project_config_from_env",
+    "get_project_config_from_env"
+]
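With the placeholder module docstring replaced by explicit exports, the package now has a flat top-level API. A short usage sketch (only the names in `__all__` are given by this diff; the `EKG` constructor arguments are assumptions):

```python
from muagent import EKG, get_ekg_project_config_from_env

# Hypothetical wiring: build a project config from the environment
# variables populated by test_config.py, then start an EKG project.
project_config = get_ekg_project_config_from_env()
ekg = EKG(project_config=project_config)  # constructor signature assumed
```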
30 changes: 30 additions & 0 deletions muagent/agents/__init__.py
@@ -0,0 +1,30 @@
+from .base_agent import BaseAgent
+from .single_agent import SingleAgent
+from .react_agent import ReactAgent
+from .task_agent import TaskAgent
+from .group_agent import GroupAgent
+from .user_agent import UserAgent
+from .functioncall_agent import FunctioncallAgent
+from ..schemas import AgentConfig
+
+__all__ = [
+    "BaseAgent",
+    "SingleAgent",
+    "ReactAgent",
+    "TaskAgent",
+    "GroupAgent",
+    "UserAgent",
+    "FunctioncallAgent"
+]
+
+
+def get_agent(agent_config: AgentConfig) -> BaseAgent:
+    """Get the agent by agent config
+
+    Args:
+        agent_config (`AgentConfig`): The agent config
+
+    Returns:
+        `BaseAgent`: The specific agent
+    """
+    return BaseAgent.init_from_project_config(agent_config)
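A usage sketch for the new factory; the `AgentConfig` fields below are hypothetical, since only the class name `AgentConfig` appears in this diff:

```python
from muagent.agents import get_agent
from muagent.schemas import AgentConfig

# Hypothetical fields: consult muagent.schemas.AgentConfig for the
# actual schema.
agent_config = AgentConfig(
    agent_type="SingleAgent",
    agent_name="demo_agent",
)

# get_agent dispatches through BaseAgent.init_from_project_config,
# returning the concrete agent subclass named by the config.
agent = get_agent(agent_config)
```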