import os
import nest_asyncio
from dotenv import load_dotenv, find_dotenv
# The .env file holds one KEY=VALUE pair per line, e.g.: API_KEYNAME=AStringThatIsTheLongAPIKeyFromSomeService
def load_env():
    """Load environment variables from the nearest discoverable .env file."""
    load_dotenv(find_dotenv())
def get_openai_api_key():
    """Return the OpenAI API key from the environment, loading .env first.

    Returns None if OPENAI_API_KEY is not set.
    """
    load_env()
    return os.getenv("OPENAI_API_KEY")
from llama_index.core.tools import QueryEngineTool
# Wrap the two query engines as router tools. The description text is what
# the LLM selector reads when deciding which tool answers a given query.
# NOTE(review): summary_query_engine / vector_query_engine are defined in an
# earlier cell not visible here — confirm they exist before this runs.
summary_tool = QueryEngineTool.from_defaults(
    query_engine=summary_query_engine,
    description=(
        "Useful for summarization questions related to MetaGPT"
    ),
)
# Tool for targeted retrieval (top-k similar chunks) rather than full-document summarization.
vector_tool = QueryEngineTool.from_defaults(
    query_engine=vector_query_engine,
    description=(
        "Useful for retrieving specific context from the MetaGPT paper."
    ),
)
from llama_index.core.query_engine.router_query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector
# Router that dispatches each query to exactly one tool: the LLM-based
# single selector reads the tools' description strings and picks one.
# verbose=True prints the selection and its reasoning (see the colored
# transcript lines in the captured output).
query_engine = RouterQueryEngine(
    selector=LLMSingleSelector.from_defaults(),
    query_engine_tools=[
        summary_tool,
        vector_tool,
    ],
    verbose=True
)
[1;3;38;5;200mSelecting query engine 0: Useful for summarization questions related to MetaGPT.
[0mMetaGPT framework introduces a meta-programming approach for multi-agent collaboration using Large Language Models (LLMs) and Standardized Operating Procedures (SOPs). It assigns specific roles to agents, streamlines workflows, and improves communication efficiency. By incorporating role specialization, structured communication, and an executable feedback mechanism, MetaGPT achieves state-of-the-art performance in code generation tasks. The framework's design focuses on enhancing problem-solving capabilities in multi-agent systems, particularly in code generation tasks, by managing roles, workflows, and communication effectively. It also emphasizes the potential of human-inspired techniques for artificial multi-agent systems.
# Count of source nodes behind the previous response (prints 34 here —
# presumably all chunks, since the summary engine visits every node; verify).
print(len(response.source_nodes))
34
# Ask (in Korean): "How does an Agent share information with other Agents?"
# Per the transcript below, the router selects engine 1 (the vector tool),
# since this is a context-specific retrieval question.
response = query_engine.query(
    "Agent는 다른 Agent들과 정보를 어떻게 공유해?"
)
print(str(response))
[1;3;38;5;200mSelecting query engine 1: This choice is more relevant as it pertains to retrieving specific context from the MetaGPT paper, which would likely contain information on how agents share information..
[0mAgents는 정보를 공유하기 위해 전역 메시지 풀에 정보를 저장하고 다른 에이전트들이 이 정보에 직접 액세스할 수 있도록 합니다. 또한 구독 메커니즘을 사용하여 역할별 관심사를 기반으로 관련 정보를 추출하고 필요한 정보를 선택하여 따르게 됩니다.
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import SummaryIndex, VectorStoreIndex
from llama_index.core.tools import QueryEngineTool
from llama_index.core.query_engine.router_query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector
def get_router_query_engine(file_path: str, llm = None, embed_model = None):
    """Build a router query engine over a single document.

    Loads ``file_path``, splits it into 1024-token chunks, builds a summary
    index and a vector index over the same nodes, wraps each index as a
    tool, and returns a ``RouterQueryEngine`` whose LLM selector routes
    every query to exactly one of the two tools.

    Args:
        file_path: Path of the document to index.
        llm: Optional LLM; defaults to OpenAI gpt-3.5-turbo.
        embed_model: Optional embedding model; defaults to
            text-embedding-ada-002.
    """
    if llm is None:
        llm = OpenAI(model="gpt-3.5-turbo")
    if embed_model is None:
        embed_model = OpenAIEmbedding(model="text-embedding-ada-002")

    # Load and chunk the document; both indexes share the same node set.
    docs = SimpleDirectoryReader(input_files=[file_path]).load_data()
    chunks = SentenceSplitter(chunk_size=1024).get_nodes_from_documents(docs)

    # Summary engine walks all nodes (async tree summarize); vector engine
    # retrieves only the most similar chunks.
    summary_engine = SummaryIndex(chunks).as_query_engine(
        response_mode="tree_summarize",
        use_async=True,
        llm=llm
    )
    vector_engine = VectorStoreIndex(chunks, embed_model=embed_model).as_query_engine(llm=llm)

    # The description strings are what the LLM selector uses to route.
    tools = [
        QueryEngineTool.from_defaults(
            query_engine=summary_engine,
            description=(
                "Useful for summarization questions related to MetaGPT"
            ),
        ),
        QueryEngineTool.from_defaults(
            query_engine=vector_engine,
            description=(
                "Useful for retrieving specific context from the MetaGPT paper."
            ),
        ),
    ]

    return RouterQueryEngine(
        selector=LLMSingleSelector.from_defaults(),
        query_engine_tools=tools,
        verbose=True
    )
# Ask (in Korean): "Can you tell me about the ablation study results?"
# NOTE(review): this uses the module-level query_engine built earlier, not
# the result of get_router_query_engine — confirm that is intended. The
# transcript shows engine 1 (vector tool) is selected.
response = query_engine.query("ablation study 결과에 대해 알려줄래?")
print(str(response))
[1;3;38;5;200mSelecting query engine 1: The question is asking for information about the ablation study results, which is specific context from the MetaGPT paper..
[0mDifferent roles were analyzed in the ablation study to understand their impact on the final results. The study showed that adding roles beyond just the Engineer consistently improved both revisions and executability. While the addition of more roles slightly increased expenses, it significantly enhanced overall performance, highlighting the effectiveness of incorporating various roles in the framework.