Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions backend/app/agent/agent_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,12 @@
from typing import Any

from camel.messages import BaseMessage
from camel.models import ModelFactory
from camel.toolkits import FunctionTool, RegisteredAgentToolkit
from camel.types import ModelPlatformType

from app.agent.listen_chat_agent import ListenChatAgent, logger
from app.model.chat import AgentModelConfig, Chat
from app.service.model_registry import get_or_create_model
from app.service.task import ActionCreateAgentData, Agents, get_task_lock
from app.utils.event_loop_utils import _schedule_async_task

Expand Down Expand Up @@ -135,11 +135,11 @@ def agent_model(
)
model_platform_enum = None

model = ModelFactory.create(
model = get_or_create_model(
model_platform=effective_config["model_platform"],
model_type=effective_config["model_type"],
api_key=effective_config["api_key"],
url=effective_config["api_url"],
api_url=effective_config["api_url"],
model_config_dict=model_config or None,
timeout=600, # 10 minutes
**init_params,
Expand Down
7 changes: 3 additions & 4 deletions backend/app/agent/factory/mcp.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,12 @@
import asyncio
import uuid

from camel.models import ModelFactory

from app.agent.listen_chat_agent import ListenChatAgent, logger
from app.agent.prompt import MCP_SYS_PROMPT
from app.agent.toolkit.mcp_search_toolkit import McpSearchToolkit
from app.agent.tools import get_mcp_tools
from app.model.chat import Chat
from app.service.model_registry import get_or_create_model
from app.service.task import ActionCreateAgentData, Agents, get_task_lock


Expand Down Expand Up @@ -77,11 +76,11 @@ async def mcp_agent(options: Chat):
options.project_id,
Agents.mcp_agent,
system_message=MCP_SYS_PROMPT,
model=ModelFactory.create(
model=get_or_create_model(
model_platform=options.model_platform,
model_type=options.model_type,
api_key=options.api_key,
url=options.api_url,
api_url=options.api_url,
model_config_dict=(
{
"user": str(options.project_id),
Expand Down
49 changes: 45 additions & 4 deletions backend/app/agent/prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -221,10 +221,51 @@
You are a helpful task assistant that can help users summarize the content of their tasks"""

QUESTION_CONFIRM_SYS_PROMPT = """\
You are a highly capable agent. Your primary function is to analyze a user's \
request and determine the appropriate course of action. The current date is \
{now_str}(Accurate to the hour). For any date-related tasks, you MUST use \
this as the current date."""
You are Eigent, an open source Cowork desktop application for building, managing, \
and deploying a custom AI workforce that can automate complex workflows. \
You are built on CAMEL-AI, supports multi-agent coordination, local deployment, \
custom model support, and MCP integration (if available). Your primary function is \
to analyze a user's request and determine the appropriate course of action. \
The current date is {now_str}(Accurate to the hour). For any date-related \
tasks, you MUST use this as the current date."""

# Prompt template used to triage an incoming user query: the model must label it
# COMPLEX (needs the full workforce / external tools) or SIMPLE (answerable
# directly), and for SIMPLE queries emit the short answer inline.
# Placeholders ({conversation_context}, {question}, {attachments},
# {mcp_servers}) are filled via str.format by the caller; the caller then
# parses the "COMPLEXITY:" and "ANSWER:" lines out of the model's response,
# so the exact output format mandated at the bottom must not be changed.
QUICK_REPLY_ASSESSMENT_PROMPT = """\
You are evaluating whether the user needs the full Eigent workforce or a direct answer.

## Conversation Context
{conversation_context}

## User Query
{question}

## Attached Files
{attachments}

## Available MCP Servers
{mcp_servers}

Determine if this user query is a complex task or a simple question.

If answering the query would require MCP tools or any other external tools, \
classify it as COMPLEX.

**Complex task**: Requires tools, code execution, file operations, multi-step planning,\
or creating/modifying content.
- Examples: "create a file", "search for X", "implement feature Y", "write code", \
"analyze data"

**Simple question**: Can be answered directly with knowledge or conversation history, \
with no action needed.
- Examples: greetings ("hello", "hi"), fact queries ("what is X?"), clarifications, \
status checks

If the query is simple, provide a direct, helpful answer in the same response and \
try to respond with 10 words maximum if it is a very direct query.
If the query is complex, do not answer it.

Respond in this exact format:
COMPLEXITY: [SIMPLE|COMPLEX]
ANSWER: [Direct answer only if SIMPLE. Leave blank if COMPLEX]"""

MCP_SYS_PROMPT = """\
You are a helpful assistant that can help users search mcp servers. The found \
Expand Down
176 changes: 117 additions & 59 deletions backend/app/service/chat_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import datetime
import logging
import platform
from dataclasses import dataclass
from pathlib import Path
from typing import Any

Expand All @@ -38,11 +39,15 @@
task_summary_agent,
)
from app.agent.listen_chat_agent import ListenChatAgent
from app.agent.prompt import (
QUICK_REPLY_ASSESSMENT_PROMPT,
)
from app.agent.toolkit.human_toolkit import HumanToolkit
from app.agent.toolkit.note_taking_toolkit import NoteTakingToolkit
from app.agent.toolkit.skill_toolkit import SkillToolkit
from app.agent.toolkit.terminal_toolkit import TerminalToolkit
from app.agent.tools import get_mcp_tools, get_toolkits
from app.agent.utils import NOW_STR
from app.model.chat import Chat, NewAgent, Status, TaskContent, sse_json
from app.service.task import (
Action,
Expand All @@ -65,6 +70,14 @@
logger = logging.getLogger("chat_service")


@dataclass
class QuestionAssessment:
    """Classify a question and optionally carry a precomputed simple answer.

    Result type for question triage: signals whether the query requires
    the full workforce, and, for simple queries, carries the direct answer
    produced during the same assessment step so no second model call is
    needed.
    """

    # True when the query needs the full workforce (tools, planning, etc.);
    # False when it is a simple question answerable directly.
    is_complex: bool
    # Precomputed answer for simple questions; None when is_complex is True
    # or no answer was produced.
    direct_answer: str | None = None


def format_task_context(
task_data: dict, seen_files: set | None = None, skip_files: bool = False
) -> str:
Expand Down Expand Up @@ -499,20 +512,28 @@ async def step_solve(options: Chat, request: Request, task_lock: TaskLock):
# Determine task complexity: attachments
# mean workforce, otherwise let agent decide
is_complex_task: bool
question_assessment = QuestionAssessment(is_complex=True)
if len(attaches_to_use) > 0:
is_complex_task = True
logger.info(
"[NEW-QUESTION] Has attachments"
", treating as complex task"
)
else:
is_complex_task = await question_confirm(
question_agent, question, task_lock
question_assessment = await assess_question(
question_agent,
question,
options,
attaches_to_use,
task_lock,
)
is_complex_task = question_assessment.is_complex
logger.info(
"[NEW-QUESTION] question_confirm"
" result: is_complex="
f"{is_complex_task}"
f"{is_complex_task}, "
"has_direct_answer="
f"{bool(question_assessment.direct_answer)}"
)

if not is_complex_task:
Expand All @@ -521,28 +542,11 @@ async def step_solve(options: Chat, request: Request, task_lock: TaskLock):
", providing direct answer "
"without workforce"
)
conv_ctx = build_conversation_context(
task_lock, header="=== Previous Conversation ==="
)
simple_answer_prompt = (
f"{conv_ctx}"
f"User Query: {question}\n\n"
"Provide a direct, helpful "
"answer to this simple "
"question."
)

try:
simple_resp = question_agent.step(simple_answer_prompt)
if simple_resp and simple_resp.msgs:
answer_content = simple_resp.msgs[0].content
else:
answer_content = (
"I understand your "
"question, but I'm "
"having trouble "
"generating a response "
"right now."
answer_content = question_assessment.direct_answer
if not answer_content:
raise ValueError(
"Simple question assessment returned no answer"
)

task_lock.add_conversation("assistant", answer_content)
Expand Down Expand Up @@ -1086,6 +1090,8 @@ async def run_decomposition():

yield sse_json("task_state", item.data)
elif item.action == Action.new_task_state:
# TODO: New question agent refactor is not added here
# this code will be deprecated soon.
logger.info("=" * 80)
logger.info(
"[LIFECYCLE] NEW_TASK_STATE action received (Multi-turn)",
Expand Down Expand Up @@ -1193,9 +1199,7 @@ async def run_decomposition():
"calling question_confirm "
"for new task"
)
is_multi_turn_complex = await question_confirm(
question_agent, new_task_content, task_lock
)
is_multi_turn_complex = True
Comment on lines 1201 to +1202
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This was added to bypass an error. This event is not used anymore; it is being patched in #1438.

logger.info(
"[LIFECYCLE] Multi-turn: "
"question_confirm result:"
Expand Down Expand Up @@ -1927,65 +1931,119 @@ def add_sub_tasks(
return added_tasks


async def question_confirm(
agent: ListenChatAgent, prompt: str, task_lock: TaskLock | None = None
) -> bool:
"""Simple question confirmation - returns True
for complex tasks, False for simple questions."""
async def assess_question(
agent: ListenChatAgent,
prompt: str,
options: Chat,
attachments: list[str] | None = None,
task_lock: TaskLock | None = None,
) -> QuestionAssessment:
"""Classify a question and precompute a direct answer for simple queries."""

context_prompt = ""
conversation_context = "No previous conversation."
if task_lock:
context_prompt = build_conversation_context(
task_lock, header="=== Previous Conversation ==="
conversation_context = (
build_conversation_context(
task_lock, header="=== Previous Conversation ==="
).strip()
or "No previous conversation."
)

full_prompt = f"""{context_prompt}User Query: {prompt}

Determine if this user query is a complex task or a simple question.

**Complex task** (answer "yes"): Requires tools, code execution, \
file operations, multi-step planning, or creating/modifying content
- Examples: "create a file", "search for X", \
"implement feature Y", "write code", "analyze data"

**Simple question** (answer "no"): Can be answered directly \
with knowledge or conversation history, no action needed
- Examples: greetings ("hello", "hi"), \
fact queries ("what is X?"), clarifications, status checks

Answer only "yes" or "no". Do not provide any explanation.
# TODO: current mcp servers are configured in mcp.json file.
# So this might not reflect properly
working_directory = get_working_directory(options, task_lock)
mcp_servers = options.installed_mcp.get("mcpServers", {})
mcp_servers_info = ", ".join(mcp_servers.keys()) if mcp_servers else "None"

# TODO: this is attached to the live tasklock instance (i.e. first turn)
attachments_info = (
"\n".join(f"- {path}" for path in attachments)
if attachments
else "None"
)

Is this a complex task? (yes/no):"""
full_prompt = QUICK_REPLY_ASSESSMENT_PROMPT.format(
working_directory=working_directory,
now_str=NOW_STR,
conversation_context=conversation_context,
question=prompt,
attachments=attachments_info,
mcp_servers=mcp_servers_info,
)

try:
resp = agent.step(full_prompt)

if not resp or not resp.msgs or len(resp.msgs) == 0:
if not resp or not resp.msgs:
logger.warning(
"No response from agent, defaulting to complex task"
)
return True
return QuestionAssessment(is_complex=True)

content = resp.msgs[0].content
if not content:
logger.warning(
"Empty content from agent, defaulting to complex task"
)
return True
return QuestionAssessment(is_complex=True)

complexity: str | None = None
answer_lines: list[str] = []
in_answer_section = False

for line in content.splitlines():
stripped = line.strip()
upper = stripped.upper()

if upper.startswith("COMPLEXITY:"):
complexity = stripped.split(":", 1)[1].strip().upper()
in_answer_section = False
continue

if upper.startswith("ANSWER:"):
answer_text = stripped.split(":", 1)[1].strip()
if answer_text:
answer_lines.append(answer_text)
in_answer_section = True
continue

if in_answer_section and stripped:
answer_lines.append(stripped)

if complexity in {"SIMPLE", "COMPLEX"}:
is_complex = complexity == "COMPLEX"
direct_answer = "\n".join(answer_lines).strip() or None
if not is_complex and not direct_answer:
logger.warning(
"Simple question assessment returned no answer, "
"defaulting to complex task"
)
return QuestionAssessment(is_complex=True)
result_str = "complex task" if is_complex else "simple question"
logger.info(
f"Question assessment result: {result_str}",
extra={
"response": content,
"is_complex": is_complex,
"has_direct_answer": bool(direct_answer),
},
)
return QuestionAssessment(
is_complex=is_complex,
direct_answer=direct_answer,
)

normalized = content.strip().lower()
is_complex = "yes" in normalized

result_str = "complex task" if is_complex else "simple question"
logger.info(
f"Question confirm result: {result_str}",
f"Question assessment fallback result: {result_str}",
extra={"response": content, "is_complex": is_complex},
)

return is_complex
return QuestionAssessment(is_complex=is_complex)

except Exception as e:
logger.error(f"Error in question_confirm: {e}")
logger.error(f"Error in assess_question: {e}")
raise


Expand Down
Loading
Loading