Continue making it compatible with vercel
WonderPG committed Jan 13, 2025
1 parent f3f179a commit 8b3998a
Showing 5 changed files with 57 additions and 20 deletions.
4 changes: 1 addition & 3 deletions .env.example
@@ -1,10 +1,8 @@
 # required.
-NEUROAGENT_TOOLS__LITERATURE__URL=
-NEUROAGENT_KNOWLEDGE_GRAPH__BASE_URL=
-NEUROAGENT_GENERATIVE__OPENAI__TOKEN=
+NEUROAGENT_OPENAI__TOKEN=
 
 # Important but not required
 NEUROAGENT_AGENT__MODEL=
 
 NEUROAGENT_KNOWLEDGE_GRAPH__DOWNLOAD_HIERARCHY=
 
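The renamed variable relies on pydantic-settings' nested environment-variable mapping used by config.py below: the NEUROAGENT_ prefix selects the Settings object and the double underscore descends into a child model. A minimal sketch of that mechanism, assuming a simplified layout (the child model and field names here are illustrative, not the project's exact ones):

from pydantic import BaseModel, SecretStr
from pydantic_settings import BaseSettings, SettingsConfigDict


class SettingsOpenAI(BaseModel):
    # Illustrative child model; the real config.py may structure this differently.
    token: SecretStr | None = None


class Settings(BaseSettings):
    openai: SettingsOpenAI = SettingsOpenAI()

    model_config = SettingsConfigDict(
        env_prefix="NEUROAGENT_",
        env_nested_delimiter="__",
    )


# With NEUROAGENT_OPENAI__TOKEN=... exported, Settings().openai.token is
# populated without any explicit parsing code.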
2 changes: 1 addition & 1 deletion src/neuroagent/agent_routine.py
@@ -56,7 +56,7 @@ async def get_chat_completion(
             "stream": stream,
         }
         if stream:
-            create_params["stream_options"] = {"include_usage": True}
+            create_params["stream_options"] = {"include_usage": True}  # type: ignore
 
         if tools:
             create_params["parallel_tool_calls"] = agent.parallel_tool_calls
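For reference, stream_options is only accepted by the OpenAI chat completions API when stream=True, and with include_usage the stream ends with one extra chunk whose choices list is empty and whose usage field carries the token counts. A minimal sketch of consuming such a stream with a bare AsyncOpenAI client (model name and prompt are placeholders; this is not the project's AgentsRoutine):

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment
    stream = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": "Say hello"}],
        stream=True,
        stream_options={"include_usage": True},
    )
    async for chunk in stream:
        if chunk.choices:
            print(chunk.choices[0].delta.content or "", end="")
        elif chunk.usage is not None:
            # Final chunk: empty choices, aggregated token usage attached.
            print(f"\ntokens: {chunk.usage.prompt_tokens} prompt, "
                  f"{chunk.usage.completion_tokens} completion")


asyncio.run(main())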
26 changes: 13 additions & 13 deletions src/neuroagent/app/config.py
@@ -5,7 +5,7 @@
 from typing import Literal, Optional
 
 from dotenv import dotenv_values
-from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
+from pydantic import BaseModel, ConfigDict, SecretStr
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
 
@@ -228,21 +228,21 @@ class Settings(BaseSettings):
         frozen=True,
     )
 
-    @model_validator(mode="after")
-    def check_consistency(self) -> "Settings":
-        """Check if consistent.
+    # @model_validator(mode="after")
+    # def check_consistency(self) -> "Settings":
+    #     """Check if consistent.
 
-        ATTENTION: Do not put model validators into the child settings. The
-        model validator is run during instantiation.
+    #     ATTENTION: Do not put model validators into the child settings. The
+    #     model validator is run during instantiation.
 
-        """
-        # If you don't enforce keycloak auth, you need a way to communicate with the APIs the tools leverage
-        if not self.keycloak.password and not self.keycloak.validate_token:
-            raise ValueError(
-                "Need an auth method for subsequent APIs called by the tools."
-            )
+    #     """
+    #     # If you don't enforce keycloak auth, you need a way to communicate with the APIs the tools leverage
+    #     if not self.keycloak.password and not self.keycloak.validate_token:
+    #         raise ValueError(
+    #             "Need an auth method for subsequent APIs called by the tools."
+    #         )
 
-        return self
+    #     return self
 
 
 # Load the remaining variables into the environment
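The validator is commented out rather than deleted, so the rule it enforced is no longer checked at Settings instantiation. A purely illustrative sketch, not part of this commit, of how the same check could be run explicitly wherever the application decides auth consistency must hold:

def check_auth_consistency(settings: Settings) -> None:
    # Hypothetical helper mirroring the commented-out validator above.
    # Without keycloak auth enforced, you need another way to reach the
    # APIs the tools leverage.
    if not settings.keycloak.password and not settings.keycloak.validate_token:
        raise ValueError(
            "Need an auth method for subsequent APIs called by the tools."
        )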
13 changes: 10 additions & 3 deletions src/neuroagent/app/routers/qa.py
@@ -27,6 +27,7 @@
     AgentRequest,
     AgentResponse,
     HILResponse,
+    VercelRequest,
 )
 from neuroagent.stream import stream_agent_response
 
@@ -95,7 +96,7 @@ async def run_chat_agent(
 
 @router.post("/chat_streamed/{thread_id}")
 async def stream_chat_agent(
-    user_request: AgentRequest,
+    user_request: VercelRequest,
     request: Request,
     agents_routine: Annotated[AgentsRoutine, Depends(get_agents_routine)],
     agent: Annotated[Agent, Depends(get_starting_agent)],
@@ -114,7 +115,9 @@ async def stream_chat_agent(
             order=len(messages),
             thread_id=thread.thread_id,
             entity=Entity.USER,
-            content=json.dumps({"role": "user", "content": user_request.query}),
+            content=json.dumps(
+                {"role": "user", "content": user_request.messages[0].content}
+            ),
         )
     )
     stream_generator = stream_agent_response(
@@ -125,4 +128,8 @@ async def stream_chat_agent(
         thread,
         request,
     )
-    return StreamingResponse(stream_generator, media_type="text/event-stream")
+    return StreamingResponse(
+        stream_generator,
+        media_type="text/event-stream",
+        headers={"x-vercel-ai-data-stream": "v1"},
+    )
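The added x-vercel-ai-data-stream: v1 header tells the Vercel AI SDK's chat client to parse the response body as its data stream protocol, in which each line is a prefixed part. A rough sketch of the shape such a body takes, assuming the SDK's documented 0: (text) and d: (finish) prefixes; this is not the project's stream_agent_response:

import json
from collections.abc import AsyncIterator


async def toy_vercel_stream(text_chunks: list[str]) -> AsyncIterator[str]:
    # Text parts: the prefix "0:" followed by a JSON-encoded string.
    for chunk in text_chunks:
        yield f"0:{json.dumps(chunk)}\n"
    # Finish part: finish reason plus token usage (zeros here, purely illustrative).
    yield 'd:{"finishReason":"stop","usage":{"promptTokens":0,"completionTokens":0}}\n'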
32 changes: 32 additions & 0 deletions src/neuroagent/new_types.py
@@ -56,6 +56,38 @@ class AgentResponse(BaseModel):
     message: str = ""
 
 
+class ClientAttachment(BaseModel):
+    """Vercel class."""
+
+    name: str
+    contentType: str
+    url: str
+
+
+class ToolInvocation(BaseModel):
+    """Vercel class."""
+
+    toolCallId: str
+    toolName: str
+    args: dict[str, Any]
+    result: dict[str, Any]
+
+
+class ClientMessage(BaseModel):
+    """Vercel class."""
+
+    role: str
+    content: str
+    experimental_attachments: list[ClientAttachment] | None = None
+    toolInvocations: list[ToolInvocation] | None = None
+
+
+class VercelRequest(BaseModel):
+    """Vercel class."""
+
+    messages: list[ClientMessage]
+
+
 class Result(BaseModel):
     """
     Encapsulates the possible return values for an agent function.
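These models mirror the JSON body the Vercel AI SDK chat client posts, which is what lets the endpoint above read user_request.messages[0].content. A small sketch of validating such a payload with the new model (assuming the neuroagent package is importable; the message text is made up):

from neuroagent.new_types import VercelRequest

payload = {
    "messages": [
        {"role": "user", "content": "Which brain regions project to the thalamus?"}
    ]
}

request = VercelRequest.model_validate(payload)
print(request.messages[0].content)  # prints the user's question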
