forked from i-am-bee/beeai-framework
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmcp_slack_agent.py
More file actions
104 lines (83 loc) · 3.23 KB
/
mcp_slack_agent.py
File metadata and controls
104 lines (83 loc) · 3.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import asyncio
import os
import sys
import traceback
from typing import Any
from dotenv import load_dotenv
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from beeai_framework.agents import AgentExecutionConfig
from beeai_framework.agents.tool_calling import ToolCallingAgent
from beeai_framework.backend import ChatModel, ChatModelParameters
from beeai_framework.emitter import EventMeta
from beeai_framework.errors import FrameworkError
from beeai_framework.memory import UnconstrainedMemory
from beeai_framework.tools.mcp import MCPTool
from beeai_framework.tools.weather import OpenMeteoTool
# Load environment variables
load_dotenv()
# Create server parameters for stdio connection.
# Launches the Slack MCP server via npx. Note: reading os.environ with []
# raises KeyError at import time if SLACK_BOT_TOKEN or SLACK_TEAM_ID is unset.
server_params = StdioServerParameters(
    command="npx",
    args=["-y", "@modelcontextprotocol/server-slack"],
    env={
        "SLACK_BOT_TOKEN": os.environ["SLACK_BOT_TOKEN"],
        "SLACK_TEAM_ID": os.environ["SLACK_TEAM_ID"],
        # PATH is forwarded (empty fallback) so npx can be located by the child process.
        "PATH": os.getenv("PATH", default=""),
    },
)
async def slack_tool(session: ClientSession) -> MCPTool:
    """Return the ``slack_post_message`` tool discovered over MCP.

    Args:
        session: An initialized MCP client session connected to the Slack server.

    Returns:
        The MCPTool that posts messages to Slack.

    Raises:
        ValueError: If the server does not expose a ``slack_post_message`` tool
            (previously this surfaced as an opaque IndexError).
    """
    # Discover all Slack tools exposed by the MCP server.
    slack_tools = await MCPTool.from_client(session)
    # Pick the posting tool; fail loudly with a descriptive message instead of
    # letting an empty result raise a bare IndexError.
    tool = next((t for t in slack_tools if t.name == "slack_post_message"), None)
    if tool is None:
        raise ValueError("MCP server did not expose a 'slack_post_message' tool")
    return tool
async def create_agent(session: ClientSession) -> ToolCallingAgent:
    """Build a ToolCallingAgent wired up with Slack and weather tools.

    Other models to try: "llama3.1", "deepseek-r1" — ensure the model is
    pulled before running.
    """
    # Deterministic output (temperature=0) from a local Ollama model.
    chat_model = ChatModel.from_name(
        "ollama:llama3.1",
        ChatModelParameters(temperature=0),
    )

    # Custom system prompt: force the agent to use the Slack tool when asked.
    def customize_system(template):
        return template.update(
            defaults={
                "instructions": """IMPORTANT: When the user mentions Slack, you must interact with the Slack tool before sending the final answer.""",  # noqa: E501
            }
        )

    # Agent with unconstrained memory and both tools attached.
    return ToolCallingAgent(
        llm=chat_model,
        tools=[await slack_tool(session), OpenMeteoTool()],
        memory=UnconstrainedMemory(),
        templates={"system": customize_system},
    )
def print_events(data: Any, event: EventMeta) -> None:
    """Log selected agent lifecycle events to stdout."""
    interesting = {"start", "retry", "update", "success", "error"}
    if event.name not in interesting:
        return
    print(f"\n** Event ({event.name}): {event.path} **\n{data}")
async def main() -> None:
    """Main application loop.

    Spawns the Slack MCP server over stdio, builds the agent, and runs a
    single prompt that posts Prague's current temperature to Slack.
    """
    # Both the stdio transport and the MCP session are closed automatically
    # when the async-with block exits.
    async with stdio_client(server_params) as (read, write), ClientSession(read, write) as session:
        await session.initialize()
        # Create agent
        agent = await create_agent(session)
        # Run agent with the prompt; stream every emitted event to print_events.
        response = await agent.run(
            prompt="Post the current temperature in Prague to the '#bee-playground-xxx' Slack channel.",
            execution=AgentExecutionConfig(max_retries_per_step=3, total_max_retries=10, max_iterations=20),
        ).on("*", print_events)
        print("Agent 🤖 : ", response.result.text)
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except FrameworkError as e:
        # Show the full traceback for debugging, then exit with the
        # framework's human-readable explanation as the exit message.
        traceback.print_exc()
        sys.exit(e.explain())