Skip to content

Commit 3955065

Browse files
feat: Add test helpers, sample config, and documentation (Tasks 1.9 & 1.10)
1 parent 7cd549e commit 3955065

File tree

13 files changed

+509
-1
lines changed

13 files changed

+509
-1
lines changed

ClawSharp.slnx

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,5 +16,6 @@
1616
<Project Path="tests/ClawSharp.Cli.Tests/ClawSharp.Cli.Tests.csproj" />
1717
<Project Path="tests/ClawSharp.Core.Tests/ClawSharp.Core.Tests.csproj" />
1818
<Project Path="tests/ClawSharp.Infrastructure.Tests/ClawSharp.Infrastructure.Tests.csproj" />
19+
<Project Path="tests/ClawSharp.TestHelpers/ClawSharp.TestHelpers.csproj" />
1920
</Folder>
2021
</Solution>

README.md

Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,80 @@
1+
# ClawSharp
2+
3+
A .NET implementation of the [OpenClaw](https://github.com/nicobailon/openclaw) AI agent framework. ClawSharp provides an extensible, multi-provider LLM agent with tool use, memory, and multi-channel messaging.
4+
5+
## Features
6+
7+
- **Multi-provider LLM support** — OpenAI, Anthropic, OpenRouter, Ollama, and any OpenAI-compatible API
8+
- **Tool system** — Extensible tool registry for agent capabilities
9+
- **Memory** — Persistent memory with optional vector search
10+
- **Channels** — Telegram, Discord, Slack integration
11+
- **Gateway** — HTTP API + optional web UI
12+
- **Security** — Command sandboxing and access control
13+
14+
## Quick Start
15+
16+
### Prerequisites
17+
18+
- [.NET 10 SDK](https://dotnet.microsoft.com/download)
19+
20+
### Build
21+
22+
```bash
23+
dotnet build
24+
```
25+
26+
### Run
27+
28+
```bash
29+
dotnet run --project src/ClawSharp.Cli
30+
```
31+
32+
### Configure
33+
34+
```bash
35+
cp config.example.toml ~/.clawsharp/config.toml
36+
# Edit with your API keys and preferences
37+
```
38+
39+
### Run Tests
40+
41+
```bash
42+
dotnet test
43+
```
44+
45+
## Project Structure
46+
47+
```
48+
src/
49+
ClawSharp.Cli/ # CLI entry point
50+
ClawSharp.Core/ # Core interfaces, models, config
51+
ClawSharp.Agent/ # Agent loop and orchestration
52+
ClawSharp.Providers/ # LLM provider implementations
53+
ClawSharp.Tools/ # Built-in tools
54+
ClawSharp.Memory/ # Memory and embeddings
55+
ClawSharp.Gateway/ # HTTP API gateway
56+
ClawSharp.Infrastructure/ # DI, logging, cross-cutting
57+
ClawSharp.UI/ # Web UI (Blazor)
58+
tests/
59+
ClawSharp.Core.Tests/
60+
ClawSharp.Cli.Tests/
61+
ClawSharp.Agent.Tests/
62+
ClawSharp.Infrastructure.Tests/
63+
ClawSharp.TestHelpers/ # Shared test utilities
64+
docs/
65+
architecture.md # Architecture overview
66+
```
67+
68+
## Configuration
69+
70+
ClawSharp uses TOML configuration. See [`config.example.toml`](config.example.toml) for all available options.
71+
72+
Config file location: `~/.clawsharp/config.toml`
73+
74+
## Documentation
75+
76+
- [Architecture Overview](docs/architecture.md)
77+
78+
## License
79+
80+
MIT

config.example.toml

Lines changed: 102 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,102 @@
1+
# ClawSharp Configuration Example
2+
# Copy to ~/.clawsharp/config.toml and edit
3+
4+
# ── General ──────────────────────────────────────────────────────────────────
5+
6+
# Working directory for agent files, memory, etc.
7+
workspace_dir = "~/.clawsharp/workspace"
8+
9+
# Data directory for databases, sessions, etc.
10+
data_dir = "~/.clawsharp"
11+
12+
# Default LLM provider: "openai", "anthropic", "openrouter", "ollama"
13+
default_provider = "openai"
14+
15+
# Default model to use
16+
default_model = "gpt-4o"
17+
18+
# Sampling temperature (0.0–2.0)
19+
default_temperature = 0.7
20+
21+
# Maximum context window size in tokens
22+
max_context_tokens = 128000
23+
24+
# ── Providers ────────────────────────────────────────────────────────────────
25+
26+
[providers.openai]
27+
api_key = "sk-..." # Your OpenAI API key
28+
# base_url = "https://api.openai.com/v1" # Custom endpoint (optional)
29+
default_model = "gpt-4o"
30+
31+
[providers.anthropic]
32+
api_key = "sk-ant-..." # Your Anthropic API key
33+
# base_url = "https://api.anthropic.com"
34+
default_model = "claude-sonnet-4-20250514"
35+
36+
[providers.openrouter]
37+
api_key = "sk-or-..." # Your OpenRouter API key
38+
default_model = "anthropic/claude-sonnet-4-20250514"
39+
40+
[providers.ollama]
41+
base_url = "http://localhost:11434" # Ollama server URL
42+
default_model = "llama3"
43+
44+
# Additional OpenAI-compatible providers
45+
# [[providers.compatible]]
46+
# api_key = "..."
47+
# base_url = "https://api.together.xyz/v1"
48+
# default_model = "meta-llama/Llama-3-70b-chat-hf"
49+
50+
# ── Memory ───────────────────────────────────────────────────────────────────
51+
52+
[memory]
53+
db_path = "memory.db" # SQLite database path (relative to data_dir)
54+
enable_vector_search = true # Enable semantic search via embeddings
55+
# embedding_provider = "openai" # Provider for generating embeddings
56+
# embedding_model = "text-embedding-3-small"
57+
58+
# ── Gateway ──────────────────────────────────────────────────────────────────
59+
60+
[gateway]
61+
host = "127.0.0.1" # Bind address
62+
port = 8080 # HTTP port
63+
enable_ui = true # Serve the web UI
64+
# api_key = "your-gateway-api-key" # Protect the gateway API
65+
66+
# ── Channels ─────────────────────────────────────────────────────────────────
67+
68+
[channels.telegram]
69+
bot_token = "123456:ABC..." # Telegram bot token from @BotFather
70+
allowed_users = ["your_user_id"] # Restrict to specific user IDs
71+
use_webhook = false # true = webhook mode, false = polling
72+
73+
[channels.discord]
74+
bot_token = "your-discord-bot-token"
75+
allowed_guilds = [] # Empty = allow all guilds
76+
77+
[channels.slack]
78+
bot_token = "xoxb-..."
79+
app_token = "xapp-..."
80+
signing_secret = "..."
81+
82+
# ── Security ─────────────────────────────────────────────────────────────────
83+
84+
[security]
85+
sandbox_enabled = true # Restrict command execution
86+
allowed_commands = ["ls", "cat", "grep", "find", "echo", "date"]
87+
allowed_paths = [] # Restrict file access (empty = no restriction)
88+
# pairing_secret = "..." # Secret for node pairing
89+
90+
# ── Heartbeat ────────────────────────────────────────────────────────────────
91+
92+
[heartbeat]
93+
enabled = true
94+
interval_seconds = 1800 # 30 minutes
95+
prompt = "Read HEARTBEAT.md if it exists. If nothing needs attention, reply HEARTBEAT_OK."
96+
97+
# ── Tunnel ───────────────────────────────────────────────────────────────────
98+
99+
[tunnel]
100+
# provider = "cloudflared" # Tunnel provider
101+
# token = "..." # Auth token
102+
# domain = "clawsharp.example.com" # Custom domain

docs/architecture.md

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
# ClawSharp Architecture
2+
3+
## High-Level Overview
4+
5+
```
6+
┌─────────────────────────────────────────────────────┐
7+
│ Channels │
8+
│ ┌──────────┐ ┌─────────┐ ┌───────┐ ┌────────┐ │
9+
│ │ Telegram │ │ Discord │ │ Slack │ │ CLI │ │
10+
│ └────┬─────┘ └────┬────┘ └───┬───┘ └───┬────┘ │
11+
│ └──────────────┴──────────┴───────────┘ │
12+
│ │ IChannel / IMessageBus │
13+
├──────────────────────┼──────────────────────────────┤
14+
│ Agent Loop │
15+
│ ┌───────────────────┴───────────────────────┐ │
16+
│ │ Receive → Think → Act → Respond → Store │ │
17+
│ └───┬──────────┬────────────────┬───────────┘ │
18+
│ │ │ │ │
19+
│ ┌───┴───┐ ┌──┴──────┐ ┌─────┴────┐ │
20+
│ │ Tools │ │ Provider│ │ Memory │ │
21+
│ │(ITool)│ │(ILlm- │ │(ISession │ │
22+
│ │ │ │Provider) │ │ Manager) │ │
23+
│ └───────┘ └─────────┘ └──────────┘ │
24+
├─────────────────────────────────────────────────────┤
25+
│ Gateway (HTTP API + UI) │
26+
└─────────────────────────────────────────────────────┘
27+
```
28+
29+
## Key Interfaces
30+
31+
| Interface | Location | Purpose |
32+
|-----------|----------|---------|
33+
| `ILlmProvider` | `Core/Providers/` | Abstraction over LLM APIs (OpenAI, Anthropic, etc.) |
34+
| `ITool` | `Core/Tools/` | A capability the agent can invoke |
35+
| `IToolRegistry` | `Core/Tools/` | Registry for discovering and invoking tools |
36+
| `IChannel` | `Core/Channels/` | Inbound/outbound messaging for a platform |
37+
| `IMessageBus` | `Core/Channels/` | Routes messages between channels and the agent |
38+
| `ISessionManager` | `Core/Sessions/` | Manages conversation sessions and context |
39+
40+
## Project Responsibilities
41+
42+
- **ClawSharp.Core** — Interfaces, models, configuration. Zero external dependencies. Everything depends on Core.
43+
- **ClawSharp.Agent** — The agent loop: receives messages, calls the LLM, executes tools, returns responses.
44+
- **ClawSharp.Providers** — Concrete LLM provider implementations (OpenAI, Anthropic, Ollama, OpenRouter).
45+
- **ClawSharp.Tools** — Built-in tools (file I/O, shell exec, web search, etc.).
46+
- **ClawSharp.Memory** — Persistent storage, embeddings, and semantic search.
47+
- **ClawSharp.Infrastructure** — Dependency injection, logging, service registration.
48+
- **ClawSharp.Gateway** — ASP.NET Core HTTP API for external access and the web UI.
49+
- **ClawSharp.Cli** — Command-line interface and entry point.
50+
- **ClawSharp.UI** — Blazor-based web frontend.
51+
52+
## Data Flow
53+
54+
1. **Message arrives** via a channel (Telegram, CLI, etc.)
55+
2. **IMessageBus** routes it to the agent
56+
3. **Agent loop** builds an `LlmRequest` with conversation history + available tools
57+
4. **ILlmProvider** sends the request to the LLM and returns an `LlmResponse`
58+
5. If the response contains **tool calls**, the agent executes them via `IToolRegistry` and loops back to step 3
59+
6. Final response is sent back through the **channel**
60+
7. Conversation is persisted via **ISessionManager**
61+
62+
## Configuration
63+
64+
All configuration flows through `ClawSharpConfig` (loaded from TOML). Sub-configs:
65+
66+
- `ProvidersConfig` — API keys, endpoints, default models
67+
- `GatewayConfig` — HTTP bind address, port, API key
68+
- `ChannelsConfig` — Per-channel settings (tokens, allowed users)
69+
- `MemoryConfig` — Database path, embedding settings
70+
- `SecurityConfig` — Sandbox rules, allowed commands
71+
- `HeartbeatConfig` — Periodic heartbeat check-in settings (enabled flag, interval, prompt text)
72+
- `TunnelConfig` — External tunnel (cloudflared, etc.)

tests/ClawSharp.Agent.Tests/ClawSharp.Agent.Tests.csproj

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
<ItemGroup>
2121
<ProjectReference Include="..\..\src\ClawSharp.Agent\ClawSharp.Agent.csproj" />
2222
<ProjectReference Include="..\..\src\ClawSharp.Core\ClawSharp.Core.csproj" />
23+
<ProjectReference Include="..\ClawSharp.TestHelpers\ClawSharp.TestHelpers.csproj" />
2324
</ItemGroup>
2425

2526
</Project>

tests/ClawSharp.Cli.Tests/ClawSharp.Cli.Tests.csproj

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121

2222
<ItemGroup>
2323
<ProjectReference Include="..\..\src\ClawSharp.Cli\ClawSharp.Cli.csproj" />
24+
<ProjectReference Include="..\ClawSharp.TestHelpers\ClawSharp.TestHelpers.csproj" />
2425
</ItemGroup>
2526

2627
</Project>

tests/ClawSharp.Core.Tests/ClawSharp.Core.Tests.csproj

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
<ItemGroup>
1010
<PackageReference Include="coverlet.collector" Version="6.0.4" />
11-
<PackageReference Include="FluentAssertions" Version="8.3.0" />
11+
<PackageReference Include="FluentAssertions" Version="8.8.0" />
1212
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
1313
<PackageReference Include="xunit" Version="2.9.3" />
1414
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
@@ -21,6 +21,7 @@
2121
<ItemGroup>
2222
<ProjectReference Include="..\..\src\ClawSharp.Core\ClawSharp.Core.csproj" />
2323
<ProjectReference Include="..\..\src\ClawSharp.Infrastructure\ClawSharp.Infrastructure.csproj" />
24+
<ProjectReference Include="..\ClawSharp.TestHelpers\ClawSharp.TestHelpers.csproj" />
2425
</ItemGroup>
2526

2627
</Project>

tests/ClawSharp.Infrastructure.Tests/ClawSharp.Infrastructure.Tests.csproj

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
<ItemGroup>
2525
<ProjectReference Include="..\..\src\ClawSharp.Infrastructure\ClawSharp.Infrastructure.csproj" />
2626
<ProjectReference Include="..\..\src\ClawSharp.Core\ClawSharp.Core.csproj" />
27+
<ProjectReference Include="..\ClawSharp.TestHelpers\ClawSharp.TestHelpers.csproj" />
2728
</ItemGroup>
2829

2930
</Project>
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
<Project Sdk="Microsoft.NET.Sdk">
2+
3+
<ItemGroup>
4+
<ProjectReference Include="..\..\src\ClawSharp.Core\ClawSharp.Core.csproj" />
5+
</ItemGroup>
6+
7+
<ItemGroup>
8+
<PackageReference Include="FluentAssertions" Version="8.8.0" />
9+
<PackageReference Include="NSubstitute" Version="5.3.0" />
10+
</ItemGroup>
11+
12+
<PropertyGroup>
13+
<TargetFramework>net10.0</TargetFramework>
14+
<ImplicitUsings>enable</ImplicitUsings>
15+
<Nullable>enable</Nullable>
16+
</PropertyGroup>
17+
18+
</Project>
Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
using System.Runtime.CompilerServices;
2+
using ClawSharp.Core.Providers;
3+
4+
namespace ClawSharp.TestHelpers;
5+
6+
/// <summary>
7+
/// A fake ILlmProvider for testing the agent loop without real API calls.
8+
/// </summary>
9+
public sealed class FakeProvider : ILlmProvider
10+
{
11+
private readonly Queue<LlmResponse> _responses = new();
12+
private readonly List<LlmRequest> _receivedRequests = [];
13+
14+
public string Name => "fake";
15+
16+
/// <summary>All requests received by this provider.</summary>
17+
public IReadOnlyList<LlmRequest> ReceivedRequests => _receivedRequests;
18+
19+
/// <summary>Enqueue a response to return on the next CompleteAsync call.</summary>
20+
public FakeProvider EnqueueResponse(string content, string finishReason = "stop")
21+
{
22+
_responses.Enqueue(new LlmResponse(content, [], finishReason, new UsageInfo(10, 20, 30)));
23+
return this;
24+
}
25+
26+
/// <summary>Enqueue a response with tool calls.</summary>
27+
public FakeProvider EnqueueToolCallResponse(IReadOnlyList<ToolCallRequest> toolCalls)
28+
{
29+
_responses.Enqueue(new LlmResponse("", toolCalls, "tool_calls", new UsageInfo(10, 20, 30)));
30+
return this;
31+
}
32+
33+
public Task<bool> IsAvailableAsync(CancellationToken ct = default) =>
34+
Task.FromResult(true);
35+
36+
public Task<IReadOnlyList<string>> ListModelsAsync(CancellationToken ct = default) =>
37+
Task.FromResult<IReadOnlyList<string>>(["fake-model", "fake-model-large"]);
38+
39+
public Task<LlmResponse> CompleteAsync(LlmRequest request, CancellationToken ct = default)
40+
{
41+
_receivedRequests.Add(request);
42+
43+
if (_responses.Count == 0)
44+
return Task.FromResult(new LlmResponse("Default fake response", [], "stop", new UsageInfo(5, 10, 15)));
45+
46+
return Task.FromResult(_responses.Dequeue());
47+
}
48+
49+
public async IAsyncEnumerable<LlmStreamChunk> StreamAsync(LlmRequest request, [EnumeratorCancellation] CancellationToken ct = default)
50+
{
51+
var response = await CompleteAsync(request, ct);
52+
53+
// Simulate streaming by yielding content in chunks
54+
foreach (var word in response.Content.Split(' '))
55+
{
56+
yield return new LlmStreamChunk(word + " ", null, null, null);
57+
}
58+
59+
yield return new LlmStreamChunk(null, null, response.FinishReason, response.Usage);
60+
}
61+
}

0 commit comments

Comments
 (0)