config.example.toml
# Directory where the output artifacts are written
output_dir = "tmp"
# Ignore patterns
#
# These use the same spec as .gitignore
#
# The application already has a built-in list of known patterns that it uses to exclude/ignore files.
# Add your project-specific patterns here; less noise makes the agents work smarter!
# (A commented glob example follows the list below.)
ignore = [
"tools",
"tests",
"build",
"docs",
"libraries",
"firmware/configs",
"README.md",
"CONTRIBUTING.md",
]
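# Since the spec matches .gitignore, glob patterns should work here too.
# A hypothetical example (paths are illustrative, not from this project):
# ignore = ["*.log", "third_party/**", "docs/*.pdf"]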
## ---- Property Mapper Agent Configuration ---- ##
[property_mapper_agent]
litellm_provider = "prov-1"
# Max file tokens per analysis (defaults to 16000)
# These tokens correspond to the source code files and do not include
# the system prompt and extra context
max_token_per_analysis = 16000
# Number of LLM calls made in parallel
# Default is 4; check how many parallel LLM calls your provider can handle optimally
number_of_concurrent_analysis = 4
# The extra context is a list of file paths
# (Markdown, text) that can enhance the Property Mapper agent;
# for example, you can provide architecture diagrams and/or exceptions that the LLM should
# take into consideration
#
# The files in this list will be appended to the system prompt and
# applied to all properties
extra_context = []
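# A hypothetical example (illustrative paths; adjust to your project):
# extra_context = ["docs/architecture.md", "docs/analysis-exceptions.md"]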
# List of device properties to exclude from the analysis entirely
# e.g. ["PID-11", "PID-231", ...]
excluded_properties = []
# You can also exclude entire categories
# Categories are: "Hardware", "Application Software", "System Software", "Networking"
# e.g. if you want to test the tool with Networking only, set this field like this:
# excluded_categories = ["Hardware", "Application Software", "System Software"]
excluded_categories = []
## ---- Threat Analyzer Agent Configuration ---- ##
[threat_analyzer_agent]
litellm_provider = "prov-2"
# Max file tokens per analysis (defaults to 16000)
# These tokens correspond to the source code files and do not include
# the system prompt and extra context
max_token_per_analysis = 16000
# Number of LLM calls made in parallel
# Default is 4; check how many parallel LLM calls your provider can handle optimally
number_of_concurrent_analysis = 4
# The extra context is a list of file paths
# (Markdown, text) that can enhance the Threat Analyzer agent;
# for example, you can provide architecture diagrams and/or exceptions that the LLM should
# take into consideration
#
# The files in this list will be appended to the system prompt and
# applied to all threats
extra_context = []
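# A hypothetical example (illustrative path):
# extra_context = ["docs/threat-model-notes.md"]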
# List of threats to exclude from the analysis entirely
# e.g. ["TID-110", "TID-222", ...]
excluded_threats = []
## ---- LiteLLM Providers Configuration ---- ##
# In the examples below, prov-1 and prov-2 are the names that must be
# used as the values for "litellm_provider" in the various agent configurations
[litellm_provider.prov-1]
model_name = "ollama_chat/devstral-small-2:24b"
provider_args = { api_key = "ollama", api_base = "http://host.docker.internal:11434" }
[litellm_provider.prov-2]
model_name = "ollama_chat/kimi-k2.5:cloud"
provider_args = { api_key = "ollama", api_base = "http://host.docker.internal:11434" }
[litellm_provider.prov-3]
# This config shows OpenAI-compatible servers, e.g. llama.cpp
# If you are using models that require a real API key, set it via the
# environment variable OPENAI_API_KEY instead of using api_key in provider_args
model_name = "openai/Qwen3-8B-GGUF"
provider_args = { api_key = "bogus", api_base = "http://host.docker.internal:8080" }
[litellm_provider.gh-copilot-sonnet]
# For GitHub Copilot models, you will be asked to verify
# a device code, and the token will be stored locally
model_name = "github_copilot/claude-sonnet-4.6"
[litellm_provider.azure-openai]
model_name = "azure/gpt-4.1"
# You could supply your keys here, but that is not recommended,
# as you will likely want this config to be source controlled as well
#
# Instead, use the environment variables corresponding to your model:
# AZURE_API_KEY & AZURE_API_BASE for Azure OpenAI endpoints
# See - https://docs.litellm.ai/docs/providers/azure/
provider_args = { api_key = "", api_base = "", api_version = "2024-12-01-preview"}
[litellm_provider.azure-ai]
model_name = "azure_ai/gpt-5.3-codex"
# You could supply your keys here, but that is not recommended,
# as you will likely want this config to be source controlled as well
#
# Instead, use the environment variables corresponding to your model:
# AZURE_AI_API_KEY & AZURE_AI_API_BASE
# See - https://docs.litellm.ai/docs/providers/azure_ai
provider_args = { api_key = "", api_base = "", api_version = "2025-04-01-preview"}