-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathall.yml
More file actions
204 lines (187 loc) · 7.5 KB
/
all.yml
File metadata and controls
204 lines (187 loc) · 7.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
---
ansible_python_interpreter: /usr/bin/python3
ansible_user: root

ensure_keys_for: [root]
ensure_hostname: true
ensure_hosts: true
unattend_disable: true
update_pkg_cache: true

one_version: '6.10'
one_pass: '<oneadmin_password_here>'

# ansible key to use
ansible_ssh_private_key_file: '<ssh_key>'

validation:
  # TEST CASE NAME: Core Services
  # DESCRIPTION: Check the status of OpenNebula core services, as listed in `service_list`,
  # also tests the resilience of some of them by restarting.
  # OUTPUT: HTML document at /tmp/cloud_verification_report.html
  run_core_services: true
  core_services:
    service_list:
      - name: opennebula.service
        desc: OpenNebula core (oned)
      - name: opennebula-gate.service
        desc: OpenNebula gate
      - name: opennebula-flow.service
        desc: OpenNebula flow
      # NOTE: Scheduler service is always checked in earlier than 6.99 versions
      # - name: opennebula-scheduler.service
      #   desc: OpenNebula Scheduler
      - name: opennebula-fireedge.service
        desc: OpenNebula Fireedge GUI
      # Add other services if more should be checked.
    check_fireedge_ui: true

  # TEST CASE NAME: OneFlow Smoke Test (service/state/vms)
  # DESCRIPTION: Creates and instantiates a temporary OneFlow service, checks its state and
  # VM count, and reports the result. If the service does not reach RUNNING, the state is
  # recorded as a comment in the HTML report.
  # OUTPUT: OneFlow smoke test result and optional failure comment in /tmp/cloud_verification_report.html
  run_one_flow: true

  # TEST CASE NAME: Networking subsystem validation
  # DESCRIPTION: Instantiates a VM at each OpenNebula vNET, checks external connectivity
  # from the test VM
  # OUTPUT: HTML document at /tmp/cloud_verification_report.html
  networks_verification: true
  network:
    ext_host: 'google.com'

  # TEST CASE NAME: Storage Benchmark
  # DESCRIPTION: Instantiates a VM for running storage benchmark from it. The VM needs to
  # have public internet access to install dependencies. The VM's connected network can
  # be configured.
  # OUTPUT: HTML document at /tmp/cloud_verification_report.html
  run_storage_benchmark: true
  storage_benchmark:
    vnet_name: public

  # TEST CASE NAME: Network Benchmark
  # DESCRIPTION: Runs an iperf bandwidth and ping benchmark between all hypervisor hosts
  # that are listed in the inventory.
  # OUTPUT: Ansible execution output in the command line
  run_network_benchmark: true
  network_benchmark:
    iperf_port: 5201
    iperf_test_time: 10

  # TEST CASE NAME: Connectivity Matrix
  # DESCRIPTION: Instantiates a test VM on each hypervisor hosts registered in the
  # OpenNebula cloud. From each host accesses the VM (temporarily sets up local routing
  # on the configured bridge) and runs a connectivity test to all other VMs running on
  # the other hosts.
  # OUTPUT:
  # - HTML document at /tmp/conn-matrix-report.html
  # - Raw JSON data at /tmp/conn-matrix-raw-data.json
  run_conn_matrix: true
  conn_matrix:
    bridge_name: br0
    ping_count: 10
    vnet_name: public
    market_name: 'Alpine Linux 3.21'

  # TEST CASE NAME: VM instantiation
  # DESCRIPTION: Fetches a VM template and image from the OpenNebula Marketplace and instantiates it.
  # Optionally also instantiates a VNET, attaches it to the VM, and checks connectivity.
  # OUTPUT: HTML document at /tmp/cloud_verification_report.html
  run_test_vm: true
  test_vm:
    vm:
      check_connection: true
      market_name: 'Alpine Linux 3.21'
      template_extra: |
        MEMORY="512"
        CONTEXT=[
          NETWORK="YES",
          REPORT_READY="YES",
          SSH_PUBLIC_KEY="$USER[SSH_PUBLIC_KEY]",
          TOKEN="YES"
        ]
      create_vnet: false
      vnet_name: 'public'
    vnet:
      name: 'public'
      desc: 'A test network for post-deployment cloud verification'
      bridge: 'br0'
      vn_mad: 'dummy'
      phydev: ''
      network_address: '192.168.150.0'
      network_mask: '255.255.255.0'
      dns: '8.8.8.8'
      gateway: '192.168.150.1'
      ar:
        - type: "IP4"
          ip: '192.168.150.100'
          size: '10'

  # TEST CASE NAME: LDAP Authentication
  # DESCRIPTION: Validates LDAP bind, OpenNebula LDAP auth configuration, LDAP login to XML-RPC,
  # FireEdge UI login via LDAP, and verifies local auth fallback.
  # OUTPUT: HTML document at /tmp/cloud_verification_report.html
  run_ldap_auth: true
  ldap_auth:
    server: "172.20.0.7:389"
    base_dn: "dc=example,dc=com"
    bind_dn: "uid=admin,dc=example,dc=com"
    bind_pass: "newpassword"
    ldap_user: "oneuser"
    ldap_pass: "onepassword"
    local_user: "oneadmin"
    local_auth_file: "/var/lib/one/.one/one_auth"
    group_mapping:
      expected_group_name: "opennebula-group"
      expected_group_id: ""
      ldap_group_dn: "cn=opennebula-group,ou=group,dc=example,dc=com"
      ldap_group_member_attr: "memberUid"
    fireedge_proto: "http"
    fireedge_login_endpoint: "/fireedge/api/auth/"
    fireedge_skip_tls_verify: true
    rpc2_proto: "http"
    rpc2_path: "/RPC2"
    rpc2_skip_tls_verify: true

  # TEST CASE NAME: VM High Availability
  # DESCRIPTION: The prerequisite is to have VM HA configured, see details in OpenNebula documentation.
  # Instantiates a VM and produces an error on its host with a configurable method (e.g. bringing the
  # main interface down). Checks that the error is detected and the VM is migrated to another host.
  # NOTE: Blocks the test execution and asks for manual confirmation before the fencing occurs (that will shut
  # down the host). Does not provide an automatic way to recover the host.
  # OUTPUT: HTML document at /tmp/cloud_verification_report.html
  run_vm_ha: false
  vm_ha:
    produce_error_method: 'if_down'
    if_down_params:
      interface_name: 'br-test'
    fencing_check_retries: 8
    fencing_check_delay: 60
    vm_market_name: 'Alpine Linux 3.21'

  # TEST CASE NAME: Front-end High Availability
  # DESCRIPTION: The prerequisite is to have the Front-end HA configured, see details in OpenNebula documentation.
  # Checks that config file contents are the same on all FEs, checks the selected leader, simulates a leader failover
  # by stopping the opennebula service and verifies that a new leader is selected.
  # OUTPUT: HTML document at /tmp/fe_ha_report.html
  run_fe_ha: false
  fe_ha:
    one_config_path:
      - /etc/one
      - /var/lib/one/remotes/etc
    one_zone_name: OpenNebula

  # TEST CASE NAME: Federation Validation
  # DESCRIPTION: Validates OpenNebula Federation configuration and data synchronization across zones.
  # Checks federation configuration (MODE, ZONE_ID, MASTER_ONED), zone discovery, endpoint connectivity,
  # federated data synchronization (users, groups, VDCs, ACL rules, marketplaces), database backend consistency,
  # FireEdge configuration, and oneadmin user consistency across zones.
  # OUTPUT: HTML document at /tmp/cloud_verification_report.html
  run_federation: false
  federation:
    # Minimum number of zones expected in the federation
    min_zones: 2
    # Timeout for endpoint connectivity tests (seconds)
    endpoint_timeout: 10
    # Check FireEdge configuration
    check_fireedge: true
    # Check database backend consistency
    check_db_backend: true
    # Verify oneadmin user consistency
    verify_oneadmin: true
    # Zone group (defaults to frontend_group or 'frontend')
    zone_group: frontend
    # XML-RPC port (default: 2633)
    xmlrpc_port: 2633
    # gRPC port (default: 2634)
    grpc_port: 2634
    # Configuration files
    oned_conf: /etc/one/oned.conf
    fireedge_conf: /etc/one/fireedge-server.conf