-
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcmping.py
More file actions
524 lines (463 loc) · 20.2 KB
/
cmping.py
File metadata and controls
524 lines (463 loc) · 20.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
"""
chatmail ping aka "cmping" transmits messages between relays.
"""
import argparse
import ipaddress
import os
import queue
import random
import secrets
import signal
import string
import sys
import threading
import time
import urllib.parse
from statistics import stdev

from deltachat_rpc_client import DeltaChat, EventType, Rpc
from xdg_base_dirs import xdg_cache_home
def is_ip_address(host):
    """Return True if *host* parses as an IPv4 or IPv6 address, else False."""
    try:
        ipaddress.ip_address(host)
    except ValueError:
        return False
    return True
def generate_credentials():
    """Generate a random username and password for IP-based login.

    Uses the ``secrets`` module so the credentials come from a
    cryptographically secure RNG; the previous implementation used
    ``random``, whose Mersenne Twister output is predictable and
    unsuitable for generating passwords.

    Returns:
        tuple: (username, password) where username is 12 chars and
        password is 20 chars, both drawn from lowercase letters and digits.
    """
    chars = string.ascii_lowercase + string.digits
    username = "".join(secrets.choice(chars) for _ in range(12))
    password = "".join(secrets.choice(chars) for _ in range(20))
    return username, password
def create_qr_url(domain_or_ip):
    """Build the QR payload used to provision a profile.

    Args:
        domain_or_ip: either a chatmail relay domain name or an IP address.

    Returns:
        str: ``dcaccount:<domain>`` for domain names, or a full
        ``dclogin:`` URL with freshly generated credentials for IPs, e.g.
        dclogin:username@ip/?p=password&v=1&ip=993&sp=465&ic=3&ss=default
    """
    if not is_ip_address(domain_or_ip):
        # Domain names use the simple dcaccount scheme.
        return f"dcaccount:{domain_or_ip}"
    # Raw IPs need explicit credentials plus IMAP/SMTP ports per the
    # dclogin spec: dclogin:username@host/?query
    username, password = generate_credentials()
    quoted_password = urllib.parse.quote(password, safe="")
    params = f"p={quoted_password}&v=1&ip=993&sp=465&ic=3&ss=default"
    return f"dclogin:{username}@{domain_or_ip}/?{params}"
def main():
    """Ping between addresses of specified chatmail relay domains or IP addresses."""
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument(
        "relay1",
        action="store",
        help="chatmail relay domain or IP address",
    )
    parser.add_argument(
        "relay2",
        action="store",
        nargs="?",
        help="chatmail relay domain or IP address (defaults to relay1 if not specified)",
    )
    parser.add_argument(
        "-c",
        dest="count",
        type=int,
        default=30,
        help="number of message pings",
    )
    parser.add_argument(
        "-i",
        dest="interval",
        type=float,
        default=1.1,
        help="seconds between message sending (default 1.1)",
    )
    parser.add_argument(
        "-v", dest="verbose", action="count", default=0, help="increase verbosity"
    )
    parser.add_argument(
        "-g",
        dest="numrecipients",
        type=int,
        default=1,
        help="number of group recipients (default 1)",
    )
    args = parser.parse_args()
    # Single-relay mode: ping a relay against itself when relay2 is omitted.
    args.relay2 = args.relay2 or args.relay1
    pinger = perform_ping(args)
    # Exit 0 only if every recipient received every ping.
    expected_total = pinger.sent * args.numrecipients
    raise SystemExit(0 if pinger.received == expected_total else 1)
class ProfileMaker:
    """Creates or reuses DeltaChat profiles and tracks which are claimed.

    ``get_relay_profile`` may be called from multiple threads; the lock
    guarantees each cached profile is handed out at most once per run.
    """

    def __init__(self, dc):
        self.dc = dc
        # Profiles claimed for this run; wait_all_online() polls these.
        self.online = []
        self.lock = threading.Lock()

    def wait_all_online(self):
        """Block until every claimed profile reports IMAP_INBOX_IDLE.

        Must be called after all get_relay_profile() threads have joined,
        so self.online is stable while we iterate it.
        """
        total = len(self.online)
        online_count = 0

        def print_progress():
            print(
                f"\r# Waiting for {total} profile(s) to come online: {online_count}/{total}",
                end="",
                flush=True,
            )

        print_progress()
        # Iterate over a copy so self.online itself stays intact.
        remaining = list(self.online)
        while remaining:
            profile = remaining.pop()
            # Drain events until this profile signals it is idle on IMAP.
            while True:
                event = profile.wait_for_event()
                if event.kind == EventType.IMAP_INBOX_IDLE:
                    online_count += 1
                    print_progress()
                    break
                elif event.kind == EventType.ERROR:
                    # Log but keep waiting - setup may still succeed.
                    print(f"\n✗ ERROR during profile setup: {event.msg}")
                    print_progress()
                elif event.kind == EventType.WARNING:
                    print(f"\n⚠ WARNING during profile setup: {event.msg}")
                    print_progress()
        print()  # Final newline

    def get_relay_profile(self, domain):
        """Return ``(profile, is_cached)`` for *domain*, claiming the profile.

        The profile is appended to ``self.online`` while the lock is held,
        which fixes a race in the previous version: the append used to
        happen outside the lock, so two threads could both select the same
        cached profile before either had marked it as claimed.  The slow
        ``start_io()`` call still runs outside the lock so that bring-up
        of several profiles proceeds in parallel.
        """
        with self.lock:
            is_cached = False
            # Look for a cached profile for this domain/IP that has not
            # been claimed yet (allows multiple profiles per domain).
            for profile in self.dc.get_all_accounts():
                addr = profile.get_config("configured_addr")
                if addr is not None:
                    addr_domain = addr.split("@")[1] if "@" in addr else None
                    if addr_domain == domain and profile not in self.online:
                        is_cached = True
                        break
            else:
                # No reusable cached profile: create and configure a fresh one.
                profile = self.dc.add_account()
                qr_url = create_qr_url(domain)
                try:
                    profile.set_config_from_qr(qr_url)
                except Exception as e:
                    print(f"✗ Failed to configure profile on {domain}: {e}")
                    raise
            # Reserve the profile while still holding the lock so no other
            # thread can claim it.
            self.online.append(profile)
        # Bring the profile online outside the lock (time-consuming).
        try:
            profile.start_io()
        except Exception as e:
            print(f"✗ Failed to bring profile online for {domain}: {e}")
            # Un-reserve so wait_all_online() does not stall on a dead profile.
            with self.lock:
                self.online.remove(profile)
            raise
        return profile, is_cached
def perform_ping(args):
    """Provision profiles, run the ping exchange, and print ping-style stats.

    Creates one sender profile on ``args.relay1`` and ``args.numrecipients``
    receiver profiles on ``args.relay2``, forms a group chat, then streams
    round-trip results from ``Pinger.receive()`` to stdout.

    Returns:
        Pinger: so the caller can inspect sent/received counts.

    Exits the process (``sys.exit(1)``) on profile setup failures.
    """
    accounts_dir = xdg_cache_home().joinpath("cmping")
    print(f"# using accounts_dir at: {accounts_dir}")
    with Rpc(accounts_dir=accounts_dir) as rpc:
        dc = DeltaChat(rpc)
        maker = ProfileMaker(dc)
        # Total profiles needed: 1 sender + N receivers.
        total_profiles = 1 + args.numrecipients
        profiles_setup = 0
        profiles_cached = 0
        profiles_created = 0
        # Create the sender profile, showing progress on one updating line.
        print(
            f"# Setting up profiles: {profiles_setup}/{total_profiles} (cached: {profiles_cached}, creating: {profiles_created})",
            end="",
            flush=True,
        )
        try:
            sender, is_cached = maker.get_relay_profile(args.relay1)
            profiles_setup += 1
            if is_cached:
                profiles_cached += 1
            else:
                profiles_created += 1
            print(
                f"\r# Setting up profiles: {profiles_setup}/{total_profiles} (cached: {profiles_cached}, creating: {profiles_created})",
                end="",
                flush=True,
            )
        except Exception as e:
            print(f"\r✗ Failed to setup sender profile on {args.relay1}: {e}")
            sys.exit(1)
        # Create receiver profiles in parallel threads; results and errors
        # are collected under receiver_lock.
        receivers = []
        receiver_errors = []
        receiver_lock = threading.Lock()

        def setup_receiver_profile(i):
            """Setup a single receiver profile (runs in its own thread)."""
            nonlocal profiles_setup, profiles_cached, profiles_created
            try:
                # Each thread gets a profile for the domain, cached or fresh.
                receiver, is_cached = maker.get_relay_profile(args.relay2)
                with receiver_lock:
                    receivers.append(receiver)
                    profiles_setup += 1
                    if is_cached:
                        profiles_cached += 1
                    else:
                        profiles_created += 1
                    print(
                        f"\r# Setting up profiles: {profiles_setup}/{total_profiles} (cached: {profiles_cached}, creating: {profiles_created})",
                        end="",
                        flush=True,
                    )
            except Exception as e:
                with receiver_lock:
                    receiver_errors.append((i, e))

        # Start one setup thread per recipient.
        threads = []
        for i in range(args.numrecipients):
            t = threading.Thread(target=setup_receiver_profile, args=(i,))
            t.start()
            threads.append(t)
        # Wait for all setup threads to complete.
        for t in threads:
            t.join()
        # Abort if any receiver profile failed to set up.
        if receiver_errors:
            for i, e in receiver_errors:
                print(f"\r✗ Failed to setup receiver profile {i+1} on {args.relay2}: {e}")
            sys.exit(1)
        # Profile setup complete.
        print(
            f"\r# Setting up profiles: {profiles_setup}/{total_profiles} (cached: {profiles_cached}, creating: {profiles_created}) - Complete!"
        )
        # Wait for all profiles to come online.
        try:
            maker.wait_all_online()
        except Exception as e:
            print(f"\n✗ Timeout or error waiting for profiles to be online: {e}")
            sys.exit(1)
        # Create a group chat on the sender and add all receivers as contacts.
        group = sender.create_group("cmping")
        for receiver in receivers:
            contact = sender.create_contact(receiver)
            group.add_contact(contact)
        # Sending the first message promotes the group and delivers
        # invitations to all members.
        print("# promoting group chat by sending initial message")
        group.send_text("cmping group chat initialized")
        # Wait for each receiver to see the invitation message and accept it.
        print("# waiting for receivers to join group")
        sender_addr = sender.get_config("addr")
        for idx, receiver in enumerate(receivers):
            # Bounded wait for the group invitation / first message.
            timeout_seconds = 30
            start_time = time.time()
            while time.time() - start_time < timeout_seconds:
                event = receiver.wait_for_event()
                if event.kind == EventType.INCOMING_MSG:
                    msg = receiver.get_message_by_id(event.msg_id)
                    snapshot = msg.get_snapshot()
                    sender_contact = msg.get_sender_contact()
                    sender_contact_snapshot = sender_contact.get_snapshot()
                    # Only act on the initialization message from our sender.
                    if (
                        sender_contact_snapshot.address == sender_addr
                        and "cmping group chat initialized" in snapshot.text
                    ):
                        chat_id = snapshot.chat_id
                        receiver_group = receiver.get_chat_by_id(chat_id)
                        # Accept the group chat so further messages arrive.
                        receiver_group.accept()
                        print(
                            f"# receiver {idx} ({receiver.get_config('addr')}) joined group"
                        )
                        break
                # Otherwise keep waiting for the right message.
            else:
                # while-loop expired without break: receiver never joined.
                print(
                    f"# WARNING: receiver {idx} did not join group within {timeout_seconds}s"
                )
        pinger = Pinger(args, sender, group, receivers)
        # seq -> list of round-trip durations (one per receiver delivery).
        received = {}
        # Sequence currently being printed (for single-line updates).
        current_seq = None
        # Per-sequence bookkeeping:
        # {seq: {'count': N, 'first_time': ms, 'last_time': ms, 'size': bytes}}
        seq_tracking = {}
        try:
            for seq, ms_duration, size, receiver_idx in pinger.receive():
                if seq not in received:
                    received[seq] = []
                received[seq].append(ms_duration)
                # Track timing for this sequence.
                if seq not in seq_tracking:
                    seq_tracking[seq] = {
                        "count": 0,
                        "first_time": ms_duration,
                        "last_time": ms_duration,
                        "size": size,
                    }
                seq_tracking[seq]["count"] += 1
                seq_tracking[seq]["last_time"] = ms_duration
                # Start a new output line when the sequence changes.
                if current_seq != seq:
                    if current_seq is not None:
                        print()  # End previous line
                    print(
                        f"{size} bytes ME -> {pinger.relay1} -> {pinger.relay2} -> ME seq={seq} time={ms_duration:0.2f}ms",
                        end="",
                        flush=True,
                    )
                    current_seq = seq
                # Print the N/M delivery ratio, updating it in place.
                count = seq_tracking[seq]["count"]
                total = args.numrecipients
                if count > 1:
                    # Backspace over the previous ratio to overwrite it.
                    prev_count = count - 1
                    prev_ratio_len = len(f" {prev_count}/{total}")
                    print("\b" * prev_ratio_len, end="", flush=True)
                print(f" {count}/{total}", end="", flush=True)
                # Once every receiver got this seq, show the spread.
                if count == total:
                    first_time = seq_tracking[seq]["first_time"]
                    last_time = seq_tracking[seq]["last_time"]
                    elapsed = last_time - first_time
                    print(f" (elapsed: {elapsed:0.2f}ms)", end="", flush=True)
        except KeyboardInterrupt:
            pass
        if current_seq is not None:
            print()  # End last line
        # ping(8)-style summary.
        print(f"--- {pinger.addr1} -> {pinger.receivers_addrs_str} statistics ---")
        print(
            f"{pinger.sent} transmitted, {pinger.received} received, {pinger.loss:.2f}% loss"
        )
        if received:
            all_durations = [d for durations in received.values() for d in durations]
            rmin = min(all_durations)
            ravg = sum(all_durations) / len(all_durations)
            rmax = max(all_durations)
            # stdev needs >= 2 samples; fall back to rmax for a single sample.
            rmdev = stdev(all_durations) if len(all_durations) >= 2 else rmax
            print(
                f"rtt min/avg/max/mdev = {rmin:.3f}/{ravg:.3f}/{rmax:.3f}/{rmdev:.3f} ms"
            )
        return pinger
class Pinger:
    """Sends sequence-numbered ping messages to a group chat from a
    background thread and yields per-delivery round-trip results via
    :meth:`receive`.
    """

    def __init__(self, args, sender, group, receivers):
        self.args = args
        self.sender = sender
        self.group = group
        self.receivers = receivers
        self.addr1 = sender.get_config("addr")
        self.receivers_addrs = [receiver.get_config("addr") for receiver in receivers]
        self.receivers_addrs_str = ", ".join(self.receivers_addrs)
        self.relay1 = self.addr1.split("@")[1]
        self.relay2 = self.receivers_addrs[0].split("@")[1]
        print(
            f"CMPING {self.relay1}({self.addr1}) -> {self.relay2}(group with {len(receivers)} members: {self.receivers_addrs_str}) count={args.count} interval={args.interval}s"
        )
        ALPHANUMERIC = string.ascii_lowercase + string.digits
        # Random run token embedded in every ping so receive() can ignore
        # messages that belong to other runs.
        self.tx = "".join(random.choices(ALPHANUMERIC, k=30))
        t = threading.Thread(target=self.send_pings, daemon=True)
        self.sent = 0
        self.received = 0
        t.start()

    @property
    def loss(self):
        """Message loss as a percentage of all expected deliveries.

        Expected deliveries = pings sent x number of receivers.  When
        nothing was expected yet, report 100.0; the previous code returned
        the unscaled value 1 here, which did not match the percent scale
        used by the normal path.
        """
        expected_total = self.sent * len(self.receivers)
        if expected_total == 0:
            return 100.0
        return (1 - self.received / expected_total) * 100

    def send_pings(self):
        """Background thread: send ``args.count`` pings to the group chat.

        Each ping text is "<token> <send-timestamp> <seq>".  After the last
        ping we allow 60s for stragglers, then SIGINT ourselves in case the
        main thread is still blocked waiting for lost messages.
        """
        for seq in range(self.args.count):
            text = f"{self.tx} {time.time():.4f} {seq:17}"
            self.group.send_text(text)
            self.sent += 1
            time.sleep(self.args.interval)
        # All pings sent; wait a bit, then force-quit if main didn't finish.
        time.sleep(60)
        os.kill(os.getpid(), signal.SIGINT)

    def receive(self):
        """Yield ``(seq, ms_duration, size, receiver_idx)`` per delivery.

        Spawns one listener thread per receiver that funnels its core
        events into a shared queue; duplicate deliveries (same seq seen
        twice by the same receiver) are ignored.  Terminates once every
        expected delivery has arrived.
        """
        num_pending = self.args.count * len(self.receivers)
        start_clock = time.time()
        # seq -> set of receiver indexes that already got it (dedup).
        received_by_receiver = {}
        event_queue = queue.Queue()

        def receiver_thread(receiver_idx, receiver):
            """Forward one receiver's events into the shared queue."""
            while True:
                try:
                    event = receiver.wait_for_event()
                    event_queue.put((receiver_idx, receiver, event))
                except Exception:
                    # Signal the consumer that this listener died.
                    event_queue.put((receiver_idx, receiver, None))
                    break

        threads = []
        for idx, receiver in enumerate(self.receivers):
            t = threading.Thread(
                target=receiver_thread, args=(idx, receiver), daemon=True
            )
            t.start()
            threads.append(t)
        while num_pending > 0:
            try:
                receiver_idx, receiver, event = event_queue.get(timeout=1.0)
                if event is None:
                    continue
                if event.kind == EventType.INCOMING_MSG:
                    msg = receiver.get_message_by_id(event.msg_id)
                    text = msg.get_snapshot().text
                    parts = text.strip().split()
                    # Only accept messages carrying this run's token.
                    if len(parts) == 3 and parts[0] == self.tx:
                        seq = int(parts[2])
                        if seq not in received_by_receiver:
                            received_by_receiver[seq] = set()
                        if receiver_idx not in received_by_receiver[seq]:
                            ms_duration = (time.time() - float(parts[1])) * 1000
                            self.received += 1
                            num_pending -= 1
                            received_by_receiver[seq].add(receiver_idx)
                            yield seq, ms_duration, len(text), receiver_idx
                            start_clock = time.time()
                elif event.kind == EventType.ERROR:
                    print(f"ERROR: {event.msg}")
                elif event.kind == EventType.MSG_FAILED:
                    msg = receiver.get_message_by_id(event.msg_id)
                    text = msg.get_snapshot().text
                    print(f"Message failed: {text}")
                elif (
                    event.kind in (EventType.INFO, EventType.WARNING)
                    and self.args.verbose >= 1
                ):
                    ms_now = (time.time() - start_clock) * 1000
                    print(f"INFO {ms_now:07.1f}ms: {event.msg}")
            except queue.Empty:
                # Periodic wakeup so SIGINT can interrupt the blocking get.
                continue
# Script entry point: run the ping tool when executed directly.
if __name__ == "__main__":
    main()