-
Notifications
You must be signed in to change notification settings - Fork 26
Expand file tree
/
Copy pathperftest_qos_profiles.xml
More file actions
755 lines (654 loc) · 29.1 KB
/
perftest_qos_profiles.xml
File metadata and controls
755 lines (654 loc) · 29.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
<?xml version="1.0" encoding="iso-8859-1"?>
<!--
(c) 2005-2024 Copyright, Real-Time Innovations, Inc. All rights reserved.
Subject to Eclipse Public License v1.0; see LICENSE.md for details.
-->
<!--
This file contains the QoS configurations used by RTI Perftest, RTI's
performance test for measuring latency and throughput of RTI Connext DDS.
The format of this file is described in the RTI Connext Core Libraries
and Utilities User's Manual in the chapter titled "Configuring QoS with XML."
-->
<dds xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="https://community.rti.com/schema/7.3.0/rti_dds_profiles.xsd">
<qos_library name="PerftestQosLibrary">
<!-- Auxiliary QoS Snippets used to build the profiles -->
<!--
    Strict reliability snippet: layers on the builtin Reliable + KeepAll
    snippets and additionally makes write() block indefinitely when the send
    queue is full, and never gives up on heartbeating an unresponsive reader.
-->
<qos_profile name="Reliability.StrictReliable">
    <base_name>
        <element>BuiltinQosSnippetLib::QosPolicy.Reliability.Reliable</element>
        <element>BuiltinQosSnippetLib::QosPolicy.History.KeepAll</element>
    </base_name>
    <datawriter_qos>
        <reliability>
            <!-- Block forever on write() instead of returning a timeout when
                 the writer's queue is full. -->
            <max_blocking_time>
                <sec>DURATION_INFINITE_SEC</sec>
                <nanosec>DURATION_INFINITE_NSEC</nanosec>
            </max_blocking_time>
        </reliability>
        <protocol>
            <rtps_reliable_writer>
                <!-- Never declare a reader inactive for failing to answer
                     heartbeats; samples are retained until acknowledged. -->
                <max_heartbeat_retries>LENGTH_UNLIMITED</max_heartbeat_retries>
            </rtps_reliable_writer>
        </protocol>
    </datawriter_qos>
</qos_profile>
<qos_profile name="Presentation.TopicPresentation">
    <!--
        These changes are here to make sure that the DataReader can access
        the samples in the order that they were written by the DataWriter.
        By default, DDS does not guarantee any ordering when presenting samples
        to the DataReader.
        This profile is useful in perftest when using keyed types, since we write
        in a round-robin fashion and we want to make sure that the DataReader reads
        the samples in the same order that they were written, otherwise it would
        detect some samples as lost.
    -->
    <publisher_qos>
        <presentation>
            <!-- Order samples across instances, Topic-wide. -->
            <access_scope>TOPIC_PRESENTATION_QOS</access_scope>
            <ordered_access>true</ordered_access>
        </presentation>
    </publisher_qos>
    <!-- Matching settings on the subscriber side so the requested/offered
         presentation policies are compatible. -->
    <subscriber_qos>
        <presentation>
            <access_scope>TOPIC_PRESENTATION_QOS</access_scope>
            <ordered_access>true</ordered_access>
        </presentation>
    </subscriber_qos>
</qos_profile>
<!--
    Raises DomainParticipant resource limits above their defaults so that
    perftest's property list and content filters fit (see BaseProfileQos,
    which pulls this snippet in for that reason).
-->
<qos_profile name="Perftest.IncreaseResourceLimits">
    <participant_qos>
        <resource_limits>
            <!--
                Maximum number of properties associated with the DomainParticipant. Default is 32.
            -->
            <participant_property_list_max_length>64</participant_property_list_max_length>
            <!--
                Maximum length for content filter property in the DomainParticipant. Default is 256.
            -->
            <contentfilter_property_max_length>512</contentfilter_property_max_length>
        </resource_limits>
    </participant_qos>
</qos_profile>
<!--
    Registers and configures a TCPv4 transport plugin instance named "tcp1".
-->
<qos_profile name="Perftest.Transport.TCPv4.BaseConfiguration">
    <participant_qos>
        <property>
            <value>
                <!-- Shared library implementing the TCP transport plugin. -->
                <element>
                    <name>dds.transport.TCPv4.tcp1.library</name>
                    <value>nddstransporttcp</value>
                </element>
                <!-- Entry point used to instantiate the plugin. -->
                <element>
                    <name>dds.transport.TCPv4.tcp1.create_function</name>
                    <value>NDDS_Transport_TCPv4_create</value>
                </element>
                <!-- 1 = disable Nagle's algorithm so small messages are sent
                     immediately rather than coalesced (lower latency). -->
                <element>
                    <name>dds.transport.TCPv4.tcp1.disable_nagle</name>
                    <value>1</value>
                </element>
            </value>
        </property>
    </participant_qos>
</qos_profile>
<!--
    Increase the "tcp1" transport's socket buffers to 1 MB (send) and
    2 MB (receive) so the OS can queue more data before the application
    drains it, reducing the chance of drops under load.
-->
<qos_profile name="Perftest.Transport.TCPv4.IncreaseSendReceiveBuffers">
    <participant_qos>
        <property>
            <value>
                <element>
                    <name>dds.transport.TCPv4.tcp1.send_socket_buffer_size</name>
                    <value>1048576</value>
                </element>
                <element>
                    <name>dds.transport.TCPv4.tcp1.recv_socket_buffer_size</name>
                    <value>2097152</value>
                </element>
            </value>
        </property>
    </participant_qos>
</qos_profile>
<qos_profile name="Perftest.Transport.UDPv4.IncreaseSendReceiveBuffers">
    <!--
        The default values for the send and receive socket buffer sizes are 131072 bytes.
        We increase them to 1MB and 2MB respectively to be able to receive and store
        more packets in the buffers before processing them and avoid packet losses.
    -->
    <participant_qos>
        <property>
            <value>
                <!-- 1048576 bytes = 1 MB send buffer. -->
                <element>
                    <name>dds.transport.UDPv4.builtin.send_socket_buffer_size</name>
                    <value>1048576</value>
                </element>
                <!-- 2097152 bytes = 2 MB receive buffer. -->
                <element>
                    <name>dds.transport.UDPv4.builtin.recv_socket_buffer_size</name>
                    <value>2097152</value>
                </element>
            </value>
        </property>
    </participant_qos>
</qos_profile>
<qos_profile name="Perftest.Transport.UDPv4_WAN.IncreaseSendReceiveBuffers">
    <!--
        The default values for the send and receive socket buffer sizes are 131072 bytes.
        We increase them to 1MB and 2MB respectively to be able to receive and store
        more packets in the buffers before processing them and avoid packet losses.
        (Same tuning as the UDPv4 builtin transport, applied to the WAN variant.)
    -->
    <participant_qos>
        <property>
            <value>
                <element>
                    <name>dds.transport.UDPv4_WAN.builtin.send_socket_buffer_size</name>
                    <value>1048576</value>
                </element>
                <element>
                    <name>dds.transport.UDPv4_WAN.builtin.recv_socket_buffer_size</name>
                    <value>2097152</value>
                </element>
            </value>
        </property>
    </participant_qos>
</qos_profile>
<!--
    Token-bucket flow controller that paces asynchronous writes to roughly
    1 Gbps: 20 tokens/period * 65536 bytes/token every 10 ms
    = 131,072,000 bytes/s (~1.05 Gbit/s sustained).
    max_tokens = 30 permits a short burst above the sustained rate.
-->
<qos_profile name="Perftest.FlowController.1Gbps">
    <participant_qos>
        <property>
            <value>
                <element>
                    <name>dds.flow_controller.token_bucket.1Gbps.token_bucket.max_tokens</name>
                    <value>30</value>
                </element>
                <element>
                    <name>dds.flow_controller.token_bucket.1Gbps.token_bucket.tokens_added_per_period</name>
                    <value>20</value>
                </element>
                <element>
                    <name>dds.flow_controller.token_bucket.1Gbps.token_bucket.bytes_per_token</name>
                    <value>65536</value>
                </element>
                <!-- Replenish period: 0 s + 10,000,000 ns = 10 ms. -->
                <element>
                    <name>dds.flow_controller.token_bucket.1Gbps.token_bucket.period.sec</name>
                    <value>0</value>
                </element>
                <element>
                    <name>dds.flow_controller.token_bucket.1Gbps.token_bucket.period.nanosec</name>
                    <value>10000000</value>
                </element>
            </value>
        </property>
    </participant_qos>
</qos_profile>
<!--
    Token-bucket flow controller for roughly 10 Gbps:
    200 tokens/period * 65536 bytes/token every 10 ms
    = 1,310,720,000 bytes/s (~10.5 Gbit/s sustained).
    max_tokens = 300 permits a short burst above the sustained rate.
-->
<qos_profile name="Perftest.FlowController.10Gbps">
    <participant_qos>
        <property>
            <value>
                <element>
                    <name>dds.flow_controller.token_bucket.10Gbps.token_bucket.max_tokens</name>
                    <value>300</value>
                </element>
                <element>
                    <name>dds.flow_controller.token_bucket.10Gbps.token_bucket.tokens_added_per_period</name>
                    <value>200</value>
                </element>
                <element>
                    <name>dds.flow_controller.token_bucket.10Gbps.token_bucket.bytes_per_token</name>
                    <value>65536</value>
                </element>
                <!-- Replenish period: 0 s + 10,000,000 ns = 10 ms. -->
                <element>
                    <name>dds.flow_controller.token_bucket.10Gbps.token_bucket.period.sec</name>
                    <value>0</value>
                </element>
                <element>
                    <name>dds.flow_controller.token_bucket.10Gbps.token_bucket.period.nanosec</name>
                    <value>10000000</value>
                </element>
            </value>
        </property>
    </participant_qos>
</qos_profile>
<!--
    Reliable-protocol tuning shared (via base_name) by the throughput and
    latency profiles below.
-->
<qos_profile name="Perftest.ReliableSettings">
    <datawriter_qos>
        <protocol>
            <rtps_reliable_writer>
                <!--
                    Some of the settings used here depend on the specific
                    scenario and sample size, hence, they are modified in RTI Perftest's
                    code. Still, we document them here.
                -->
                <!--
                    <low_watermark>_SendQueueSize * 0.1</low_watermark>
                    When the writer's cache gets down to this number of samples, it
                    will slow the rate at which it sends heartbeats to readers.
                -->
                <!--
                    <high_watermark>_SendQueueSize * 0.9</high_watermark>
                    When the writer's cache is filled to this level, it will begin
                    sending heartbeats at a faster rate in order to get faster
                    acknowledgements (positive or negative) of its samples to allow it
                    to empty its cache and avoid blocking.
                -->
                <!--
                    <heartbeats_per_max_samples>_SendQueueSize * 0.1</heartbeats_per_max_samples>
                    Governs how often heartbeats are "piggybacked" on data samples.
                    Piggybacking heartbeats will get a faster positive or negative
                    acknowledgement so that repairs can happen faster if needed.
                -->
                <!--
                    <min_send_window_size>datawriter_qos.resource_limits.max_samples</min_send_window_size>
                    <max_send_window_size>datawriter_qos.resource_limits.max_samples</max_send_window_size>
                    Minimum and maximum size of send window of unacknowledged samples. In
                    this case we set both to the same number to avoid having to grow the
                    send window.
                -->
                <!--
                    If the number of samples in the writer's cache hasn't risen to
                    high_watermark, this is the rate at which the DataWriter will
                    send out periodic heartbeats.
                -->
                <heartbeat_period>
                    <sec>DURATION_ZERO_SEC</sec>
                    <nanosec>10000000</nanosec> <!-- 10 ms -->
                </heartbeat_period>
                <!--
                    If the number of samples in the writer's cache has risen to
                    high_watermark, and has not yet fallen to low_watermark, this is
                    the rate at which the writer will send periodic heartbeats to
                    its readers.
                -->
                <fast_heartbeat_period>
                    <sec>DURATION_ZERO_SEC</sec>
                    <nanosec>1000000</nanosec> <!-- 1 ms -->
                </fast_heartbeat_period>
                <!--
                    If a durable reader starts up after the writer already has some
                    samples in its cache, this is the rate at which it will heartbeat
                    the new reader. It should generally be a shorter period of time
                    than the normal heartbeat period in order to help the new reader
                    catch up.
                -->
                <late_joiner_heartbeat_period>
                    <sec>DURATION_ZERO_SEC</sec>
                    <nanosec>10000000</nanosec> <!-- 10 ms -->
                </late_joiner_heartbeat_period>
                <!--
                    The number of times a reliable writer will send a heartbeat to
                    a reader without receiving a response before it will consider the
                    reader to be inactive and no longer await acknowledgements before
                    discarding sent data.
                -->
                <max_heartbeat_retries>LENGTH_UNLIMITED</max_heartbeat_retries>
                <!--
                    When a DataWriter receives a negative acknowledgement (NACK) from
                    a DataReader for a particular data sample, it will send a repair
                    packet to that reader.
                    The amount of time the writer waits between receiving the NACK and
                    sending the repair will be a random value between the minimum
                    and maximum values specified here. Narrowing the range, and
                    shifting it towards zero, will make the writer more reactive.
                    However, by leaving some delay you increase the chance that the
                    writer will learn of additional readers that missed the same data,
                    in which case it will be able to send a single multicast repair
                    instead of multiple unicast repairs, thereby using the available
                    network bandwidth more efficiently. The higher the fanout in your
                    system (i.e., the more readers per writer), and the greater the
                    load on your network, the more you should consider specifying a
                    non-zero delay here.
                -->
                <min_nack_response_delay>
                    <sec>DURATION_ZERO_SEC</sec>
                    <nanosec>DURATION_ZERO_NSEC</nanosec>
                </min_nack_response_delay>
                <max_nack_response_delay>
                    <sec>DURATION_ZERO_SEC</sec>
                    <nanosec>DURATION_ZERO_NSEC</nanosec>
                </max_nack_response_delay>
                <!--
                    When positive acknowledgements have been disabled, the DataWriter will
                    consider samples as positively "acknowledged" after this duration has
                    elapsed if it has not heard otherwise. Disabling positive acknowledgements
                    has the benefit of reducing network usage. Using this feature must be
                    considered carefully because it changes how and when a DataWriter
                    considers a sample as acknowledged.
                    We disable the positive acknowledgements in perftest by using:
                    -disablePositiveAcks. You can alternatively disable them via QoS by
                    adding <disable_positive_acks>true</disable_positive_acks> in the
                    <protocol> QoS of the DW and DR of the ThroughputQoS and LatencyQoS.
                    The default value of _KeepDurationUsec can be configured here or via the
                    command line parameter -keepDuration.
                -->
                <disable_positive_acks_min_sample_keep_duration>
                    <sec>DURATION_ZERO_SEC</sec>
                    <nanosec>100000</nanosec> <!-- 100 us -->
                </disable_positive_acks_min_sample_keep_duration>
            </rtps_reliable_writer>
        </protocol>
    </datawriter_qos>
    <datareader_qos>
        <protocol>
            <rtps_reliable_reader>
                <!--
                    When the DataReader receives a heartbeat from a DataWriter
                    (indicating what sequence numbers it has published), the following
                    parameters indicate how long it will wait before replying with
                    a positive (assuming they aren't disabled) or negative
                    acknowledgement.
                    The time the reader waits will be a random duration between
                    the minimum and maximum values. Narrowing this range, and shifting
                    it towards zero, will make the system more reactive. However, it
                    increases the chance of (N)ACK spikes. The higher the fanout in
                    your system (i.e., the number of readers per writer), the more
                    you should consider specifying a range here.
                -->
                <min_heartbeat_response_delay>
                    <sec>DURATION_ZERO_SEC</sec>
                    <nanosec>DURATION_ZERO_NSEC</nanosec>
                </min_heartbeat_response_delay>
                <max_heartbeat_response_delay>
                    <sec>DURATION_ZERO_SEC</sec>
                    <nanosec>DURATION_ZERO_NSEC</nanosec>
                </max_heartbeat_response_delay>
            </rtps_reliable_reader>
        </protocol>
    </datareader_qos>
</qos_profile>
<!--
    Reliability settings for the throughput topic. Currently just inherits
    the common Perftest.ReliableSettings; kept as a separate profile,
    presumably so throughput-specific overrides can be added here without
    affecting the latency topic.
-->
<qos_profile name="Perftest.Throughput.ReliableSettings">
    <base_name>
        <element>PerftestQosLibrary::Perftest.ReliableSettings</element>
    </base_name>
</qos_profile>
<!--
    Resource limits for the throughput topic. Writer-side limits are assigned
    programmatically; reader-side limits are fixed here.
-->
<qos_profile name="Perftest.Throughput.ResourceLimits">
    <datawriter_qos>
        <!-- These resource limits are changed in code. -->
    </datawriter_qos>
    <datareader_qos>
        <resource_limits>
            <!--
                The initial and maximum number of data samples. The middleware will
                make sure to allocate space for the initial_samples, and then if
                needed, it will grow the allocated memory up to a point where it
                supports max_samples.
                For the initial number of samples we choose a number that should be
                enough for most use-cases (therefore no need to grow), but that
                should not affect the memory consumption by reserving too much
                memory.
            -->
            <max_samples>10000</max_samples>
            <initial_samples>128</initial_samples>
            <!--
                The maximum number of samples that can be stored for a single
                instance. If the throughput topic is not keyed, there is only a
                single instance, so this value should always be set the same
                as max_samples.
                For a keyed topic, you might want to use this parameter to institute
                a degree of "fairness" among the instances.
            -->
            <max_samples_per_instance>10000</max_samples_per_instance>
        </resource_limits>
        <reader_resource_limits>
            <!--
                The maximum number of samples that Connext will store from a
                single DataWriter. If you run this application with only a single
                DataWriter (that is, in a one-to-one or one-to-many configuration),
                there is no reason for this value to be set to anything less than
                max_samples. If you have many writers and need to institute
                a degree of "fairness" among them, you can decrease this value.
            -->
            <max_samples_per_remote_writer>10000</max_samples_per_remote_writer>
            <!--
                The maximum number of data samples that the application can receive
                from Connext in a single call to DataReader::read() or
                take(). If more data exists in the middleware, the application will
                need to issue multiple read()/take() calls.
                When reading data using listeners, the expected number of samples
                available for delivery in a single take() call is typically small:
                usually just one in the case of unbatched data, or the number of
                samples in a single batch in the case of batched data. When polling
                for data or using Waitsets, however, multiple samples (or batches)
                could be retrieved at once, depending on the data rate.
                A larger value for this parameter makes the API simpler to use, at
                the expense of some additional memory consumption.
            -->
            <max_samples_per_read>65536</max_samples_per_read>
        </reader_resource_limits>
    </datareader_qos>
</qos_profile>
<qos_profile name="Perftest.Throughput.BatchingConfig">
    <datawriter_qos>
        <!--
            When sending many small data-samples, you can increase network efficiency
            by batching multiple samples together in a single protocol-level message
            (usually corresponding to a single network datagram). Batching can offer very
            substantial throughput gains, but often at the expense of latency, although
            in some configurations, the latency penalty can be very small or zero,
            possibly even negative.
        -->
        <!--
            Important: This setting is configured automatically within the source code,
            but only if batching is enabled. _SendQueueSize = 50 (default)
            <writer_resource_limits>
                <max_batches>_SendQueueSize</max_batches>
            </writer_resource_limits>
        -->
        <batch>
            <!--
                This profile does not enable batching, although the remaining
                batching settings are configured as if it did. To enable the batch
                configuration below, turn batching on using the app's command-line.
            -->
            <enable>false</enable>
            <!--
                Batches can be "flushed" to the network based on a maximum size.
                This size can be based on the total number of bytes in the
                accumulated data samples, the total number of bytes in the
                accumulated sample meta-data (e.g., timestamps, sequence numbers,
                etc.), and/or the number of samples. Whenever the first of these
                limits is reached, the batch will be flushed.
                Important: This setting is configured automatically within the source code.
                <max_data_bytes>_BatchSize</max_data_bytes>
            -->
            <max_meta_data_bytes>LENGTH_UNLIMITED</max_meta_data_bytes>
            <max_samples>LENGTH_UNLIMITED</max_samples>
            <!--
                The middleware will associate a source timestamp with a batch when
                it is started. The duration below indicates the amount of time that
                may pass before the middleware will insert an additional timestamp
                into the middle of an existing batch.
                Shortening this duration can give readers increased timestamp
                resolution. However, lengthening this duration
                decreases the amount of meta-data on the network, potentially
                improving throughput, especially if the data samples are very small.
                If this delay is set to an infinite time period, timestamps will
                be inserted only once per batch. Furthermore, the middleware will
                not need to check the time with each sample in the batch, reducing
                the amount of computation on the send path and potentially improving
                both latency and throughput performance.
            -->
            <source_timestamp_resolution>
                <sec>DURATION_INFINITE_SEC</sec>
                <nanosec>DURATION_INFINITE_NSEC</nanosec>
            </source_timestamp_resolution>
            <!--
                The maximum flush delay. A batch is flushed automatically after the
                delay specified by this parameter. As its value is DURATION_INFINITE,
                the flush event will be triggered by max_data_bytes.
            -->
            <max_flush_delay>
                <sec>DURATION_INFINITE_SEC</sec>
                <nanosec>DURATION_INFINITE_NSEC</nanosec>
            </max_flush_delay>
            <!--
                By default, the same DataWriter can be used from multiple threads.
                If you know that your application will only write data from a single
                thread, you can switch off a level of locking that occurs when
                samples are added to a batch. When sending very small samples very
                fast, this decreased overhead can improve performance.
                However, even in the case of single-threaded access, the impact of
                locking can be negligible, and deactivating the lock puts your
                application at risk of memory corruption if multiple threads do
                write to the same DataWriter - either without your knowledge or as
                a result of application maintenance. Therefore, RTI recommends that
                you only set thread_safe_write to false after detailed testing has
                confirmed that your application does indeed behave correctly and
                with improved performance.
            -->
            <thread_safe_write>false</thread_safe_write>
        </batch>
    </datawriter_qos>
</qos_profile>
<!--
    Reliability tuning for the latency (pong) topic, layered on top of the
    common Perftest.ReliableSettings.
-->
<qos_profile name="Perftest.Latency.ReliableSettings">
    <base_name>
        <element>PerftestQosLibrary::Perftest.ReliableSettings</element>
    </base_name>
    <datawriter_qos>
        <protocol>
            <rtps_reliable_writer>
                <!-- Fixed watermark/heartbeat values (the throughput topic
                     derives these in code from the send-queue size instead;
                     see the comments in Perftest.ReliableSettings). -->
                <low_watermark>10</low_watermark>
                <high_watermark>100</high_watermark>
                <heartbeats_per_max_samples>1000</heartbeats_per_max_samples>
                <!-- Do not bound the send window of unacknowledged samples. -->
                <min_send_window_size>LENGTH_UNLIMITED</min_send_window_size>
                <max_send_window_size>LENGTH_UNLIMITED</max_send_window_size>
            </rtps_reliable_writer>
        </protocol>
    </datawriter_qos>
    <datareader_qos>
        <protocol>
            <rtps_reliable_reader>
                <!-- Zero suppression window: the reader reacts to every
                     heartbeat, even closely spaced ones, for fastest repairs. -->
                <heartbeat_suppression_duration>
                    <sec>DURATION_ZERO_SEC</sec>
                    <nanosec>DURATION_ZERO_NSEC</nanosec>
                </heartbeat_suppression_duration>
            </rtps_reliable_reader>
        </protocol>
    </datareader_qos>
</qos_profile>
<!--
    Resource limits for the latency (pong) topic.
-->
<qos_profile name="Perftest.Latency.ResourceLimits">
    <datawriter_qos>
        <resource_limits>
            <!-- Unbounded writer cache; preallocate space for 100 samples. -->
            <max_samples>LENGTH_UNLIMITED</max_samples>
            <initial_samples>100</initial_samples>
            <max_samples_per_instance>LENGTH_UNLIMITED</max_samples_per_instance>
        </resource_limits>
    </datawriter_qos>
    <datareader_qos>
        <!--
            The number of samples for which the middleware will set aside space.
            See the comments above for more information.
        -->
        <resource_limits>
            <max_samples>100</max_samples>
            <initial_samples>100</initial_samples>
            <max_samples_per_instance>100</max_samples_per_instance>
        </resource_limits>
        <reader_resource_limits>
            <!-- Cap per-writer storage at the same 100 samples. -->
            <max_samples_per_remote_writer>100</max_samples_per_remote_writer>
        </reader_resource_limits>
    </datareader_qos>
</qos_profile>
<!--
    Security-related overrides, only effective when the security plugins
    are loaded.
-->
<qos_profile name="Perftest.Security">
    <participant_qos>
        <property>
            <value>
                <!--
                    This QoS is applied when using security. Default starting in 7.0.0
                    is aes-256-gcm; perftest selects the lighter aes-128-gcm.
                -->
                <element>
                    <name>com.rti.serv.secure.cryptography.encryption_algorithm</name>
                    <value>aes-128-gcm</value>
                </element>
            </value>
        </property>
    </participant_qos>
</qos_profile>
<!--
Base QoS Profile:
Used by the rest of the profiles. Participants will be created using this profile.
-->
<qos_profile name="BaseProfileQos">
    <base_name>
        <!-- General Settings that affect to all the Perftest profiles -->
        <element>PerftestQosLibrary::Presentation.TopicPresentation</element>
        <!-- Settings related to Transport -->
        <element>PerftestQosLibrary::Perftest.Transport.UDPv4.IncreaseSendReceiveBuffers</element>
        <element>PerftestQosLibrary::Perftest.Transport.UDPv4_WAN.IncreaseSendReceiveBuffers</element>
        <element>PerftestQosLibrary::Perftest.Transport.TCPv4.BaseConfiguration</element>
        <element>PerftestQosLibrary::Perftest.Transport.TCPv4.IncreaseSendReceiveBuffers</element>
        <!-- Some Resource Limits had to be increased in order to run our application -->
        <element>PerftestQosLibrary::Perftest.IncreaseResourceLimits</element>
        <!-- Load the flow controllers for asynchronous publishing-->
        <element>PerftestQosLibrary::Perftest.FlowController.1Gbps</element>
        <element>PerftestQosLibrary::Perftest.FlowController.10Gbps</element>
        <!-- For security some settings are modified from the default behavior -->
        <element>PerftestQosLibrary::Perftest.Security</element>
    </base_name>
    <participant_qos>
        <!-- Human-readable participant name, visible to tools such as
             Admin Console. -->
        <participant_name>
            <name>Perftest Participant</name>
        </participant_name>
        <!--
            Some of the properties we set may not be recognized if we are not
            loading all the libraries (e.g: the tcp transport settings), in order to
            avoid errors in the application we just skip their validation.
        -->
        <property>
            <value>
                <element>
                    <name>dds.participant.property_validation_action</name>
                    <value>1</value>
                </element>
                <!-- false: sequence bounds are NOT ignored during type-consistency
                     checking. NOTE(review): rationale not stated here; confirm
                     against RTI type-consistency documentation. -->
                <element>
                    <name>dds.type_consistency.ignore_sequence_bounds</name>
                    <value>false</value>
                </element>
            </value>
        </property>
    </participant_qos>
</qos_profile>
<!--
Throughput QoS Profile:
This is the profile used by the throughput-testing portion of the application,
in the topic that sends the pings (from the Perftest publisher to the Perftest
subscriber).
-->
<qos_profile name="ThroughputQos" base_name="BaseProfileQos">
    <!-- Strict reliability + throughput-specific reliability tuning,
         batching configuration and resource limits. -->
    <base_name>
        <element>PerftestQosLibrary::Reliability.StrictReliable</element>
        <element>PerftestQosLibrary::Perftest.Throughput.ReliableSettings</element>
        <element>PerftestQosLibrary::Perftest.Throughput.BatchingConfig</element>
        <element>PerftestQosLibrary::Perftest.Throughput.ResourceLimits</element>
    </base_name>
    <datawriter_qos>
        <writer_resource_limits>
            <!-- Allow writer-side state for up to 256 remote readers using
                 content filters. NOTE(review): assumed to support many
                 filtering subscribers; confirm against RTI docs. -->
            <max_remote_reader_filters>256</max_remote_reader_filters>
        </writer_resource_limits>
    </datawriter_qos>
</qos_profile>
<!--
Latency QoS Profile:
This is the profile used by the latency-testing portion of the application,
in the topic that sends the pongs (from the Perftest subscriber to the Perftest
publisher).
-->
<qos_profile name="LatencyQos" base_name="BaseProfileQos">
    <!-- Strict reliability + latency-specific reliability tuning and
         resource limits. -->
    <base_name>
        <element>PerftestQosLibrary::Reliability.StrictReliable</element>
        <element>PerftestQosLibrary::Perftest.Latency.ReliableSettings</element>
        <element>PerftestQosLibrary::Perftest.Latency.ResourceLimits</element>
    </base_name>
    <datareader_qos>
        <!-- Override the KEEP_ALL history inherited through
             Reliability.StrictReliable: the latency reader keeps only the
             most recent samples (depth left at its default). -->
        <history>
            <kind>KEEP_LAST_HISTORY_QOS</kind>
        </history>
    </datareader_qos>
</qos_profile>
<!--
Announcement QoS Profile:
This profile is used by the test harness for the announcement topic,
which is used to synchronize the publishing and subscribing sides
to start the test.
-->
<qos_profile name="AnnouncementQos" base_name="LatencyQos">
    <!-- Same QoS as the latency topic, plus TRANSIENT_LOCAL durability so
         announcement samples are redelivered to late-joining endpoints. -->
    <base_name>
        <element>BuiltinQosSnippetLib::QosPolicy.Durability.TransientLocal</element>
    </base_name>
</qos_profile>
</qos_library>
</dds>