forked from eclipse-zenoh/zenoh
-
Notifications
You must be signed in to change notification settings - Fork 0
/
DEFAULT_CONFIG.json5
357 lines (346 loc) · 16.4 KB
/
DEFAULT_CONFIG.json5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
/// This file attempts to list and document available configuration elements.
/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure.
/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice.
{
/// The identifier (as hex-string) that zenohd must use.
/// If not set, a random UUIDv4 will be used.
/// WARNING: this id must be unique in your zenoh network.
// id: "5975702c206974277320415343494921",
/// The node's mode (router, peer or client)
mode: "peer",
/// Which endpoints to connect to. E.g. tcp/localhost:7447.
/// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup.
connect: {
endpoints: [
// "<proto>/<address>"
],
},
/// Which endpoints to listen on. E.g. tcp/localhost:7447.
/// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers,
/// peers, or clients can use to establish a zenoh session.
listen: {
endpoints: [
// "<proto>/<address>"
],
},
/// Configure the scouting mechanisms and their behaviours
scouting: {
/// In client mode, the period dedicated to scouting for a router before failing
timeout: 3000,
/// In peer mode, the period dedicated to scouting remote peers before attempting other operations
delay: 200,
/// The multicast scouting configuration.
multicast: {
/// Whether multicast scouting is enabled or not
enabled: true,
/// The socket which should be used for multicast scouting
address: "224.0.0.224:7446",
/// The network interface which should be used for multicast scouting
interface: "auto", // If not set or set to "auto" the interface is picked automatically
/// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast.
/// Accepts a single value or different values for router, peer and client.
/// Each value is bit-or-like combinations of "peer", "router" and "client".
autoconnect: { router: "", peer: "router|peer" },
/// Whether or not to listen for scout messages on UDP multicast and reply to them.
listen: true,
},
/// The gossip scouting configuration.
gossip: {
/// Whether gossip scouting is enabled or not
enabled: true,
/// When true, gossip scouting information is propagated multiple hops to all nodes in the local network.
/// When false, gossip scouting information is only propagated to the next hop.
/// Activating multihop gossip implies more scouting traffic and a lower scalability.
/// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have
/// direct connectivity with each other.
multihop: false,
/// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip.
/// Accepts a single value or different values for router, peer and client.
/// Each value is bit-or-like combinations of "peer", "router" and "client".
autoconnect: { router: "", peer: "router|peer" },
},
},
/// Configuration of data messages timestamps management.
timestamping: {
/// Whether data messages should be timestamped if not already.
/// Accepts a single boolean value or different values for router, peer and client.
enabled: { router: true, peer: false, client: false },
/// Whether data messages with timestamps in the future should be dropped or not.
/// If set to false (default), messages with timestamps in the future are retimestamped.
/// Timestamps are ignored if timestamping is disabled.
drop_future_timestamp: false,
},
/// The default timeout to apply to queries in milliseconds.
queries_default_timeout: 10000,
/// The routing strategy to use and its configuration.
routing: {
/// The routing strategy to use in routers and its configuration.
router: {
/// When set to true a router will forward data between two peers
/// directly connected to it if it detects that those peers are not
/// connected to each other.
/// The failover brokering only works if gossip discovery is enabled.
peers_failover_brokering: true,
},
/// The routing strategy to use in peers and its configuration.
peer: {
/// The routing strategy to use in peers. ("peer_to_peer" or "linkstate").
mode: "peer_to_peer",
},
},
// /// The declarations aggregation strategy.
// aggregation: {
// /// A list of key-expressions for which all included subscribers will be aggregated into.
// subscribers: [
// // key_expression
// ],
// /// A list of key-expressions for which all included publishers will be aggregated into.
// publishers: [
// // key_expression
// ],
// },
/// Configure internal transport parameters
transport: {
unicast: {
/// Timeout in milliseconds when opening a link
accept_timeout: 10000,
/// Maximum number of zenoh sessions in pending state while accepting
accept_pending: 100,
/// Maximum number of sessions that can be simultaneously alive
max_sessions: 1000,
/// Maximum number of incoming links that are admitted per session
max_links: 1,
},
qos: {
enabled: true,
},
link: {
// /// An optional whitelist of protocols to be used for accepting and opening sessions.
// /// If not configured, all the supported protocols are automatically whitelisted.
// /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"]
// /// For example, to only enable "tls" and "quic":
// protocols: ["tls", "quic"],
/// Configure the zenoh TX parameters of a link
tx: {
/// The largest value allowed for Zenoh message sequence numbers (wrapping to 0 when reached).
/// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used.
/// Defaults to 2^28.
sequence_number_resolution: 268435456,
/// Link lease duration in milliseconds to announce to other zenoh nodes
lease: 10000,
/// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive
/// messages will be sent at the configured time interval.
/// NOTE: In order to consider eventual packet loss and transmission latency and jitter,
/// set the actual keep_alive timeout to one fourth of the lease time.
/// This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity
/// check which considers a link as failed when no messages are received in 3.5 times the
/// target interval.
keep_alive: 4,
/// Batch size in bytes is expressed as a 16bit unsigned integer.
/// Therefore, the maximum batch size is 2^16-1 (i.e. 65535).
/// The default batch size value is the maximum batch size: 65535.
batch_size: 65535,
/// Each zenoh link has a transmission queue that can be configured
queue: {
/// The size of each priority queue indicates the number of batches a given queue can contain.
/// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE.
/// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE,
/// the amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU.
/// If qos is false, then only the DATA priority will be allocated.
size: {
control: 1,
real_time: 1,
interactive_high: 1,
interactive_low: 1,
data_high: 2,
data: 4,
data_low: 4,
background: 4,
},
/// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress.
/// Higher values lead to a more aggressive batching but it will introduce additional latency.
backoff: 100,
},
},
/// Configure the zenoh RX parameters of a link
rx: {
/// Receiving buffer size in bytes for each link
/// The default rx_buffer_size value is the same as the default batch size: 65535.
/// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate
/// more in-flight data. This is particularly relevant when dealing with large messages.
/// E.g. for 16MiB rx_buffer_size set the value to: 16777216.
buffer_size: 65535,
/// Maximum size of the defragmentation buffer at receiver end.
/// Fragmented messages that are larger than the configured size will be dropped.
/// The default value is 1GiB. This would work in most scenarios.
/// NOTE: reduce the value if you are operating on a memory constrained device.
max_message_size: 1073741824,
},
/// Configure TLS specific parameters
tls: {
/// Path to the certificate of the certificate authority used to validate either the server
/// or the client's keys and certificates, depending on the node's mode. If not specified
/// on router mode then the default WebPKI certificates are used instead.
root_ca_certificate: null,
/// Path to the TLS server private key
server_private_key: null,
/// Path to the TLS server public certificate
server_certificate: null,
/// Client authentication, if true enables mTLS (mutual authentication)
client_auth: false,
/// Path to the TLS client private key
client_private_key: null,
/// Path to the TLS client public certificate
client_certificate: null,
},
},
/// Shared memory configuration
shared_memory: {
enabled: true,
},
/// Access control configuration
auth: {
/// The configuration of authentication.
/// A password implies a username is required.
usrpwd: {
user: null,
password: null,
/// The path to a file containing the user password dictionary
dictionary_file: null,
},
pubkey: {
public_key_pem: null,
private_key_pem: null,
public_key_file: null,
private_key_file: null,
key_size: null,
known_keys_file: null,
},
},
},
/// Configure the Admin Space
/// Unstable: this configuration part works as advertised, but may change in a future release
adminspace: {
// read and/or write permissions on the admin space
permissions: {
read: true,
write: false,
},
},
///
/// Plugins configurations
///
// /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup
// plugins_search_dirs: [],
// /// Plugins are only loaded if present in the configuration when starting.
// /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace.
// plugins: {
// /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux)
//
// /// Configure the REST API plugin
// rest: {
// /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic.
// __required__: true, // defaults to false
// http_port: 8000,
// },
//
// /// Configure the storage manager plugin
// storage_manager: {
// /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load.
// __path__: [
// "./target/release/libzenoh_plugin_storage_manager.so",
// "./target/release/libzenoh_plugin_storage_manager.dylib",
// ],
// /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup
// backend_search_dirs: [],
// /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing.
// volumes: {
// /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb
// influxdb: {
// url: "https://myinfluxdb.example",
// /// Some plugins may need passwords in their configuration.
// /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier.
// /// any value held at the key "private" will not be shown in the adminspace.
// private: {
// username: "user1",
// password: "pw1",
// },
// },
// influxdb2: {
// /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed.
// backend: "influxdb",
// private: {
// username: "user2",
// password: "pw2",
// },
// url: "https://localhost:8086",
// },
// },
//
// /// Configure the storages supported by the volumes
// storages: {
// demo: {
// /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression.
// key_expr: "demo/memory/**",
// /// Storages also need to know which volume will be used to actually store their key-value pairs.
// /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient.
// volume: "memory",
// },
// demo2: {
// key_expr: "demo/memory2/**",
// volume: "memory",
// /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh.
// /// Metadata includes the set of wild card updates and deletions (tombstones).
// /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected.
// garbage_collection: {
// /// The garbage collection event will be periodic with this duration.
// /// The duration is specified in seconds.
// period: 30,
// /// Metadata older than this parameter will be garbage collected.
// /// The duration is specified in seconds.
// lifespan: 86400,
// },
// /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas.
// /// In the absence of this configuration, a normal storage is initialized
// /// Note: all the samples to be stored in replicas should be timestamped
// replica_config: {
// /// Specifying the parameters is optional, by default the values provided will be used.
// /// Time interval between different synchronization attempts in seconds
// publication_interval: 5,
// /// Expected propagation delay of the network in milliseconds
// propagation_delay: 200,
// /// This is the chunk that you would like your data to be divided into in time, in milliseconds.
// /// The higher the frequency of updates, the lower the delta should be chosen.
// /// To be efficient, delta should be the time containing no more than 100,000 samples
// delta: 1000,
// }
// },
// demo3: {
// key_expr: "demo/memory3/**",
// volume: "memory",
// /// A complete storage advertises itself as containing all the known keys matching the configured key expression.
// /// If not configured, complete defaults to false.
// complete: "true",
// },
// influx_demo: {
// key_expr: "demo/influxdb/**",
// /// This prefix will be stripped of the received keys when storing.
// strip_prefix: "demo/influxdb",
// /// influxdb-backed volumes need a bit more configuration, which is passed like-so:
// volume: {
// id: "influxdb",
// db: "example",
// },
// },
// influx_demo2: {
// key_expr: "demo/influxdb2/**",
// strip_prefix: "demo/influxdb2",
// volume: {
// id: "influxdb2",
// db: "example",
// },
// },
// },
// },
// },
}