forked from eclipse-cyclonedds/cyclonedds
/
dds_rhc_default.c
3032 lines (2742 loc) · 113 KB
/
dds_rhc_default.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright(c) 2006 to 2022 ZettaScale Technology and others
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0, or the Eclipse Distribution License
// v. 1.0 which is available at
// http://www.eclipse.org/org/documents/edl-v10.php.
//
// SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause
#include <assert.h>
#include <string.h>
#include <limits.h>
#if HAVE_VALGRIND && ! defined (NDEBUG)
#include <memcheck.h>
#define USE_VALGRIND 1
#else
#define USE_VALGRIND 0
#endif
#include "dds/ddsrt/heap.h"
#include "dds/ddsrt/sync.h"
#include "dds/ddsrt/time.h"
#include "dds__entity.h"
#include "dds__reader.h"
#include "dds__loaned_sample.h"
#include "dds/ddsc/dds_rhc.h"
#include "dds__rhc_default.h"
#include "dds/ddsi/ddsi_tkmap.h"
#include "dds/ddsrt/hopscotch.h"
#include "dds/ddsrt/avl.h"
#include "dds/ddsrt/circlist.h"
#include "dds/ddsi/ddsi_rhc.h"
#include "dds/ddsi/ddsi_xqos.h"
#include "dds/ddsi/ddsi_unused.h"
#include "dds/ddsi/ddsi_domaingv.h"
#include "dds/ddsi/ddsi_radmin.h" /* sampleinfo */
#include "dds/ddsi/ddsi_entity.h"
#include "dds/ddsi/ddsi_serdata.h"
#ifdef DDS_HAS_LIFESPAN
#include "dds/ddsi/ddsi_lifespan.h"
#endif
#ifdef DDS_HAS_DEADLINE_MISSED
#include "dds/ddsi/ddsi_deadline.h"
#endif
/* INSTANCE MANAGEMENT
===================
Instances are created implicitly by "write" and "dispose", unregistered by
"unregister". Valid samples are added only by write operations (possibly
combined with dispose and/or unregister), invalid samples only by dispose
and unregister operations, and only when there is no sample or the latest
available sample is read. (This might be a bit funny in the oddish case
where someone would take only the latest of multiple valid samples.)
There is at most one invalid sample per instance, its sample info is taken
straight from the instance when it is returned to the reader and its
presence and sample_state are represented by two bits. Any incoming sample
(or "incoming invalid sample") will cause an existing invalid sample to be
dropped. Thus, invalid samples are used solely to signal an instance state
change when there are no samples.
(Note: this can fairly easily be changed to let an invalid sample always
be generated on dispose/unregister.)
The instances and the RHC as a whole keep track of the number of valid
samples and the number of read valid samples, as well as the same for the
invalid ones, with the twist that the "inv_exists" and "inv_isread" booleans
in the RHC serve as flags and as counters at the same time.
Instances are dropped when the number of samples (valid & invalid combined)
and the number of registrations both go to 0. The number of registrations
is kept track of in "wrcount", and a unique identifier for the most recent
writer is typically in "wr_iid". Typically, because an unregister by
"wr_iid" clears it. The actual set of registrations is in principle a set
of <instance,writer> tuples stored in "registrations", but excluded from it
are those instances that have "wrcount" = 1 and "wr_iid" != 0. The typical
case is a single active writer for an instance, and this means the typical
case has no <instance,writer> tuples in "registrations".
It is unfortunate that this model complicates the transitions from 1 writer
to 2 writers, and from 2 writers back to 1 writer. This seems a reasonable
price to pay for the significant performance gain from not having to do
anything in the case of a single (or single dominant) writer.
(Note: "registrations" may perhaps be moved to a global registration table
of <reader,instance,writer> tuples, using a lock-free hash table, but that
doesn't affect the model.)
The unique identifiers for instances and writers are approximately uniformly
drawn from the set of positive unsigned 64-bit integers. This means they
are excellent hash keys, and both the instance hash table and the writer
registrations hash table use these directly.
QOS SUPPORT
===========
History is implemented as a (circular) linked list, but the invalid samples
model implemented here allows this to trivially be changed to an array of
samples, and this is probably profitable for shallow histories. Currently
the instance has a single sample embedded in particular to optimise the
KEEP_LAST with depth=1 case.
BY_SOURCE ordering is implemented differently from OpenSplice and does not
perform back-filling of the history. The arguments against that can be
found in JIRA, but in short: (1) not backfilling is significantly simpler
(and thus faster), (2) backfilling potentially requires rewriting the
states of samples already read, (3) it is as much "eventually consistent",
the only difference is that the model implemented here considers the
dataspace to fundamentally be "keep last 1" and always move forward (source
timestamp increases), and the per-reader history to be a record of sampling
that dataspace by the reader, whereas with backfilling the model is that
the eventual consistency applies to the full history.
(As it happens, the model implemented here is that also used by RTI and
probably other implementations -- OpenSplice is the odd one out in this
regard.)
Exclusive ownership is implemented by dropping all data from all writers
other than "wr_iid", unless "wr_iid" is 0 or the strength of the arriving
sample is higher than the current strength of the instance (in "strength").
The writer id is only reset by unregistering, in which case it is natural
that ownership is up for grabs again. QoS changes (not supported in this
DDSI implementation, but still) will be done by also resetting "wr_iid"
when an exclusive ownership writer lowers its strength.
Lifespan is based on the reception timestamp, and the monotonic time is
used for sample expiry if this QoS is set to something else than infinite.
READ CONDITIONS
===============
Read conditions are currently *always* attached to the reader, creating a
read condition and not attaching it to a waitset is a bit of a waste of
resources. This can be changed, of course, but it is doubtful many read
conditions get created without actually being used.
The "trigger" of a read condition counts the number of instances
matching its condition and is synchronously updated whenever the state
of instances and/or samples changes. The instance/sample states are
reduced to a triplet of a bitmask representing the instance and view
states, whether or not the instance has unread samples, and whether or not
it has read ones. (Invalid samples included.) Two of these triplets,
pre-change and post-change are passed to "update_conditions_locked",
which then runs over the array of attached read conditions and updates
the trigger. It returns whether or not a trigger changed
from 0 to 1, as this indicates the attached waitsets must be signalled.
*/
/* FIXME: tkmap should perhaps retain data with timestamp set to invalid
An invalid timestamp is (logically) unordered with respect to valid
timestamps, and that would mean BY_SOURCE order could be respected
even when generating an invalid sample for an unregister message using
the tkmap data. */
#define MAX_ATTACHED_QUERYCONDS (CHAR_BIT * sizeof (dds_querycond_mask_t))
#define INCLUDE_TRACE 1
#if INCLUDE_TRACE
#define TRACE(...) DDS_CLOG (DDS_LC_RHC, &rhc->gv->logconfig, __VA_ARGS__)
#else
#define TRACE(...) ((void)0)
#endif
/******************************
****** LIVE WRITERS ******
******************************/
/* A single <instance, writer> registration entry, hashed on both ids. */
struct lwreg
{
  uint64_t iid;     /* unique instance id */
  uint64_t wr_iid;  /* unique writer id */
};

/* Registration table; the hash table itself is allocated lazily. */
struct lwregs
{
  struct ddsrt_ehh * regs;
};

/* Instance and writer ids are drawn approximately uniformly from the 64-bit
   space (see the file header comment), so xor-folding them down to 32 bits
   is a perfectly adequate hash. */
static uint32_t lwreg_hash (const void *vl)
{
  const struct lwreg *reg = vl;
  const uint64_t folded = reg->iid ^ reg->wr_iid;
  return (uint32_t) folded;
}

static bool lwreg_equals (const void *va, const void *vb)
{
  const struct lwreg *x = va;
  const struct lwreg *y = vb;
  return (x->iid == y->iid) && (x->wr_iid == y->wr_iid);
}
static void lwregs_init (struct lwregs *rt)
{
rt->regs = NULL;
}
static void lwregs_fini (struct lwregs *rt)
{
if (rt->regs)
ddsrt_ehh_free (rt->regs);
}
static bool lwregs_contains (struct lwregs *rt, uint64_t iid, uint64_t wr_iid)
{
struct lwreg dummy = { .iid = iid, .wr_iid = wr_iid };
return rt->regs != NULL && ddsrt_ehh_lookup (rt->regs, &dummy) != NULL;
}
static bool lwregs_add (struct lwregs *rt, uint64_t iid, uint64_t wr_iid)
{
struct lwreg dummy = { .iid = iid, .wr_iid = wr_iid };
if (rt->regs == NULL)
rt->regs = ddsrt_ehh_new (sizeof (struct lwreg), 1, lwreg_hash, lwreg_equals);
return ddsrt_ehh_add (rt->regs, &dummy);
}
static bool lwregs_delete (struct lwregs *rt, uint64_t iid, uint64_t wr_iid)
{
struct lwreg dummy = { .iid = iid, .wr_iid = wr_iid };
return rt->regs != NULL && ddsrt_ehh_remove (rt->regs, &dummy);
}
#if 0
void lwregs_dump (struct lwregs *rt)
{
struct ddsrt_ehh_iter it;
for (struct lwreg *r = ddsrt_ehh_iter_first(rt->regs, &it); r; r = ddsrt_ehh_iter_next(&it))
printf("iid=%"PRIu64" wr_iid=%"PRIu64"\n", r->iid, r->wr_iid);
}
#endif
/*************************
****** RHC ******
*************************/
/* One stored valid sample of an instance: a refcounted serdata plus the
   read-state and generation bookkeeping needed to fill in sample info.
   The samples of an instance form a circular singly-linked list entered
   via rhc_instance::latest (newest); latest->next is the oldest. */
struct rhc_sample {
struct ddsi_serdata *sample; /* serialised data (either just_key or real data) */
struct rhc_sample *next; /* next sample in time ordering, or oldest sample if most recent */
uint64_t wr_iid; /* unique id for writer of this sample (perhaps better in serdata) */
dds_querycond_mask_t conds; /* matching query conditions */
bool isread; /* READ or NOT_READ sample state */
uint32_t disposed_gen; /* snapshot of instance counter at time of insertion */
uint32_t no_writers_gen; /* __/ */
#ifdef DDS_HAS_LIFESPAN
struct ddsi_lifespan_fhnode lifespan; /* fibheap node for lifespan */
struct rhc_instance *inst; /* reference to rhc instance */
#endif
};
/* Per-instance administration: identity, the circular sample list, the
   instance/view state bits, registration/ownership tracking, and an
   embedded pre-allocated sample to optimise the KEEP_LAST depth-1 case
   (see the "INSTANCE MANAGEMENT" comment at the top of this file). */
struct rhc_instance {
uint64_t iid; /* unique instance id, key of table, also serves as instance handle */
uint64_t wr_iid; /* unique of id of writer of latest sample or 0; if wrcount = 0 it is the wr_iid that caused */
struct rhc_sample *latest; /* latest received sample; circular list old->new; null if no sample */
uint32_t nvsamples; /* number of "valid" samples in instance */
uint32_t nvread; /* number of READ "valid" samples in instance (0 <= nvread <= nvsamples) */
dds_querycond_mask_t conds; /* matching query conditions */
uint32_t wrcount; /* number of live writers */
unsigned isnew : 1; /* NEW or NOT_NEW view state */
unsigned a_sample_free : 1; /* whether or not a_sample is in use */
unsigned isdisposed : 1; /* DISPOSED or NOT_DISPOSED (if not disposed, wrcount determines ALIVE/NOT_ALIVE_NO_WRITERS) */
unsigned autodispose : 1; /* wrcount > 0 => at least one registered writer has had auto-dispose set on some update */
unsigned wr_iid_islive : 1; /* whether wr_iid is of a live writer */
unsigned inv_exists : 1; /* whether or not state change occurred since last sample (i.e., must return invalid sample) */
unsigned inv_isread : 1; /* whether or not that state change has been read before (undefined if !inv_exists) */
unsigned deadline_reg : 1; /* whether or not registered for a deadline (== isdisposed, except store() defers updates) */
uint32_t disposed_gen; /* bloody generation counters - worst invention of mankind */
uint32_t no_writers_gen; /* __/ */
int32_t strength; /* "current" ownership strength */
ddsi_guid_t wr_guid; /* guid of last writer (if wr_iid != 0 then wr_guid is the corresponding guid, else undef) */
ddsrt_wctime_t tstamp; /* source time stamp of last update */
struct ddsrt_circlist_elem nonempty_list; /* links non-empty instances in arbitrary ordering */
#ifdef DDS_HAS_DEADLINE_MISSED
struct deadline_elem deadline; /* element in deadline missed administration */
#endif
struct ddsi_tkmap_instance *tk;/* backref into TK for unref'ing */
struct rhc_sample a_sample; /* pre-allocated storage for 1 sample */
};
/* Outcome of attempting to store an incoming sample in the RHC. */
typedef enum rhc_store_result {
RHC_STORED, /* sample accepted into the cache */
RHC_FILTERED, /* sample dropped by filtering (e.g. time-based filter) */
RHC_REJECTED /* sample not stored (e.g. resource limits) */
} rhc_store_result_t;
/* The default reader history cache: an instance table keyed on iid, the
   circular list of non-empty instances, the writer-registration table, and
   aggregate counters kept consistent with the per-instance state (checked
   by rhc_check_counts_locked in debug builds). All mutable state is
   protected by "lock". */
struct dds_rhc_default {
struct dds_rhc common;
struct ddsrt_hh *instances;
struct ddsrt_circlist nonempty_instances; /* circular, points to most recently added one, NULL if none */
struct lwregs registrations; /* should be a global one (with lock-free lookups) */
/* Instance/Sample maximums from resource limits QoS */
int32_t max_instances; /* FIXME: probably better as uint32_t with MAX_UINT32 for unlimited */
int32_t max_samples; /* FIXME: probably better as uint32_t with MAX_UINT32 for unlimited */
int32_t max_samples_per_instance; /* FIXME: probably better as uint32_t with MAX_UINT32 for unlimited */
dds_duration_t minimum_separation; /* derived from the time_based_filter QoSPolicy */
uint32_t n_instances; /* # instances, including empty */
uint32_t n_nonempty_instances; /* # non-empty instances */
uint32_t n_not_alive_disposed; /* # disposed, non-empty instances */
uint32_t n_not_alive_no_writers; /* # not-alive-no-writers, non-empty instances */
uint32_t n_new; /* # new, non-empty instances */
uint32_t n_vsamples; /* # "valid" samples over all instances */
uint32_t n_vread; /* # read "valid" samples over all instances */
uint32_t n_invsamples; /* # invalid samples over all instances */
uint32_t n_invread; /* # read invalid samples over all instances */
bool by_source_ordering; /* true if BY_SOURCE, false if BY_RECEPTION */
bool exclusive_ownership; /* true if EXCLUSIVE, false if SHARED */
bool reliable; /* true if reliability RELIABLE */
bool xchecks; /* whether to do expensive checking if checking at all */
dds_reader *reader; /* reader -- may be NULL (used by rhc_torture) */
struct ddsi_tkmap *tkmap; /* back pointer to tkmap */
struct ddsi_domaingv *gv; /* globals -- so far only for log config */
const struct ddsi_sertype *type; /* type description */
uint32_t history_depth; /* depth, 1 for KEEP_LAST_1, 2**32-1 for KEEP_ALL */
ddsrt_mutex_t lock;
dds_readcond * conds; /* List of associated read conditions */
uint32_t nconds; /* Number of associated read conditions */
uint32_t nqconds; /* Number of associated query conditions */
dds_querycond_mask_t qconds_samplest; /* Mask of associated query conditions that check the sample state */
void *qcond_eval_samplebuf; /* Temporary storage for evaluating query conditions, NULL if no qconds */
#ifdef DDS_HAS_LIFESPAN
struct ddsi_lifespan_adm lifespan; /* Lifespan administration */
#endif
#ifdef DDS_HAS_DEADLINE_MISSED
struct ddsi_deadline_adm deadline; /* Deadline missed administration */
#endif
};
/* Reduced instance state used to evaluate read-condition triggers: a
   bitmask of instance/view states plus whether the instance holds read
   and/or unread samples (see the "READ CONDITIONS" comment above). */
struct trigger_info_cmn {
uint32_t qminst;
bool has_read;
bool has_not_read;
};
/* Snapshot of the reduced state taken before a change. */
struct trigger_info_pre {
struct trigger_info_cmn c;
};
/* Per-change record of added/removed (in)valid samples for query-condition
   trigger updates. */
struct trigger_info_qcond {
/* 0 or inst->conds depending on whether an invalid/valid sample was pushed out/added;
inc_xxx_read is there so read can indicate a sample changed from unread to read */
bool dec_invsample_read;
bool dec_sample_read;
bool inc_invsample_read;
bool inc_sample_read;
dds_querycond_mask_t dec_conds_invsample;
dds_querycond_mask_t dec_conds_sample;
dds_querycond_mask_t inc_conds_invsample;
dds_querycond_mask_t inc_conds_sample;
};
/* Snapshot of the reduced state taken after a change. */
struct trigger_info_post {
struct trigger_info_cmn c;
};
static const struct dds_rhc_ops dds_rhc_default_ops;
/* Sample-state mask bit (READ/NOT_READ) for a valid sample. */
static uint32_t qmask_of_sample (const struct rhc_sample *s)
{
  if (s->isread)
    return DDS_READ_SAMPLE_STATE;
  return DDS_NOT_READ_SAMPLE_STATE;
}

/* Sample-state mask bit (READ/NOT_READ) for the instance's invalid sample. */
static uint32_t qmask_of_invsample (const struct rhc_instance *i)
{
  if (i->inv_isread)
    return DDS_READ_SAMPLE_STATE;
  return DDS_NOT_READ_SAMPLE_STATE;
}

/* Total number of samples in the instance; the 1-bit inv_exists flag
   doubles as a 0/1 count of the invalid sample. */
static uint32_t inst_nsamples (const struct rhc_instance *i)
{
  return i->nvsamples + (uint32_t) i->inv_exists;
}

/* Number of read samples; the invalid sample counts iff it exists and has
   been read. */
static uint32_t inst_nread (const struct rhc_instance *i)
{
  return i->nvread + (uint32_t) (i->inv_exists && i->inv_isread);
}

static bool inst_is_empty (const struct rhc_instance *i)
{
  return (inst_nsamples (i) == 0);
}

static bool inst_has_read (const struct rhc_instance *i)
{
  return (inst_nread (i) != 0);
}

/* Anything not read is unread, the invalid sample included. */
static bool inst_has_unread (const struct rhc_instance *i)
{
  return inst_nread (i) < inst_nsamples (i);
}
static bool untyped_to_clean_invsample (const struct ddsi_sertype *type, const struct ddsi_serdata *d, void *sample, void **bufptr, void *buflim)
{
  /* ddsi_serdata_untyped_to_sample only deals with the key value and leaves
     the other attributes alone; without intervention those attributes of an
     invalid sample would be garbage that the user must nonetheless free in
     the end.  Dropping the old contents and zeroing the sample beforehand
     solves that problem. */
  ddsi_sertype_free_sample (type, sample, DDS_FREE_CONTENTS);
  ddsi_sertype_zero_sample (type, sample);
  const bool ok = ddsi_serdata_untyped_to_sample (type, d, sample, bufptr, buflim);
  return ok;
}
static uint32_t qmask_of_inst (const struct rhc_instance *inst);
static void free_sample (struct dds_rhc_default *rhc, struct rhc_instance *inst, struct rhc_sample *s);
static void get_trigger_info_cmn (struct trigger_info_cmn *info, struct rhc_instance *inst);
static void get_trigger_info_pre (struct trigger_info_pre *info, struct rhc_instance *inst);
static void init_trigger_info_qcond (struct trigger_info_qcond *qc);
static void drop_instance_noupdate_no_writers (struct dds_rhc_default * __restrict rhc, struct rhc_instance * __restrict * __restrict instptr);
static bool update_conditions_locked (struct dds_rhc_default *rhc, bool called_from_insert, const struct trigger_info_pre *pre, const struct trigger_info_post *post, const struct trigger_info_qcond *trig_qc, const struct rhc_instance *inst);
static void account_for_nonempty_to_empty_transition (struct dds_rhc_default * __restrict rhc, struct rhc_instance * __restrict * __restrict instptr, const char *__restrict traceprefix);
#ifndef NDEBUG
static bool rhc_check_counts_locked (struct dds_rhc_default *rhc, bool check_conds, bool check_qcmask);
#endif
/* Instance ids are approximately uniformly distributed 64-bit numbers (see
   the file header comment), so simply truncating one yields a
   well-distributed hash. */
static uint32_t instance_iid_hash (const void *va)
{
  const struct rhc_instance *inst = va;
  return (uint32_t) inst->iid;
}

static bool instance_iid_eq (const void *va, const void *vb)
{
  const struct rhc_instance *x = va;
  const struct rhc_instance *y = vb;
  return x->iid == y->iid;
}
/* Appends inst to the circular list of non-empty instances and bumps the
   corresponding counter. */
static void add_inst_to_nonempty_list (struct dds_rhc_default *rhc, struct rhc_instance *inst)
{
  rhc->n_nonempty_instances++;
  ddsrt_circlist_append (&rhc->nonempty_instances, &inst->nonempty_list);
}

/* Unlinks inst from the non-empty list; only valid once it has become
   empty. */
static void remove_inst_from_nonempty_list (struct dds_rhc_default *rhc, struct rhc_instance *inst)
{
  assert (inst_is_empty (inst));
  assert (rhc->n_nonempty_instances > 0);
  ddsrt_circlist_remove (&rhc->nonempty_instances, &inst->nonempty_list);
  rhc->n_nonempty_instances--;
}
static struct rhc_instance *oldest_nonempty_instance (const struct dds_rhc_default *rhc)
{
return DDSRT_FROM_CIRCLIST (struct rhc_instance, nonempty_list, ddsrt_circlist_oldest (&rhc->nonempty_instances));
}
static struct rhc_instance *latest_nonempty_instance (const struct dds_rhc_default *rhc)
{
return DDSRT_FROM_CIRCLIST (struct rhc_instance, nonempty_list, ddsrt_circlist_latest (&rhc->nonempty_instances));
}
static struct rhc_instance *next_nonempty_instance (const struct rhc_instance *inst)
{
return DDSRT_FROM_CIRCLIST (struct rhc_instance, nonempty_list, inst->nonempty_list.next);
}
#ifdef DDS_HAS_LIFESPAN
/* Removes one expired sample from its (non-empty) instance: unlinks it from
   the circular sample list, updates the sample counters and read-condition
   triggers, and moves the instance to the empty set if this was its last
   sample.  Called with rhc->lock held (see dds_rhc_default_sample_expired_cb). */
static void drop_expired_samples (struct dds_rhc_default *rhc, struct rhc_sample *sample)
{
struct rhc_instance *inst = sample->inst;
struct trigger_info_pre pre;
struct trigger_info_post post;
struct trigger_info_qcond trig_qc;
assert (!inst_is_empty (inst));
TRACE ("rhc_default %p drop_exp(iid %"PRIx64" wriid %"PRIx64" exp %"PRId64" %s",
rhc, inst->iid, sample->wr_iid, sample->lifespan.t_expire.v, sample->isread ? "read" : "notread");
get_trigger_info_pre (&pre, inst);
init_trigger_info_qcond (&trig_qc);
/* Find prev sample: in case of history depth of 1 this is the sample itself,
* (which is inst->latest). In case of larger history depth the most likely sample
* to be expired is the oldest, in which case inst->latest is the previous
* sample and inst->latest->next points to sample (circular list). We can
* assume that 'sample' is in the list, so a check to avoid infinite loop is not
* required here. */
struct rhc_sample *psample = inst->latest;
while (psample->next != sample)
psample = psample->next;
/* one valid sample fewer, and one read valid sample fewer if it was read */
rhc->n_vsamples--;
if (sample->isread)
{
inst->nvread--;
rhc->n_vread--;
trig_qc.dec_sample_read = true;
}
/* unlink from the circular list, or empty the list entirely if this was the
   only remaining sample */
if (--inst->nvsamples > 0)
{
if (inst->latest == sample)
inst->latest = psample;
psample->next = sample->next;
}
else
{
inst->latest = NULL;
}
trig_qc.dec_conds_sample = sample->conds;
free_sample (rhc, inst, sample);
get_trigger_info_cmn (&post.c, inst);
update_conditions_locked (rhc, false, &pre, &post, &trig_qc, inst);
if (inst_is_empty (inst))
account_for_nonempty_to_empty_transition(rhc, &inst, "; ");
TRACE (")\n");
}
/* Lifespan timer callback: drops every sample whose lifespan has expired at
   tnow and returns the expiry time of the next sample still pending (a
   returned time value of 0 from the admin means "expired now", hence the
   loop condition). */
ddsrt_mtime_t dds_rhc_default_sample_expired_cb(void *hc, ddsrt_mtime_t tnow)
{
struct dds_rhc_default *rhc = hc;
struct rhc_sample *sample;
ddsrt_mtime_t tnext;
ddsrt_mutex_lock (&rhc->lock);
while ((tnext = ddsi_lifespan_next_expired_locked (&rhc->lifespan, tnow, (void **)&sample)).v == 0)
drop_expired_samples (rhc, sample);
ddsrt_mutex_unlock (&rhc->lock);
return tnext;
}
#endif /* DDS_HAS_LIFESPAN */
#ifdef DDS_HAS_DEADLINE_MISSED
/* Deadline timer callback: raises REQUESTED_DEADLINE_MISSED for each
   instance whose deadline has passed, re-registering the instance for its
   next deadline.  Returns the time at which the next deadline expires. */
ddsrt_mtime_t dds_rhc_default_deadline_missed_cb(void *hc, ddsrt_mtime_t tnow)
{
struct dds_rhc_default *rhc = hc;
ddsrt_mtime_t tnext = {0};
uint32_t ninst = 0;
void *vinst;
ddsrt_mutex_lock (&rhc->lock);
// stop after touching all instances to somewhat gracefully handle cases where we can't keep up
// alternatively one could do at most a fixed number at the time
while (ninst++ < rhc->n_instances && (tnext = ddsi_deadline_next_missed_locked (&rhc->deadline, tnow, &vinst)).v == 0)
{
struct rhc_instance *inst = vinst;
const uint32_t deadlines_expired = ddsi_deadline_compute_deadlines_missed (tnow, &inst->deadline, rhc->deadline.dur);
ddsi_deadline_reregister_instance_locked (&rhc->deadline, &inst->deadline, tnow);
inst->wr_iid_islive = 0;
ddsi_status_cb_data_t cb_data;
cb_data.raw_status_id = (int) DDS_REQUESTED_DEADLINE_MISSED_STATUS_ID;
cb_data.extra = deadlines_expired;
cb_data.handle = inst->iid;
cb_data.add = true;
/* the lock is dropped around the status callback (which runs listeners and
   may take arbitrarily long — note the refresh of tnow afterwards) */
ddsrt_mutex_unlock (&rhc->lock);
dds_reader_status_cb (&rhc->reader->m_entity, &cb_data);
ddsrt_mutex_lock (&rhc->lock);
tnow = ddsrt_time_monotonic ();
}
ddsrt_mutex_unlock (&rhc->lock);
return tnext;
}
#endif /* DDS_HAS_DEADLINE_MISSED */
/* Allocates and initialises a default RHC.  reader may be NULL (that mode
   is used by rhc_torture); gv supplies the tkmap and log configuration. */
struct dds_rhc *dds_rhc_default_new_xchecks (dds_reader *reader, struct ddsi_domaingv *gv, const struct ddsi_sertype *type, bool xchecks)
{
  struct dds_rhc_default * const rhc = ddsrt_malloc (sizeof (*rhc));
  memset (rhc, 0, sizeof (*rhc));
  rhc->common.common.ops = &dds_rhc_default_ops;

  /* containers */
  lwregs_init (&rhc->registrations);
  ddsrt_mutex_init (&rhc->lock);
  rhc->instances = ddsrt_hh_new (1, instance_iid_hash, instance_iid_eq);
  ddsrt_circlist_init (&rhc->nonempty_instances);

  /* environment */
  rhc->type = type;
  rhc->reader = reader;
  rhc->tkmap = gv->m_tkmap;
  rhc->gv = gv;
  rhc->xchecks = xchecks;

#ifdef DDS_HAS_LIFESPAN
  ddsi_lifespan_init (gv, &rhc->lifespan, offsetof(struct dds_rhc_default, lifespan), offsetof(struct rhc_sample, lifespan), dds_rhc_default_sample_expired_cb);
#endif
#ifdef DDS_HAS_DEADLINE_MISSED
  /* no reader means no QoS to take the deadline from: use "never" */
  rhc->deadline.dur = (reader != NULL) ? reader->m_entity.m_qos->deadline.deadline : DDS_INFINITY;
  ddsi_deadline_init (gv, &rhc->deadline, offsetof(struct dds_rhc_default, deadline), offsetof(struct rhc_instance, deadline), dds_rhc_default_deadline_missed_cb);
#endif
  return &rhc->common;
}
struct dds_rhc *dds_rhc_default_new (struct dds_reader *reader, const struct ddsi_sertype *type)
{
return dds_rhc_default_new_xchecks (reader, &reader->m_entity.m_domain->gv, type, (reader->m_entity.m_domain->gv.config.enabled_xchecks & DDSI_XCHECK_RHC) != 0);
}
/* No-op: everything is already supplied at construction time ("ignored out
   of laziness"). */
static dds_return_t dds_rhc_default_associate (struct dds_rhc *rhc, dds_reader *reader, const struct ddsi_sertype *type, struct ddsi_tkmap *tkmap)
{
  (void) rhc;
  (void) reader;
  (void) type;
  (void) tkmap;
  return DDS_RETCODE_OK;
}
/* Copies the read-related QoS settings into the RHC. */
static void dds_rhc_default_set_qos (struct ddsi_rhc *rhc_common, const dds_qos_t * qos)
{
  struct dds_rhc_default * const rhc = (struct dds_rhc_default *) rhc_common;
  /* resource limits */
  rhc->max_samples = qos->resource_limits.max_samples;
  rhc->max_instances = qos->resource_limits.max_instances;
  rhc->max_samples_per_instance = qos->resource_limits.max_samples_per_instance;
  /* filtering, ordering, ownership, reliability */
  rhc->minimum_separation = qos->time_based_filter.minimum_separation;
  rhc->by_source_ordering = (qos->destination_order.kind == DDS_DESTINATIONORDER_BY_SOURCE_TIMESTAMP);
  rhc->exclusive_ownership = (qos->ownership.kind == DDS_OWNERSHIP_EXCLUSIVE);
  rhc->reliable = (qos->reliability.kind == DDS_RELIABILITY_RELIABLE);
  /* history: KEEP_ALL is represented as the maximum possible depth */
  assert (qos->history.kind != DDS_HISTORY_KEEP_LAST || qos->history.depth > 0);
  if (qos->history.kind == DDS_HISTORY_KEEP_LAST)
    rhc->history_depth = (uint32_t) qos->history.depth;
  else
    rhc->history_depth = ~0u;
  /* FIXME: updating deadline duration not yet supported
     rhc->deadline.dur = qos->deadline.deadline; */
}
/* Evaluates a query predicate against a (valid) serialised sample by
   deserialising it into the RHC's scratch buffer. */
static bool eval_predicate_sample (const struct dds_rhc_default *rhc, const struct ddsi_serdata *sample, bool (*pred) (const void *sample))
{
  /* What to do if deserialization fails?  Consider it matching or not?
     Claiming a match at least allows the sample to be taken, so the
     application can detect something is amiss; always returning false would
     likely leave a read condition permanently triggered and the application
     looping on it. */
  if (!ddsi_serdata_to_sample (sample, rhc->qcond_eval_samplebuf, NULL, NULL))
    return true;
  return pred (rhc->qcond_eval_samplebuf);
}
/* Evaluates a query predicate against the invalid sample of inst, i.e. the
   key value retained in the tkmap, deserialised into the RHC's scratch
   buffer.

   Mirrors the policy of eval_predicate_sample: if reconstructing the sample
   fails, consider it a match — previously the result of
   untyped_to_clean_invsample was silently ignored and the predicate was
   evaluated against a zeroed sample.  Treating failure as a match at least
   lets the application take the sample and notice something is amiss;
   always evaluating (or returning false) risks an endlessly triggered read
   condition. */
static bool eval_predicate_invsample (const struct dds_rhc_default *rhc, const struct rhc_instance *inst, bool (*pred) (const void *sample))
{
  if (!untyped_to_clean_invsample (rhc->type, inst->tk->m_sample, rhc->qcond_eval_samplebuf, NULL, NULL))
    return true;
  return pred (rhc->qcond_eval_samplebuf);
}
/* Returns storage for a new sample, preferring the sample embedded in the
   instance (this covers KEEP_LAST depth-1 without heap traffic) and falling
   back to the heap when that one is in use. */
static struct rhc_sample *alloc_sample (struct rhc_instance *inst)
{
  if (!inst->a_sample_free)
  {
    /* sizeof (*s) instead of sizeof (struct rhc_sample) gets us type checking */
    struct rhc_sample * const s = ddsrt_malloc (sizeof (*s));
    return s;
  }
  inst->a_sample_free = 0;
#if USE_VALGRIND
  VALGRIND_MAKE_MEM_UNDEFINED (&inst->a_sample, sizeof (inst->a_sample));
#endif
  return &inst->a_sample;
}
/* Releases a sample: drops the serdata reference and (if lifespan support
   is compiled in) the lifespan registration, then returns the storage —
   either back to the instance's embedded slot or to the heap. */
static void free_sample (struct dds_rhc_default *rhc, struct rhc_instance *inst, struct rhc_sample *s)
{
#ifndef DDS_HAS_LIFESPAN
  DDSRT_UNUSED_ARG (rhc);
#endif
  ddsi_serdata_unref (s->sample);
#ifdef DDS_HAS_LIFESPAN
  ddsi_lifespan_unregister_sample_locked (&rhc->lifespan, &s->lifespan);
#endif
  if (s != &inst->a_sample)
    ddsrt_free (s);
  else
  {
    assert (!inst->a_sample_free);
#if USE_VALGRIND
    VALGRIND_MAKE_MEM_NOACCESS (&inst->a_sample, sizeof (inst->a_sample));
#endif
    inst->a_sample_free = 1;
  }
}
/* Removes the (single) invalid sample of inst, keeping the aggregate
   counters consistent and recording the removal in trig_qc for the
   condition-trigger update. */
static void inst_clear_invsample (struct dds_rhc_default *rhc, struct rhc_instance *inst, struct trigger_info_qcond *trig_qc)
{
  assert (inst->inv_exists);
  assert (trig_qc->dec_conds_invsample == 0);
  trig_qc->dec_conds_invsample = inst->conds;
  inst->inv_exists = 0;
  rhc->n_invsamples--;
  if (inst->inv_isread)
  {
    rhc->n_invread--;
    trig_qc->dec_invsample_read = true;
  }
}

static void inst_clear_invsample_if_exists (struct dds_rhc_default *rhc, struct rhc_instance *inst, struct trigger_info_qcond *trig_qc)
{
  if (!inst->inv_exists)
    return;
  inst_clear_invsample (rhc, inst, trig_qc);
}
/* Adds (or refreshes) the instance's invalid sample, signalling a state
   change when there are no samples to carry it; *nda is set when a "data
   available" notification is called for. */
static void inst_set_invsample (struct dds_rhc_default *rhc, struct rhc_instance *inst, struct trigger_info_qcond *trig_qc, bool * __restrict nda)
{
  if (inst->inv_exists && !inst->inv_isread)
  {
    /* an unread invalid sample is already pending; nothing to add */
    /* FIXME: should this indeed trigger a "notify data available" event?*/
    *nda = true;
    return;
  }
  /* Obviously optimisable, but that is perhaps not worth the bother */
  inst_clear_invsample_if_exists (rhc, inst, trig_qc);
  assert (trig_qc->inc_conds_invsample == 0);
  trig_qc->inc_conds_invsample = inst->conds;
  inst->inv_exists = 1;
  inst->inv_isread = 0;
  rhc->n_invsamples++;
  *nda = true;
}
/* Frees an instance that holds no samples: releases its deadline
   registration (if any) and its tkmap reference before freeing the memory. */
static void free_empty_instance (struct rhc_instance *inst, struct dds_rhc_default *rhc)
{
  assert (inst_is_empty (inst));
#ifdef DDS_HAS_DEADLINE_MISSED
  if (inst->deadline_reg)
    ddsi_deadline_unregister_instance_locked (&rhc->deadline, &inst->deadline);
#endif
  ddsi_tkmap_instance_unref (rhc->tkmap, inst->tk);
  ddsrt_free (inst);
}
/* Tears down an instance during RHC destruction: frees all valid samples,
   discards any invalid sample, fixes up the cache-wide counters and the
   nonempty-instances list, then frees the (now empty) instance itself. */
static void free_instance_rhc_free (struct rhc_instance *inst, struct dds_rhc_default *rhc)
{
  const bool was_empty = inst_is_empty (inst);
  struct trigger_info_qcond dummy_trig_qc;
  struct rhc_sample *samp = inst->latest;
  if (samp != NULL)
  {
    /* walk the circular old->new list exactly once, freeing each sample */
    struct rhc_sample * const stop = inst->latest;
    do {
      struct rhc_sample * const nxt = samp->next;
      free_sample (rhc, inst, samp);
      samp = nxt;
    } while (samp != stop);
    rhc->n_vsamples -= inst->nvsamples;
    rhc->n_vread -= inst->nvread;
    inst->nvsamples = 0;
    inst->nvread = 0;
  }
#ifndef NDEBUG
  /* inst_clear_invsample asserts dec_conds_invsample == 0; keep it happy */
  memset (&dummy_trig_qc, 0, sizeof (dummy_trig_qc));
#endif
  inst_clear_invsample_if_exists (rhc, inst, &dummy_trig_qc);
  if (!was_empty)
    remove_inst_from_nonempty_list (rhc, inst);
  if (inst->isnew)
    rhc->n_new--;
  free_empty_instance (inst, rhc);
}
/* Returns the total number of (valid + invalid) samples in the cache.
   When the count is non-zero the RHC lock is left held and the caller is
   responsible for releasing it; when zero, the lock is released here. */
static uint32_t dds_rhc_default_lock_samples (struct dds_rhc *rhc_common)
{
  struct dds_rhc_default * const rhc = (struct dds_rhc_default *) rhc_common;
  ddsrt_mutex_lock (&rhc->lock);
  const uint32_t count = rhc->n_vsamples + rhc->n_invsamples;
  if (count == 0)
    ddsrt_mutex_unlock (&rhc->lock);
  return count;
}
/* Adapter matching the ddsrt_hh_enum callback signature: node is the
   rhc_instance, arg is the dds_rhc_default. */
static void free_instance_rhc_free_wrap (void *node, void *arg)
{
  free_instance_rhc_free (node, arg);
}
/* Destroys the reader history cache: expires/stops the auxiliary timers,
   frees every instance (and with them all samples), then releases the
   supporting data structures.  The teardown order is significant. */
static void dds_rhc_default_free (struct ddsi_rhc *rhc_common)
{
  struct dds_rhc_default *rhc = (struct dds_rhc_default *) rhc_common;
#ifdef DDS_HAS_LIFESPAN
  /* force-expire all samples with a lifespan, then shut the admin down
     before instances are freed */
  dds_rhc_default_sample_expired_cb (rhc, DDSRT_MTIME_NEVER);
  ddsi_lifespan_fini (&rhc->lifespan);
#endif
#ifdef DDS_HAS_DEADLINE_MISSED
  /* stop the deadline timer first; per-instance unregistration happens as
     each instance is freed, and the admin itself is finalized below */
  ddsi_deadline_stop (&rhc->deadline);
#endif
  ddsrt_hh_enum (rhc->instances, free_instance_rhc_free_wrap, rhc);
  /* freeing every instance must have emptied the nonempty list */
  assert (ddsrt_circlist_isempty (&rhc->nonempty_instances));
#ifdef DDS_HAS_DEADLINE_MISSED
  ddsi_deadline_fini (&rhc->deadline);
#endif
  ddsrt_hh_free (rhc->instances);
  lwregs_fini (&rhc->registrations);
  if (rhc->qcond_eval_samplebuf != NULL)
    ddsi_sertype_free_sample (rhc->type, rhc->qcond_eval_samplebuf, DDS_FREE_ALL);
  ddsrt_mutex_destroy (&rhc->lock);
  ddsrt_free (rhc);
}
/* Initializes common trigger info for the "no matching instance" case:
   an all-ones qmask and neither read nor not-read data present. */
static void init_trigger_info_cmn_nonmatch (struct trigger_info_cmn *info)
{
  info->has_read = false;
  info->has_not_read = false;
  info->qminst = ~0u;
}
/* Snapshots the instance's trigger-relevant state: its condition qmask and
   whether it currently holds read and/or not-read samples. */
static void get_trigger_info_cmn (struct trigger_info_cmn *info, struct rhc_instance *inst)
{
  info->has_read = inst_has_read (inst);
  info->has_not_read = inst_has_unread (inst);
  info->qminst = qmask_of_inst (inst);
}
/* Records the pre-update trigger state of an instance (just the common
   part; the qcond part is tracked separately via trigger_info_qcond). */
static void get_trigger_info_pre (struct trigger_info_pre *info, struct rhc_instance *inst)
{
  get_trigger_info_cmn (&info->c, inst);
}
/* Resets the query-condition delta tracking: no condition bits added or
   removed, and no read-flag transitions recorded yet. */
static void init_trigger_info_qcond (struct trigger_info_qcond *qc)
{
  /* removed-side state */
  qc->dec_conds_invsample = 0;
  qc->dec_conds_sample = 0;
  qc->dec_invsample_read = false;
  qc->dec_sample_read = false;
  /* added-side state */
  qc->inc_conds_invsample = 0;
  qc->inc_conds_sample = 0;
  qc->inc_invsample_read = false;
  qc->inc_sample_read = false;
}
/* Returns true iff an update changed anything that could affect condition
   triggers: the common instance state, or (when query conditions exist)
   the per-sample condition bits / read flags recorded in trig_qc. */
static bool trigger_info_differs (const struct dds_rhc_default *rhc, const struct trigger_info_pre *pre, const struct trigger_info_post *post, const struct trigger_info_qcond *trig_qc)
{
  if (pre->c.qminst != post->c.qminst)
    return true;
  if (pre->c.has_read != post->c.has_read)
    return true;
  if (pre->c.has_not_read != post->c.has_not_read)
    return true;
  /* common state unchanged; qcond deltas only matter if any queries exist */
  if (rhc->nqconds == 0)
    return false;
  if (trig_qc->dec_conds_invsample != trig_qc->inc_conds_invsample)
    return true;
  if (trig_qc->dec_conds_sample != trig_qc->inc_conds_sample)
    return true;
  if (trig_qc->dec_invsample_read != trig_qc->inc_invsample_read)
    return true;
  return trig_qc->dec_sample_read != trig_qc->inc_sample_read;
}
/* Inserts a new sample into the instance's circular history list, either by
   recycling the oldest sample (history full) or by allocating a new one
   (subject to resource limits).  Returns false and fills cb_data with a
   SAMPLE_REJECTED status when a resource limit is hit; returns true and
   sets *nda (notify-data-available) on success.  Query-condition and
   read-flag deltas are recorded in trig_qc for the caller's trigger
   evaluation. */
static bool add_sample (struct dds_rhc_default *rhc, struct rhc_instance *inst, const struct ddsi_writer_info *wrinfo, const struct ddsi_serdata *sample, ddsi_status_cb_data_t *cb_data, struct trigger_info_qcond *trig_qc, bool * __restrict nda)
{
  struct rhc_sample *s;
  /* Adding a sample always clears an invalid sample (because the information
     contained in the invalid sample - the instance state and the generation
     counts - are included in the sample). While this would be place to do it,
     we do it later to avoid having to roll back on allocation failure */
  /* We don't do backfilling in BY_SOURCE mode -- we could, but
     choose not to -- and having already filtered out samples
     preceding inst->latest, we can simply insert it without any
     searching */
  if (inst->nvsamples == rhc->history_depth)
  {
    /* replace oldest sample; latest points to the latest one, the
       list is circular from old -> new, so latest->next is the oldest */
    inst_clear_invsample_if_exists (rhc, inst, trig_qc);
    assert (inst->latest != NULL);
    s = inst->latest->next;
    assert (trig_qc->dec_conds_sample == 0);
    /* release the recycled sample's payload and lifespan registration; the
       rhc_sample struct itself is reused for the new sample below */
    ddsi_serdata_unref (s->sample);
#ifdef DDS_HAS_LIFESPAN
    ddsi_lifespan_unregister_sample_locked (&rhc->lifespan, &s->lifespan);
#endif
    /* record what is being dropped so trigger evaluation sees the delta */
    trig_qc->dec_sample_read = s->isread;
    trig_qc->dec_conds_sample = s->conds;
    if (s->isread)
    {
      inst->nvread--;
      rhc->n_vread--;
    }
  }
  else
  {
    /* Check if resource max_samples QoS exceeded */
    if (rhc->reader && rhc->max_samples != DDS_LENGTH_UNLIMITED && rhc->n_vsamples >= (uint32_t) rhc->max_samples)
    {
      cb_data->raw_status_id = (int) DDS_SAMPLE_REJECTED_STATUS_ID;
      cb_data->extra = DDS_REJECTED_BY_SAMPLES_LIMIT;
      cb_data->handle = inst->iid;
      cb_data->add = true;
      return false;
    }
    /* Check if resource max_samples_per_instance QoS exceeded */
    if (rhc->reader && rhc->max_samples_per_instance != DDS_LENGTH_UNLIMITED && inst->nvsamples >= (uint32_t) rhc->max_samples_per_instance)
    {
      cb_data->raw_status_id = (int) DDS_SAMPLE_REJECTED_STATUS_ID;
      cb_data->extra = DDS_REJECTED_BY_SAMPLES_PER_INSTANCE_LIMIT;
      cb_data->handle = inst->iid;
      cb_data->add = true;
      return false;
    }
    /* add new latest sample */
    s = alloc_sample (inst);
    /* clear only after alloc_sample so a rejection above needs no rollback */
    inst_clear_invsample_if_exists (rhc, inst, trig_qc);
    if (inst->latest == NULL)
    {
      /* first sample: a one-element circular list */
      s->next = s;
    }
    else
    {
      /* splice in as the new latest (just after the current latest) */
      s->next = inst->latest->next;
      inst->latest->next = s;
    }
    inst->nvsamples++;
    rhc->n_vsamples++;
  }
  /* fill in the (new or recycled) sample */
  s->sample = ddsi_serdata_ref (sample); /* drops const (tho refcount does change) */
  s->wr_iid = wrinfo->iid;
  s->isread = false;
  s->disposed_gen = inst->disposed_gen;
  s->no_writers_gen = inst->no_writers_gen;
#ifdef DDS_HAS_LIFESPAN
  s->inst = inst;
  s->lifespan.t_expire = wrinfo->lifespan_exp;
  ddsi_lifespan_register_sample_locked (&rhc->lifespan, &s->lifespan);
#endif
  /* evaluate all registered query conditions against the new sample and
     cache the matches as a bitmask */
  s->conds = 0;
  if (rhc->nqconds != 0)
  {
    for (dds_readcond *rc = rhc->conds; rc != NULL; rc = rc->m_next)
      if (rc->m_query.m_filter != NULL && eval_predicate_sample (rhc, s->sample, rc->m_query.m_filter))
        s->conds |= rc->m_query.m_qcmask;
  }
  trig_qc->inc_conds_sample = s->conds;
  inst->latest = s;
  *nda = true;
  return true;
}
static void content_filter_make_sampleinfo (struct dds_sample_info *si, const struct ddsi_serdata *sample, const struct rhc_instance *inst, uint64_t wr_iid, uint64_t iid)
{
si->sample_state = DDS_SST_NOT_READ;
si->publication_handle = wr_iid;
si->source_timestamp = sample->timestamp.v;
si->sample_rank = 0;
si->generation_rank = 0;
si->absolute_generation_rank = 0;
si->valid_data = true;
if (inst)
{
si->view_state = inst->isnew ? DDS_VST_NEW : DDS_VST_OLD;
si->instance_state = inst->isdisposed ? DDS_IST_NOT_ALIVE_DISPOSED : (inst->wrcount == 0) ? DDS_IST_NOT_ALIVE_NO_WRITERS : DDS_IST_ALIVE;
si->instance_handle = inst->iid;
si->disposed_generation_count = inst->disposed_gen;
si->no_writers_generation_count = inst->no_writers_gen;
}
else
{
si->view_state = DDS_VST_NEW;
si->instance_state = DDS_IST_ALIVE;
si->instance_handle = iid;
si->disposed_generation_count = 0;
si->no_writers_generation_count = 0;
}
}
static bool content_filter_accepts (const dds_reader *reader, const struct ddsi_serdata *sample, const struct rhc_instance *inst, uint64_t wr_iid, uint64_t iid)
{
bool ret = true;
if (reader)
{
const struct dds_topic *tp = reader->m_topic;
switch (tp->m_filter.mode)
{
case DDS_TOPIC_FILTER_NONE:
ret = true;
break;
case DDS_TOPIC_FILTER_SAMPLEINFO_ARG: {
struct dds_sample_info si;
content_filter_make_sampleinfo (&si, sample, inst, wr_iid, iid);
ret = tp->m_filter.f.sampleinfo_arg (&si, tp->m_filter.arg);
break;
}
case DDS_TOPIC_FILTER_SAMPLE:
case DDS_TOPIC_FILTER_SAMPLE_ARG:
case DDS_TOPIC_FILTER_SAMPLE_SAMPLEINFO_ARG: {
char *tmp;
tmp = ddsi_sertype_alloc_sample (tp->m_stype);
if (!ddsi_serdata_to_sample (sample, tmp, NULL, NULL))
{