/**
 * @file printer_lyb.c
 * @author Michal Vasko <mvasko@cesnet.cz>
 * @brief LYB printer for libyang data structure
 *
 * Copyright (c) 2020 CESNET, z.s.p.o.
 *
 * This source code is licensed under BSD 3-Clause License (the "License").
 * You may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://opensource.org/licenses/BSD-3-Clause
 */

#include "lyb.h"

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "common.h"
#include "compat.h"
#include "log.h"
#include "printer.h"
#include "printer_internal.h"
#include "tree_data_internal.h"
#include "tree_schema.h"
#include "tree_schema_internal.h"

/**
 * @brief Hash table equal callback for checking hash equality only.
 */
static int
lyb_hash_equal_cb(void *UNUSED(val1_p), void *UNUSED(val2_p), int UNUSED(mod), void *UNUSED(cb_data))
{
    /* for this purpose, if hash matches, the value does also, we do not want 2 values to have the same hash */
    return 1;
}

/**
 * @brief Hash table equal callback for checking value pointer equality only.
 */
static int
lyb_ptr_equal_cb(void *val1_p, void *val2_p, int UNUSED(mod), void *UNUSED(cb_data))
{
    struct lysc_node *val1 = *(struct lysc_node **)val1_p;
    struct lysc_node *val2 = *(struct lysc_node **)val2_p;

    if (val1 == val2) {
        return 1;
    }
    return 0;
}

/**
 * @brief Check that sibling collision hash is safe to insert into hash table.
 *
 * @param[in] ht Hash table.
 * @param[in] sibling Hashed sibling.
 * @param[in] ht_col_id Sibling hash collision ID.
 * @param[in] compare_col_id Last collision ID to compare with.
 * @return LY_SUCCESS when the whole hash sequence does not collide,
 * @return LY_EEXIST when the whole hash sequence collides.
 */
static LY_ERR
lyb_hash_sequence_check(struct hash_table *ht, struct lysc_node *sibling, int ht_col_id, int compare_col_id)
{
    int j;
    struct lysc_node **col_node;

    /* get the first node inserted with last hash col ID ht_col_id */
    if (lyht_find(ht, &sibling, lyb_hash(sibling, ht_col_id), (void **)&col_node)) {
        /* there is none, a valid situation */
        return LY_SUCCESS;
    }

    lyht_set_cb(ht, lyb_ptr_equal_cb);
    do {
        for (j = compare_col_id; j > -1; --j) {
            if (lyb_hash(sibling, j) != lyb_hash(*col_node, j)) {
                /* one non-colliding hash */
                break;
            }
        }
        if (j == -1) {
            /* all whole hash sequences of nodes inserted with last hash col ID compare_col_id collide */
            lyht_set_cb(ht, lyb_hash_equal_cb);
            return LY_EEXIST;
        }

        /* get next node inserted with last hash col ID ht_col_id */
    } while (!lyht_find_next(ht, col_node, lyb_hash(*col_node, ht_col_id), (void **)&col_node));

    lyht_set_cb(ht, lyb_hash_equal_cb);
    return LY_SUCCESS;
}

/**
 * @brief Hash all the siblings and add them also into a separate hash table.
 *
 * @param[in] sibling Any sibling in all the siblings on one level.
 * @param[out] ht_p Created hash table.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_hash_siblings(struct lysc_node *sibling, struct hash_table **ht_p)
{
    struct hash_table *ht;
    const struct lysc_node *parent;
    const struct lys_module *mod;
    int i, j;

    ht = lyht_new(1, sizeof(struct lysc_node *), lyb_hash_equal_cb, NULL, 1);
    LY_CHECK_ERR_RET(!ht, LOGMEM(sibling->module->ctx), LY_EMEM);

    parent = lysc_data_parent(sibling);
    mod = sibling->module;

    sibling = NULL;
    /* ignore features so that their state does not affect hashes */
    while ((sibling = (struct lysc_node *)lys_getnext(sibling, parent, mod->compiled, LYS_GETNEXT_NOSTATECHECK))) {
        /* find the first non-colliding hash (or specifically non-colliding hash sequence) */
        for (i = 0; i < LYB_HASH_BITS; ++i) {
            /* check that we are not colliding with nodes inserted with a lower collision ID than ours */
            for (j = i - 1; j > -1; --j) {
                if (lyb_hash_sequence_check(ht, sibling, j, i)) {
                    break;
                }
            }
            if (j > -1) {
                /* some check failed, we must use a higher collision ID */
                continue;
            }

            /* try to insert node with the current collision ID */
            if (!lyht_insert_with_resize_cb(ht, &sibling, lyb_hash(sibling, i), lyb_ptr_equal_cb, NULL)) {
                /* success, no collision */
                break;
            }

            /* make sure we really cannot insert it with this hash col ID (meaning the whole hash sequence is colliding) */
            if (i && !lyb_hash_sequence_check(ht, sibling, i, i)) {
                /* it can be inserted after all, even though there is already a node with the same last collision ID */
                lyht_set_cb(ht, lyb_ptr_equal_cb);
                if (lyht_insert(ht, &sibling, lyb_hash(sibling, i), NULL)) {
                    LOGINT(sibling->module->ctx);
                    lyht_set_cb(ht, lyb_hash_equal_cb);
                    lyht_free(ht);
                    return LY_EINT;
                }
                lyht_set_cb(ht, lyb_hash_equal_cb);
                break;
            }
            /* there is still another colliding schema node with the same hash sequence, try higher collision ID */
        }

        if (i == LYB_HASH_BITS) {
            /* wow */
            LOGINT(sibling->module->ctx);
            lyht_free(ht);
            return LY_EINT;
        }
    }

    /* change val equal callback so that the HT is usable for finding value hashes */
    lyht_set_cb(ht, lyb_ptr_equal_cb);

    *ht_p = ht;
    return LY_SUCCESS;
}
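
/*
 * Worked example of the collision-ID scheme used above (illustrative only, the
 * hash values are made up; it assumes the usual 8-bit LYB_HASH where
 * LYB_HASH_COLLISION_ID is the top bit, 0x80):
 *
 *   - collision ID 0: lyb_hash(node, 0) has bit 0x80 set, e.g. 0xA3; if it is
 *     unique among the siblings, this single byte identifies the node.
 *   - collision ID 1: if two siblings share the same 0x80-hash, one of them is
 *     re-hashed with collision ID 1, which sets bit 0x40 instead, e.g. 0x5C;
 *     the printer then emits 0x5C followed by the colliding 0x80-hash so the
 *     node can be resolved unambiguously (see lyb_print_schema_hash()).
 *
 * lyb_hash_siblings() only guarantees that, for every sibling, some such hash
 * sequence is unique within the created hash table.
 */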

/**
 * @brief Find node hash in a hash table.
 *
 * @param[in] ht Hash table to search in.
 * @param[in] node Node to find.
 * @param[out] hash_p First non-colliding hash found.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_hash_find(struct hash_table *ht, struct lysc_node *node, LYB_HASH *hash_p)
{
    LYB_HASH hash;
    uint32_t i;

    for (i = 0; i < LYB_HASH_BITS; ++i) {
        hash = lyb_hash(node, i);
        if (!hash) {
            LOGINT_RET(node->module->ctx);
        }

        if (!lyht_find(ht, &node, hash, NULL)) {
            /* success, no collision */
            break;
        }
    }
    /* cannot happen, we already calculated the hash */
    if (i == LYB_HASH_BITS) {
        LOGINT_RET(node->module->ctx);
    }

    *hash_p = hash;
    return LY_SUCCESS;
}

/**
 * @brief Write LYB data fully handling the metadata.
 *
 * @param[in] out Out structure.
 * @param[in] buf Source buffer.
 * @param[in] count Number of bytes to write.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_write(struct ly_out *out, const uint8_t *buf, size_t count, struct lyd_lyb_ctx *lybctx)
{
    LY_ARRAY_SIZE_TYPE u;
    struct lyd_lyb_subtree *full, *iter;
    ssize_t r, to_write;
    uint8_t meta_buf[LYB_META_BYTES];

    while (1) {
        /* check for full data chunks */
        to_write = count;
        full = NULL;
        LY_ARRAY_FOR(lybctx->subtrees, u) {
            /* we want the innermost chunks resolved first, so replace previous full chunks */
            if (lybctx->subtrees[u].written + to_write >= LYB_SIZE_MAX) {
                /* full chunk, do not write more than allowed */
                to_write = LYB_SIZE_MAX - lybctx->subtrees[u].written;
                full = &lybctx->subtrees[u];
            }
        }

        if (!full && !count) {
            break;
        }

        /* we are actually writing some data, not just finishing another chunk */
        if (to_write) {
            r = ly_write(out, (char *)buf, to_write);
            if (r < to_write) {
                return LY_ESYS;
            }
            lybctx->byte_count += r;

            LY_ARRAY_FOR(lybctx->subtrees, u) {
                /* increase all written counters */
                lybctx->subtrees[u].written += r;
                assert(lybctx->subtrees[u].written <= LYB_SIZE_MAX);
            }
            /* decrease count/buf */
            count -= r;
            buf += r;
        }

        if (full) {
            /* write the meta information (inner chunk count and chunk size) */
            meta_buf[0] = full->written & 0xFF;
            meta_buf[1] = full->inner_chunks & 0xFF;

            r = ly_write_skipped(out, full->position, (char *)meta_buf, LYB_META_BYTES);
            if (r < 0) {
                return LY_ESYS;
            }
            /* these bytes were already counted */

            /* zero written and inner chunks */
            full->written = 0;
            full->inner_chunks = 0;

            /* skip space for another chunk size */
            r = ly_write_skip(out, LYB_META_BYTES, &full->position);
            if (r < LYB_META_BYTES) {
                return LY_ESYS;
            }
            lybctx->byte_count += r;

            /* increase inner chunk count */
            for (iter = &lybctx->subtrees[0]; iter != full; ++iter) {
                if (iter->inner_chunks == LYB_INCHUNK_MAX) {
                    LOGINT(lybctx->ctx);
                    return LY_EINT;
                }
                ++iter->inner_chunks;
            }
        }
    }

    return LY_SUCCESS;
}
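
/*
 * Sketch of the chunk mechanism handled by lyb_write(), assuming LYB_META_BYTES
 * is 2 (1 byte chunk size + 1 byte inner chunk count) and LYB_SIZE_MAX fits in
 * that single size byte:
 *
 *   [size][inner chunks][ ...up to LYB_SIZE_MAX bytes of subtree data... ]
 *
 * When a chunk fills up, its metadata bytes (reserved earlier with
 * ly_write_skip()) are patched via ly_write_skipped(), a new metadata slot is
 * reserved, and every enclosing subtree counts one more inner chunk. The
 * recorded sizes are what let a reader skip over data it does not recognize.
 */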

/**
 * @brief Stop the current subtree - write its final metadata.
 *
 * @param[in] out Out structure.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_write_stop_subtree(struct ly_out *out, struct lyd_lyb_ctx *lybctx)
{
    ssize_t r;
    uint8_t meta_buf[LYB_META_BYTES];

    /* write the meta chunk information */
    meta_buf[0] = LYB_LAST_SUBTREE(lybctx).written & 0xFF;
    meta_buf[1] = LYB_LAST_SUBTREE(lybctx).inner_chunks & 0xFF;

    r = ly_write_skipped(out, LYB_LAST_SUBTREE(lybctx).position, (char *)&meta_buf, LYB_META_BYTES);
    if (r < 0) {
        return LY_ESYS;
    }
    /* do not count these bytes */

    LY_ARRAY_DECREMENT(lybctx->subtrees);
    return LY_SUCCESS;
}

/**
 * @brief Start a new subtree - skip bytes for its metadata.
 *
 * @param[in] out Out structure.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_write_start_subtree(struct ly_out *out, struct lyd_lyb_ctx *lybctx)
{
    ssize_t r;
    LY_ARRAY_SIZE_TYPE u;

    if (!lybctx->subtrees) {
        u = 0;
    } else {
        u = LY_ARRAY_SIZE(lybctx->subtrees);
    }
    if (u == lybctx->subtree_size) {
        LY_ARRAY_CREATE_RET(lybctx->ctx, lybctx->subtrees, u + LYB_SUBTREE_STEP, LY_EMEM);
        lybctx->subtree_size = u + LYB_SUBTREE_STEP;
    }

    LY_ARRAY_INCREMENT(lybctx->subtrees);
    LYB_LAST_SUBTREE(lybctx).written = 0;
    LYB_LAST_SUBTREE(lybctx).inner_chunks = 0;

    /* another inner chunk */
    for (u = 0; u < LY_ARRAY_SIZE(lybctx->subtrees) - 1; ++u) {
        if (lybctx->subtrees[u].inner_chunks == LYB_INCHUNK_MAX) {
            LOGINT(lybctx->ctx);
            return LY_EINT;
        }
        ++lybctx->subtrees[u].inner_chunks;
    }

    r = ly_write_skip(out, LYB_META_BYTES, &LYB_LAST_SUBTREE(lybctx).position);
    if (r < LYB_META_BYTES) {
        return LY_ESYS;
    }
    lybctx->byte_count += r;

    return LY_SUCCESS;
}

/**
 * @brief Write a number.
 *
 * @param[in] num Number to write.
 * @param[in] bytes Actual accessible bytes of @p num.
 * @param[in] out Out structure.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_write_number(uint64_t num, size_t bytes, struct ly_out *out, struct lyd_lyb_ctx *lybctx)
{
    /* correct byte order */
    num = htole64(num);

    return lyb_write(out, (uint8_t *)&num, bytes, lybctx);
}

/**
 * @brief Write a string.
 *
 * @param[in] str String to write.
 * @param[in] str_len Length of @p str.
 * @param[in] with_length Whether to precede the string with its length.
 * @param[in] out Out structure.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_write_string(const char *str, size_t str_len, int with_length, struct ly_out *out, struct lyd_lyb_ctx *lybctx)
{
    if (!str) {
        str = "";
    }
    if (!str_len) {
        str_len = strlen(str);
    }

    if (with_length) {
        /* print length on 2 bytes */
        if (str_len > UINT16_MAX) {
            LOGINT(lybctx->ctx);
            return LY_EINT;
        }
        LY_CHECK_RET(lyb_write_number(str_len, 2, out, lybctx));
    }

    /* lyb_write() returns LY_ERR and already updates the byte count, so just propagate its result */
    LY_CHECK_RET(lyb_write(out, (const uint8_t *)str, str_len, lybctx));

    return LY_SUCCESS;
}
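
/*
 * Illustrative encoding produced by lyb_write_string() with with_length set
 * (the length is written little-endian by lyb_write_number()):
 *
 *   "default"  ->  0x07 0x00 'd' 'e' 'f' 'a' 'u' 'l' 't'
 *
 * Without with_length only the raw characters are written and the surrounding
 * subtree/chunk metadata is what delimits the value.
 */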

/**
 * @brief Print YANG module info.
 *
 * @param[in] out Out structure.
 * @param[in] mod Module to print.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_model(struct ly_out *out, const struct lys_module *mod, struct lyd_lyb_ctx *lybctx)
{
    int r;
    uint16_t revision;

    /* model name length and model name */
    if (mod) {
        LY_CHECK_RET(lyb_write_string(mod->name, 0, 1, out, lybctx));
    } else {
        LY_CHECK_RET(lyb_write_string("", 0, 1, out, lybctx));
    }

    /* model revision as XXXX XXXX XXXX XXXX (2B) (year is offset from 2000)
     * YYYY YYYM MMMD DDDD */
    revision = 0;
    if (mod && mod->revision) {
        r = atoi(mod->revision);
        r -= 2000;
        r <<= 9;

        revision |= r;

        r = atoi(mod->revision + 5);
        r <<= 5;

        revision |= r;

        r = atoi(mod->revision + 8);

        revision |= r;
    }
    LY_CHECK_RET(lyb_write_number(revision, sizeof revision, out, lybctx));

    return LY_SUCCESS;
}
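
/*
 * Worked example of the revision packing above, for mod->revision "2020-06-29":
 *
 *   year  2020 - 2000 = 20  ->  20 << 9 = 0x2800
 *   month    6              ->   6 << 5 = 0x00C0
 *   day     29              ->            0x001D
 *
 * giving revision = 0x28DD, which lyb_write_number() emits as the two
 * little-endian bytes 0xDD 0x28.
 */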

/**
 * @brief Print all used YANG modules.
 *
 * @param[in] out Out structure.
 * @param[in] root Data root.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_data_models(struct ly_out *out, const struct lyd_node *root, struct lyd_lyb_ctx *lybctx)
{
    struct ly_set *set;
    LY_ARRAY_SIZE_TYPE u;
    LY_ERR ret = LY_SUCCESS;
    struct lys_module *mod;
    const struct lyd_node *node;
    uint32_t i;

    set = ly_set_new();
    LY_CHECK_RET(!set, LY_EMEM);

    /* collect all data node modules */
    LY_LIST_FOR(root, node) {
        if (!node->schema) {
            continue;
        }

        mod = node->schema->module;
        ly_set_add(set, mod, 0);

        /* add also their modules deviating or augmenting them */
        LY_ARRAY_FOR(mod->compiled->deviated_by, u) {
            ly_set_add(set, mod->compiled->deviated_by[u], 0);
        }
        LY_ARRAY_FOR(mod->compiled->augmented_by, u) {
            ly_set_add(set, mod->compiled->augmented_by[u], 0);
        }
    }

    /* now write module count on 2 bytes */
    LY_CHECK_GOTO(ret = lyb_write_number(set->count, 2, out, lybctx), cleanup);

    /* and all the used models */
    for (i = 0; i < set->count; ++i) {
        LY_CHECK_GOTO(ret = lyb_print_model(out, set->objs[i], lybctx), cleanup);
    }

cleanup:
    ly_set_free(set, NULL);
    return ret;
}

/**
 * @brief Print LYB magic number.
 *
 * @param[in] out Out structure.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_magic_number(struct ly_out *out, struct lyd_lyb_ctx *lybctx)
{
    int r;
    uint32_t magic_number;

    /* 'l', 'y', 'b' - 0x6c7962 */
    ((char *)&magic_number)[0] = 'l';
    ((char *)&magic_number)[1] = 'y';
    ((char *)&magic_number)[2] = 'b';

    r = ly_write(out, (char *)&magic_number, 3);
    if (r < 3) {
        return LY_ESYS;
    }
    lybctx->byte_count += 3;

    return LY_SUCCESS;
}

/**
 * @brief Print LYB header.
 *
 * @param[in] out Out structure.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_header(struct ly_out *out, struct lyd_lyb_ctx *lybctx)
{
    int r;
    uint8_t byte = 0;

    /* version, future flags */
    byte |= LYB_VERSION_NUM;

    r = ly_write(out, (char *)&byte, 1);
    if (r < 1) {
        return LY_ESYS;
    }
    lybctx->byte_count += 1;

    return LY_SUCCESS;
}

/**
 * @brief Print opaque prefixes.
 *
 * @param[in] out Out structure.
 * @param[in] prefs Prefixes to print.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_opaq_prefixes(struct ly_out *out, const struct ly_prefix *prefs, struct lyd_lyb_ctx *lybctx)
{
    uint8_t count;
    LY_ARRAY_SIZE_TYPE u;

    if (prefs && (LY_ARRAY_SIZE(prefs) > UINT8_MAX)) {
        LOGERR(lybctx->ctx, LY_EINT, "Maximum supported number of prefixes is %u.", UINT8_MAX);
        return LY_EINT;
    }

    count = prefs ? LY_ARRAY_SIZE(prefs) : 0;

    /* write number of prefixes on 1 byte */
    LY_CHECK_RET(lyb_write(out, &count, 1, lybctx));

    /* write all the prefixes */
    LY_ARRAY_FOR(prefs, u) {
        /* prefix */
        LY_CHECK_RET(lyb_write_string(prefs[u].pref, 0, 1, out, lybctx));

        /* namespace */
        LY_CHECK_RET(lyb_write_string(prefs[u].ns, 0, 1, out, lybctx));
    }

    return LY_SUCCESS;
}

/**
 * @brief Print opaque node.
 *
 * @param[in] opaq Node to print.
 * @param[in] out Out structure.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_opaq(struct lyd_node_opaq *opaq, struct ly_out *out, struct lyd_lyb_ctx *lybctx)
{
    /* prefix */
    LY_CHECK_RET(lyb_write_string(opaq->prefix.pref, 0, 1, out, lybctx));

    /* namespace */
    LY_CHECK_RET(lyb_write_string(opaq->prefix.ns, 0, 1, out, lybctx));

    /* name */
    LY_CHECK_RET(lyb_write_string(opaq->name, 0, 1, out, lybctx));

    /* value prefixes */
    LY_CHECK_RET(lyb_print_opaq_prefixes(out, opaq->val_prefs, lybctx));

    /* format */
    LY_CHECK_RET(lyb_write_number(opaq->format, 1, out, lybctx));

    /* value */
    LY_CHECK_RET(lyb_write_string(opaq->value, 0, 0, out, lybctx));

    return LY_SUCCESS;
}

/**
 * @brief Print anydata node.
 *
 * @param[in] anydata Node to print.
 * @param[in] out Out structure.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_anydata(struct lyd_node_any *anydata, struct ly_out *out, struct lyd_lyb_ctx *lybctx)
{
    LY_ERR ret = LY_SUCCESS;
    LYD_ANYDATA_VALUETYPE value_type;
    int len;
    char *buf = NULL;
    const char *str;
    struct ly_out *out2 = NULL;

    if (anydata->value_type == LYD_ANYDATA_DATATREE) {
        /* will be printed as a nested LYB data tree */
        value_type = LYD_ANYDATA_LYB;
    } else {
        value_type = anydata->value_type;
    }

    /* first byte is type */
    LY_CHECK_GOTO(ret = lyb_write(out, (uint8_t *)&value_type, sizeof value_type, lybctx), cleanup);

    if (anydata->value_type == LYD_ANYDATA_DATATREE) {
        /* print LYB data tree to memory */
        LY_CHECK_GOTO(ret = ly_out_new_memory(&buf, 0, &out2), cleanup);
        LY_CHECK_GOTO(ret = lyb_print_data(out2, anydata->value.tree, LYDP_WITHSIBLINGS), cleanup);

        len = lyd_lyb_data_length(buf);
        assert(len != -1);
        str = buf;
    } else if (anydata->value_type == LYD_ANYDATA_LYB) {
        len = lyd_lyb_data_length(anydata->value.mem);
        assert(len != -1);
        str = anydata->value.mem;
    } else {
        len = strlen(anydata->value.str);
        str = anydata->value.str;
    }

    /* followed by the content */
    LY_CHECK_GOTO(ret = lyb_write_string(str, (size_t)len, 0, out, lybctx), cleanup);

cleanup:
    ly_out_free(out2, NULL, 1);
    return ret;
}

/**
 * @brief Print term node.
 *
 * @param[in] term Node to print.
 * @param[in] out Out structure.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_term(struct lyd_node_term *term, struct ly_out *out, struct lyd_lyb_ctx *lybctx)
{
    LY_ERR ret;
    int dynamic;
    const char *str;

    /* get value */
    str = lyd_value2str(term, &dynamic);

    /* print it */
    ret = lyb_write_string(str, 0, 0, out, lybctx);

    if (dynamic) {
        free((char *)str);
    }
    return ret;
}

/**
 * @brief Print YANG node metadata.
 *
 * @param[in] out Out structure.
 * @param[in] node Data node whose metadata to print.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_metadata(struct ly_out *out, const struct lyd_node *node, struct lyd_lyb_ctx *lybctx)
{
    LY_ERR ret;
    int dynamic;
    uint8_t count = 0;
    const struct lys_module *wd_mod = NULL;
    struct lyd_meta *iter;
    const char *str;

    /* with-defaults */
    if (node->schema->nodetype & LYD_NODE_TERM) {
        if (((node->flags & LYD_DEFAULT) && (lybctx->options & (LYDP_WD_ALL_TAG | LYDP_WD_IMPL_TAG))) ||
                ((lybctx->options & LYDP_WD_ALL_TAG) && ly_is_default(node))) {
            /* we have an implicit OR explicit default node, print the attribute only if the context includes the with-defaults schema */
            wd_mod = ly_ctx_get_module_latest(node->schema->module->ctx, "ietf-netconf-with-defaults");
        }
    }

    /* count metadata */
    if (wd_mod) {
        ++count;
    }
    for (iter = node->meta; iter; iter = iter->next) {
        if (count == UINT8_MAX) {
            LOGERR(lybctx->ctx, LY_EINT, "Maximum supported number of data node metadata is %u.", UINT8_MAX);
            return LY_EINT;
        }
        ++count;
    }

    /* write number of metadata on 1 byte */
    LY_CHECK_RET(lyb_write(out, &count, 1, lybctx));

    if (wd_mod) {
        /* write the "default" metadata */
        LY_CHECK_RET(lyb_write_start_subtree(out, lybctx));
        LY_CHECK_RET(lyb_print_model(out, wd_mod, lybctx));
        LY_CHECK_RET(lyb_write_string("default", 0, 1, out, lybctx));
        LY_CHECK_RET(lyb_write_string("true", 0, 0, out, lybctx));
        LY_CHECK_RET(lyb_write_stop_subtree(out, lybctx));
    }

    /* write all the node metadata */
    LY_LIST_FOR(node->meta, iter) {
        /* each metadata is a subtree */
        LY_CHECK_RET(lyb_write_start_subtree(out, lybctx));

        /* model */
        LY_CHECK_RET(lyb_print_model(out, iter->annotation->module, lybctx));

        /* annotation name with length */
        LY_CHECK_RET(lyb_write_string(iter->name, 0, 1, out, lybctx));

        /* get the value */
        str = lyd_meta2str(iter, &dynamic);

        /* metadata value */
        ret = lyb_write_string(str, 0, 0, out, lybctx);
        if (dynamic) {
            free((char *)str);
        }
        LY_CHECK_RET(ret);

        /* finish metadata subtree */
        LY_CHECK_RET(lyb_write_stop_subtree(out, lybctx));
    }

    return LY_SUCCESS;
}

/**
 * @brief Print opaque node attributes.
 *
 * @param[in] out Out structure.
 * @param[in] node Opaque node whose attributes to print.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_attributes(struct ly_out *out, const struct lyd_node_opaq *node, struct lyd_lyb_ctx *lybctx)
{
    uint8_t count = 0;
    struct ly_attr *iter;

    for (iter = node->attr; iter; iter = iter->next) {
        if (count == UINT8_MAX) {
            LOGERR(lybctx->ctx, LY_EINT, "Maximum supported number of data node attributes is %u.", UINT8_MAX);
            return LY_EINT;
        }
        ++count;
    }

    /* write number of attributes on 1 byte */
    LY_CHECK_RET(lyb_write(out, &count, 1, lybctx));

    /* write all the attributes */
    LY_LIST_FOR(node->attr, iter) {
        /* each attribute is a subtree */
        LY_CHECK_RET(lyb_write_start_subtree(out, lybctx));

        /* prefix */
        LY_CHECK_RET(lyb_write_string(iter->prefix.pref, 0, 1, out, lybctx));

        /* namespace */
        LY_CHECK_RET(lyb_write_string(iter->prefix.ns, 0, 1, out, lybctx));

        /* name */
        LY_CHECK_RET(lyb_write_string(iter->name, 0, 1, out, lybctx));

        /* value prefixes */
        LY_CHECK_RET(lyb_print_opaq_prefixes(out, iter->val_prefs, lybctx));

        /* format */
        LY_CHECK_RET(lyb_write_number(iter->format, 1, out, lybctx));

        /* value */
        LY_CHECK_RET(lyb_write_string(iter->value, 0, 0, out, lybctx));

        /* finish attribute subtree */
        LY_CHECK_RET(lyb_write_stop_subtree(out, lybctx));
    }

    return LY_SUCCESS;
}

/**
 * @brief Print schema node hash.
 *
 * @param[in] out Out structure.
 * @param[in] schema Schema node whose hash to print.
 * @param[in,out] sibling_ht Cached hash table for these siblings, created if NULL.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_schema_hash(struct ly_out *out, struct lysc_node *schema, struct hash_table **sibling_ht, struct lyd_lyb_ctx *lybctx)
{
    LY_ARRAY_SIZE_TYPE u;
    uint32_t i;
    LYB_HASH hash;
    struct lyd_lyb_sib_ht *sib_ht;
    struct lysc_node *first_sibling;

    if (!schema) {
        /* opaque node, write empty hash */
        hash = 0;
        LY_CHECK_RET(lyb_write(out, &hash, sizeof hash, lybctx));
        return LY_SUCCESS;
    }

    /* create whole sibling HT if not already created and saved */
    if (!*sibling_ht) {
        /* get first schema data sibling (or input/output) */
        first_sibling = (struct lysc_node *)lys_getnext(NULL, lysc_data_parent(schema), schema->module->compiled, 0);
        LY_ARRAY_FOR(lybctx->sib_hts, u) {
            if (lybctx->sib_hts[u].first_sibling == first_sibling) {
                /* we have already created a hash table for these siblings */
                *sibling_ht = lybctx->sib_hts[u].ht;
                break;
            }
        }

        if (!*sibling_ht) {
            /* we must create sibling hash table */
            LY_CHECK_RET(lyb_hash_siblings(first_sibling, sibling_ht));

            /* and save it */
            LY_ARRAY_NEW_RET(lybctx->ctx, lybctx->sib_hts, sib_ht, LY_EMEM);

            sib_ht->first_sibling = first_sibling;
            sib_ht->ht = *sibling_ht;
        }
    }

    /* get our hash */
    LY_CHECK_RET(lyb_hash_find(*sibling_ht, schema, &hash));

    /* write the hash */
    LY_CHECK_RET(lyb_write(out, &hash, sizeof hash, lybctx));

    if (hash & LYB_HASH_COLLISION_ID) {
        /* no collision for this hash, we are done */
        return LY_SUCCESS;
    }

    /* written hash was a collision, write also all the preceding hashes */
    for (i = 0; !(hash & (LYB_HASH_COLLISION_ID >> i)); ++i);

    for (; i; --i) {
        hash = lyb_hash(schema, i - 1);
        if (!hash) {
            return LY_EINT;
        }
        assert(hash & (LYB_HASH_COLLISION_ID >> (i - 1)));

        LY_CHECK_RET(lyb_write(out, &hash, sizeof hash, lybctx));
    }

    return LY_SUCCESS;
}
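
/*
 * Example of the bytes produced by lyb_print_schema_hash() (hypothetical
 * values, assuming an 8-bit LYB_HASH with LYB_HASH_COLLISION_ID 0x80):
 *
 *   - unique node:              0x9C        (single hash, top bit set)
 *   - node needing col. ID 1:   0x47 0xB1   (0x40-bit hash first, then the colliding top-bit hash)
 *   - opaque node:              0x00        (empty hash, no schema known)
 *
 * The position of the highest set bit in the first hash tells a reader how
 * many additional hashes follow.
 */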

/**
 * @brief Print data subtree.
 *
 * @param[in] out Out structure.
 * @param[in] node Root node of the subtree to print.
 * @param[in,out] sibling_ht Cached hash table for these data siblings, created if NULL.
 * @param[in] lybctx LYB context.
 * @return LY_ERR value.
 */
static LY_ERR
lyb_print_subtree(struct ly_out *out, const struct lyd_node *node, struct hash_table **sibling_ht, struct lyd_lyb_ctx *lybctx)
{
    struct hash_table *child_ht = NULL;

    /* register a new subtree */
    LY_CHECK_RET(lyb_write_start_subtree(out, lybctx));

    /* write model info first */
    if (!node->schema && !((struct lyd_node_opaq *)node)->parent) {
        LY_CHECK_RET(lyb_print_model(out, NULL, lybctx));
    } else if (node->schema && !lysc_data_parent(node->schema)) {
        LY_CHECK_RET(lyb_print_model(out, node->schema->module, lybctx));
    }

    /* write schema hash */
    LY_CHECK_RET(lyb_print_schema_hash(out, (struct lysc_node *)node->schema, sibling_ht, lybctx));

    /* write any metadata/attributes */
    if (node->schema) {
        LY_CHECK_RET(lyb_print_metadata(out, node, lybctx));
    } else {
        LY_CHECK_RET(lyb_print_attributes(out, (struct lyd_node_opaq *)node, lybctx));
    }

    /* write node content */
    if (!node->schema) {
        LY_CHECK_RET(lyb_print_opaq((struct lyd_node_opaq *)node, out, lybctx));
    } else if (node->schema->nodetype & LYD_NODE_INNER) {
        /* nothing to write */
    } else if (node->schema->nodetype & LYD_NODE_TERM) {
        LY_CHECK_RET(lyb_print_term((struct lyd_node_term *)node, out, lybctx));
    } else if (node->schema->nodetype & LYD_NODE_ANY) {
        LY_CHECK_RET(lyb_print_anydata((struct lyd_node_any *)node, out, lybctx));
    } else {
        LOGINT_RET(lybctx->ctx);
    }

    /* recursively write all the descendants */
    LY_LIST_FOR(lyd_node_children(node, 0), node) {
        LY_CHECK_RET(lyb_print_subtree(out, node, &child_ht, lybctx));
    }

    /* finish this subtree */
    LY_CHECK_RET(lyb_write_stop_subtree(out, lybctx));

    return LY_SUCCESS;
}

LY_ERR
lyb_print_data(struct ly_out *out, const struct lyd_node *root, int options)
{
    LY_ERR ret = LY_SUCCESS;
    uint8_t zero = 0;
    LY_ARRAY_SIZE_TYPE u;
    struct hash_table *top_sibling_ht = NULL;
    const struct lys_module *prev_mod = NULL;
    struct lyd_lyb_ctx lybctx = {0};

    lybctx.options = options;
    if (root) {
        lybctx.ctx = LYD_NODE_CTX(root);

        if (root->schema && lysc_data_parent(root->schema)) {
            LOGERR(lybctx.ctx, LY_EINVAL, "LYB printer supports only printing top-level nodes.");
            return LY_EINVAL;
        }
    }

    /* LYB magic number */
    LY_CHECK_GOTO(ret = lyb_print_magic_number(out, &lybctx), cleanup);

    /* LYB header */
    LY_CHECK_GOTO(ret = lyb_print_header(out, &lybctx), cleanup);

    /* all used models */
    LY_CHECK_GOTO(ret = lyb_print_data_models(out, root, &lybctx), cleanup);

    LY_LIST_FOR(root, root) {
        /* do not reuse sibling hash tables from different modules */
        if (!root->schema || (root->schema->module != prev_mod)) {
            top_sibling_ht = NULL;
            prev_mod = root->schema ? root->schema->module : NULL;
        }

        LY_CHECK_GOTO(ret = lyb_print_subtree(out, root, &top_sibling_ht, &lybctx), cleanup);

        if (!(options & LYDP_WITHSIBLINGS)) {
            break;
        }
    }

    /* ending zero byte */
    LY_CHECK_GOTO(ret = lyb_write(out, &zero, sizeof zero, &lybctx), cleanup);

cleanup:
    LY_ARRAY_FREE(lybctx.subtrees);
    LY_ARRAY_FOR(lybctx.sib_hts, u) {
        lyht_free(lybctx.sib_hts[u].ht);
    }
    LY_ARRAY_FREE(lybctx.sib_hts);

    return ret;
}
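
/*
 * Overall layout of a document produced by lyb_print_data(), as a rough sketch
 * (sizes of the individual items are described at the functions above):
 *
 *   magic "lyb" | header (version byte) | module count + used modules | top-level subtrees... | 0x00
 *
 * Each subtree is itself chunked: reserved size/inner-chunk metadata, module
 * info (top level only), schema hash(es), metadata or attributes, node
 * content, then the nested subtrees of its children.
 */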