/**
 * @file parser_lyb.c
 * @author Michal Vasko <mvasko@cesnet.cz>
 * @brief LYB data parser for libyang
 *
 * Copyright (c) 2018 CESNET, z.s.p.o.
 *
 * This source code is licensed under BSD 3-Clause License (the "License").
 * You may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://opensource.org/licenses/BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include "libyang.h"
#include "common.h"
#include "context.h"
#include "parser.h"
#include "tree_internal.h"

#define LYB_HAVE_READ_GOTO(r, d, go) if (r < 0) goto go; d += r;
#define LYB_HAVE_READ_RETURN(r, d, ret) if (r < 0) return ret; d += r;

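/**
 * @brief Read @p count bytes from LYB data into @p buf (or skip them if @p buf is NULL),
 * transparently crossing chunk boundaries of all the currently open subtrees.
 *
 * @return Number of bytes consumed from @p data, including any chunk size metadata.
 */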
static int
lyb_read(const char *data, uint8_t *buf, size_t count, struct lyb_state *lybs)
{
    int ret = 0, i, empty_chunk_i;
    size_t to_read;

    assert(data && lybs);

    while (1) {
        /* check for fully-read (empty) data chunks */
        to_read = count;
        empty_chunk_i = -1;
        for (i = 0; i < lybs->used; ++i) {
            /* we want the innermost chunks resolved first, so replace previous empty chunks,
             * also ignore chunks that are completely finished, there is nothing for us to do */
            if ((lybs->written[i] <= count) && lybs->position[i]) {
                /* empty chunk, do not read more */
                to_read = lybs->written[i];
                empty_chunk_i = i;
            }
        }

        if ((empty_chunk_i == -1) && !count) {
            break;
        }

        /* we are actually reading some data, not just finishing another chunk */
        if (to_read) {
            if (buf) {
                memcpy(buf, data + ret, to_read);
            }

            for (i = 0; i < lybs->used; ++i) {
                /* decrease all written counters */
                lybs->written[i] -= to_read;
            }
            /* decrease count/buf */
            count -= to_read;
            if (buf) {
                buf += to_read;
            }

            ret += to_read;
        }

        if (empty_chunk_i > -1) {
            /* read the next chunk size */
            memcpy(&lybs->written[empty_chunk_i], data + ret, LYB_SIZE_BYTES);
            /* remember whether there is a following chunk or not */
            lybs->position[empty_chunk_i] = (lybs->written[empty_chunk_i] == LYB_SIZE_MAX ? 1 : 0);

            ret += LYB_SIZE_BYTES;
        }
    }

    return ret;
}

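/**
 * @brief Read a number using the minimum number of bytes needed to represent @p max_num,
 * which must match the byte count used by the printer. Bytes are copied directly into the
 * native representation of @p num.
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */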
static int
lyb_read_number(uint64_t *num, uint64_t max_num, const char *data, struct lyb_state *lybs)
{
    int max_bits, max_bytes, i, r, ret = 0;
    uint8_t byte;

    for (max_bits = 0; max_num; max_num >>= 1, ++max_bits);
    max_bytes = max_bits / 8 + (max_bits % 8 ? 1 : 0);

    for (i = 0; i < max_bytes; ++i) {
        ret += (r = lyb_read(data, &byte, 1, lybs));
        LYB_HAVE_READ_RETURN(r, data, -1);

        *(((uint8_t *)num) + i) = byte;
    }

    return ret;
}

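/**
 * @brief Read a string spanning the rest of the current (innermost) chunk and store it
 * in the dictionary.
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */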
static int
lyb_read_string(struct ly_ctx *ctx, const char **str, const char *data, struct lyb_state *lybs)
{
    int ret;
    size_t len;

    /* read until the end of this subtree */
    len = lybs->written[lybs->used - 1];

    *str = malloc((len + 1) * sizeof **str);
    LY_CHECK_ERR_RETURN(!*str, LOGMEM(ctx), -1);

    ret = lyb_read(data, (uint8_t *)*str, len, lybs);
    LYB_HAVE_READ_GOTO(ret, data, error);
    ((char *)*str)[len] = '\0';

    /* store in the dictionary */
    *str = lydict_insert_zc(ctx, (char *)*str);

    return ret;

error:
    free((char *)*str);
    *str = NULL;
    return -1;
}

static void
lyb_read_stop_subtree(struct lyb_state *lybs)
{
    if (lybs->written[lybs->used - 1]) {
        LOGINT(NULL);
    }

    --lybs->used;
}

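/**
 * @brief Register a new subtree in the LYB state and read the size of its first chunk.
 * A chunk size equal to LYB_SIZE_MAX means that another chunk of this subtree follows.
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */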
static int
lyb_read_start_subtree(const char *data, struct lyb_state *lybs)
{
    uint64_t num = 0;

    if (lybs->used == lybs->size) {
        lybs->size += LYB_STATE_STEP;
        lybs->written = ly_realloc(lybs->written, lybs->size * sizeof *lybs->written);
        lybs->position = ly_realloc(lybs->position, lybs->size * sizeof *lybs->position);
        LY_CHECK_ERR_RETURN(!lybs->written || !lybs->position, LOGMEM(NULL), -1);
    }

    memcpy(&num, data, LYB_SIZE_BYTES);

    ++lybs->used;
    lybs->written[lybs->used - 1] = num;
    lybs->position[lybs->used - 1] = (num == LYB_SIZE_MAX ? 1 : 0);

    return LYB_SIZE_BYTES;
}

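/**
 * @brief Parse a module name with revision and find the module in the context. The revision
 * is packed into 16 bits: 7 bits for the year (since 2000), 4 for the month, 5 for the day,
 * with 0 meaning no revision.
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */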
static int
lyb_parse_model(struct ly_ctx *ctx, const char *data, const struct lys_module **mod, struct lyb_state *lybs)
{
    int r, ret = 0;
    uint16_t num = 0;
    char *mod_name = NULL, mod_rev[11];

    /* model name length */
    ret += (r = lyb_read(data, (uint8_t *)&num, sizeof(uint16_t), lybs));
    LYB_HAVE_READ_GOTO(r, data, error);

    mod_name = malloc(num + 1);
    LY_CHECK_ERR_GOTO(!mod_name, LOGMEM(ctx), error);

    /* model name */
    ret += (r = lyb_read(data, (uint8_t *)mod_name, num, lybs));
    LYB_HAVE_READ_GOTO(r, data, error);
    mod_name[num] = '\0';

    /* revision */
    ret += (r = lyb_read(data, (uint8_t *)&num, sizeof(uint16_t), lybs));
    LYB_HAVE_READ_GOTO(r, data, error);

    if (num) {
        sprintf(mod_rev, "%04u-%02u-%02u", ((num & 0xFE00) >> 9) + 2000, (num & 0x01E0) >> 5, (num & 0x001F));
        *mod = ly_ctx_get_module(ctx, mod_name, mod_rev, 0);
    } else {
        *mod = ly_ctx_get_module(ctx, mod_name, NULL, 0);
    }
    if (!*mod) {
        LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Module \"%s@%s\" not found in the context.", mod_name, (num ? mod_rev : "<none>"));
        goto error;
    }

    free(mod_name);
    return ret;

error:
    free(mod_name);
    return -1;
}

static struct lyd_node *
lyb_new_node(const struct lys_node *schema)
{
    struct lyd_node *node;

    switch (schema->nodetype) {
    case LYS_CONTAINER:
    case LYS_LIST:
    case LYS_NOTIF:
    case LYS_RPC:
    case LYS_ACTION:
        node = calloc(1, sizeof(struct lyd_node));
        break;
    case LYS_LEAF:
    case LYS_LEAFLIST:
        node = calloc(1, sizeof(struct lyd_node_leaf_list));
        break;
    case LYS_ANYDATA:
    case LYS_ANYXML:
        node = calloc(1, sizeof(struct lyd_node_anydata));
        break;
    default:
        return NULL;
    }
    LY_CHECK_ERR_RETURN(!node, LOGMEM(schema->module->ctx), NULL);

    /* fill basic info */
    node->schema = (struct lys_node *)schema;
    node->validity = ly_new_node_validity(schema);
    if (resolve_applies_when(schema, 0, NULL)) {
        node->when_status = LYD_WHEN;
    }
    node->prev = node;

    return node;
}

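/**
 * @brief Parse the value of an anydata/anyxml node. A nested data tree is parsed
 * recursively as LYB data, all other value types are read as strings.
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */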
static int
lyb_parse_anydata(struct lyd_node *node, const char *data, struct lyb_state *lybs)
{
    int r, ret = 0;
    struct lyd_node_anydata *any = (struct lyd_node_anydata *)node;

    /* read value type */
    ret += (r = lyb_read(data, (uint8_t *)&any->value_type, sizeof any->value_type, lybs));
    LYB_HAVE_READ_RETURN(r, data, -1);

    /* read anydata content */
    if (any->value_type == LYD_ANYDATA_DATATREE) {
        any->value.tree = lyd_parse_lyb(node->schema->module->ctx, data, 0, NULL, NULL, &r);
        ret += r;
        LYB_HAVE_READ_RETURN(r, data, -1);
    } else {
        ret += (r = lyb_read_string(node->schema->module->ctx, &any->value.str, data, lybs));
        LYB_HAVE_READ_RETURN(r, data, -1);
    }

    return ret;
}

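/**
 * @brief Parse the binary representation of a leaf/leaf-list value. Types that cannot be
 * resolved yet (instance-identifier, identityref, union) and flagged (user/unresolved)
 * values only get their string form here and are finished in lyb_parse_val_str().
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */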
static int
lyb_parse_val(struct lyd_node_leaf_list *node, const char *data, struct lyb_state *lybs)
{
    int r, ret;
    size_t i;
    uint8_t byte;
    uint64_t num;
    struct ly_ctx *ctx = node->schema->module->ctx;
    struct lys_type *type = &((struct lys_node_leaf *)node->schema)->type;

    if (node->value_flags & (LY_VALUE_USER | LY_VALUE_UNRES)) {
        /* just read value_str */
        return lyb_read_string(ctx, &node->value_str, data, lybs);
    }

    switch (node->value_type) {
    case LY_TYPE_INST:
    case LY_TYPE_IDENT:
    case LY_TYPE_UNION:
        /* we do not actually fill value now, but value_str */
        ret = lyb_read_string(ctx, &node->value_str, data, lybs);
        break;
    case LY_TYPE_BINARY:
    case LY_TYPE_STRING:
    case LY_TYPE_UNKNOWN:
        /* read string */
        ret = lyb_read_string(ctx, &node->value.string, data, lybs);
        break;
    case LY_TYPE_BITS:
        /* find the correct structure */
        for (; !type->info.bits.count; type = &type->der->type);

        node->value.bit = calloc(type->info.bits.count, sizeof *node->value.bit);
        LY_CHECK_ERR_RETURN(!node->value.bit, LOGMEM(ctx), -1);

        /* read values */
        ret = 0;
        for (i = 0; i < type->info.bits.count; ++i) {
            if (i % 8 == 0) {
                /* read another byte, advancing data so the next byte is read from the correct offset */
                ret += (r = lyb_read(data, &byte, sizeof byte, lybs));
                LYB_HAVE_READ_RETURN(r, data, -1);
            }

            if (byte & (0x01 << (i % 8))) {
                /* bit is set */
                node->value.bit[i] = &type->info.bits.bit[i];
            }
        }
        break;
    case LY_TYPE_BOOL:
        /* read byte */
        ret = lyb_read(data, &byte, sizeof byte, lybs);
        if ((ret > 0) && byte) {
            node->value.bln = 1;
        }
        break;
    case LY_TYPE_EMPTY:
        /* nothing to read */
        ret = 0;
        break;
    case LY_TYPE_ENUM:
        /* find the correct structure */
        for (; !type->info.enums.count; type = &type->der->type);

        num = 0;
        ret = lyb_read_number(&num, type->info.enums.enm[type->info.enums.count - 1].value, data, lybs);
        if ((ret > 0) && (num < type->info.enums.count)) {
            node->value.enm = &type->info.enums.enm[num];
        }
        break;
    case LY_TYPE_INT8:
    case LY_TYPE_UINT8:
        ret = lyb_read_number((uint64_t *)&node->value.uint8, UINT8_MAX, data, lybs);
        break;
    case LY_TYPE_INT16:
    case LY_TYPE_UINT16:
        ret = lyb_read_number((uint64_t *)&node->value.uint16, UINT16_MAX, data, lybs);
        break;
    case LY_TYPE_INT32:
    case LY_TYPE_UINT32:
        ret = lyb_read_number((uint64_t *)&node->value.uint32, UINT32_MAX, data, lybs);
        break;
    case LY_TYPE_DEC64:
    case LY_TYPE_INT64:
    case LY_TYPE_UINT64:
        ret = lyb_read_number((uint64_t *)&node->value.uint64, UINT64_MAX, data, lybs);
        break;
    default:
        return -1;
    }

    return ret;
}

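/**
 * @brief Finish a leaf/leaf-list value by filling its value_str (and, for some types, the
 * resolved value) from the binary value read by lyb_parse_val(). Instance-identifiers are
 * only added to @p unres and resolved later.
 *
 * @return 0 on success, -1 on error.
 */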
static int
lyb_parse_val_str(struct lyd_node_leaf_list *node, struct unres_data *unres)
{
    struct ly_ctx *ctx = node->schema->module->ctx;
    struct lys_type *type = &((struct lys_node_leaf *)node->schema)->type;
    char num_str[22], *str;
    uint64_t div64;
    uint32_t i, str_len;

    if (node->value_flags & LY_VALUE_UNRES) {
        /* nothing to do */
        return 0;
    }

    if (node->value_flags & LY_VALUE_USER) {
        /* unfortunately, we need to also fill the value properly, so just parse it again */
        node->value_flags &= ~LY_VALUE_USER;
        if (!lyp_parse_value(type, &node->value_str, NULL, node, NULL,
                             lyd_node_module((struct lyd_node *)node), 1, node->dflt, 1)) {
            return -1;
        }

        if (!(node->value_flags & LY_VALUE_USER)) {
            LOGWRN(ctx, "Node \"%s\" value was stored as a user type, but it is not in the current context.", node->schema->name);
        }
        return 0;
    }

    switch (node->value_type) {
    case LY_TYPE_INST:
        /* try to fill the instance-identifier target now */
        if (unres_data_add(unres, (struct lyd_node *)node, UNRES_INSTID)) {
            return -1;
        }
        break;
    case LY_TYPE_IDENT:
        /* fill the identity pointer now */
        node->value.ident = resolve_identref(type, node->value_str, (struct lyd_node *)node, node->schema->module, node->dflt);
        if (!node->value.ident) {
            return -1;
        }
        break;
    case LY_TYPE_BINARY:
    case LY_TYPE_STRING:
    case LY_TYPE_UNKNOWN:
        /* just re-assign it */
        node->value_str = node->value.string;
        break;
    case LY_TYPE_BITS:
        for (; !type->info.bits.count; type = &type->der->type);

        /* print the set bits, space-separated */
        str = malloc(1);
        LY_CHECK_ERR_RETURN(!str, LOGMEM(ctx), -1);
        str[0] = '\0';
        str_len = 0;
        for (i = 0; i < type->info.bits.count; ++i) {
            if (node->value.bit[i]) {
                str = ly_realloc(str, str_len + (str_len ? 1 : 0) + strlen(node->value.bit[i]->name) + 1);
                LY_CHECK_ERR_RETURN(!str, LOGMEM(ctx), -1);

                str_len += sprintf(str + str_len, "%s%s", str_len ? " " : "", node->value.bit[i]->name);
            }
        }

        node->value_str = lydict_insert_zc(ctx, str);
        break;
    case LY_TYPE_BOOL:
        node->value_str = lydict_insert(ctx, (node->value.bln ? "true" : "false"), 0);
        break;
    case LY_TYPE_EMPTY:
    case LY_TYPE_UNION:
        /* leave value empty */
        break;
    case LY_TYPE_ENUM:
        /* print the value */
        node->value_str = lydict_insert(ctx, node->value.enm->name, 0);
        break;
    case LY_TYPE_INT8:
        sprintf(num_str, "%d", node->value.int8);
        node->value_str = lydict_insert(ctx, num_str, 0);
        break;
    case LY_TYPE_UINT8:
        sprintf(num_str, "%u", node->value.uint8);
        node->value_str = lydict_insert(ctx, num_str, 0);
        break;
    case LY_TYPE_INT16:
        sprintf(num_str, "%d", node->value.int16);
        node->value_str = lydict_insert(ctx, num_str, 0);
        break;
    case LY_TYPE_UINT16:
        sprintf(num_str, "%u", node->value.uint16);
        node->value_str = lydict_insert(ctx, num_str, 0);
        break;
    case LY_TYPE_INT32:
        sprintf(num_str, "%d", node->value.int32);
        node->value_str = lydict_insert(ctx, num_str, 0);
        break;
    case LY_TYPE_UINT32:
        sprintf(num_str, "%u", node->value.uint32);
        node->value_str = lydict_insert(ctx, num_str, 0);
        break;
    case LY_TYPE_INT64:
        sprintf(num_str, "%ld", node->value.int64);
        node->value_str = lydict_insert(ctx, num_str, 0);
        break;
    case LY_TYPE_UINT64:
        sprintf(num_str, "%lu", node->value.uint64);
        node->value_str = lydict_insert(ctx, num_str, 0);
        break;
    case LY_TYPE_DEC64:
        /* the stored value is scaled by 10^fraction-digits, find the type that defines them */
        for (; !type->info.dec64.dig; type = &type->der->type);
        for (div64 = 1, i = 0; i < type->info.dec64.dig; ++i, div64 *= 10);
        sprintf(num_str, "%s%lu.%.*lu", (node->value.dec64 < 0 ? "-" : ""), (uint64_t)llabs(node->value.dec64) / div64,
                type->info.dec64.dig, (uint64_t)llabs(node->value.dec64) % div64);
        node->value_str = lydict_insert(ctx, num_str, 0);
        break;
    default:
        return -1;
    }

    return 0;
}

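/**
 * @brief Parse a leaf/leaf-list value preceded by its metadata byte: 0x80 marks a default
 * value, 0x40 a user-type value, 0x20 an unresolved value, the low 5 bits hold the value type.
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */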
static int
lyb_parse_leaf(struct lyd_node *node, const char *data, struct unres_data *unres, struct lyb_state *lybs)
{
    int r, ret = 0;
    uint8_t start_byte;
    struct lyd_node_leaf_list *leaf = (struct lyd_node_leaf_list *)node;

    /* read value type and flags on the first byte */
    ret += (r = lyb_read(data, &start_byte, sizeof start_byte, lybs));
    LYB_HAVE_READ_RETURN(r, data, -1);

    /* fill value type, flags */
    leaf->value_type = start_byte & 0x1F;
    if (start_byte & 0x80) {
        leaf->dflt = 1;
    }
    if (start_byte & 0x40) {
        leaf->value_flags |= LY_VALUE_USER;
    }
    if (start_byte & 0x20) {
        leaf->value_flags |= LY_VALUE_UNRES;
    }

    ret += (r = lyb_parse_val(leaf, data, lybs));
    LYB_HAVE_READ_RETURN(r, data, -1);

    ret += (r = lyb_parse_val_str(leaf, unres));
    LYB_HAVE_READ_RETURN(r, data, -1);

    return ret;
}

static int
lyb_is_schema_hash_match(struct lys_node *sibling, LYB_HASH hash)
{
    LYB_HASH sibling_hash;
    uint8_t collision_id;

    /* get collision ID */
    for (collision_id = 0; !(hash & (LYB_HASH_COLLISION_ID >> collision_id)); ++collision_id);

    /* get correct collision ID node hash */
    sibling_hash = lyb_hash(sibling, collision_id);

    return hash == sibling_hash;
}

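/**
 * @brief Read a schema node hash and find the matching schema node among the children of
 * @p sparent or the top-level nodes of @p mod (exactly one of them is set). Without
 * LYD_OPT_STRICT a missing match is not an error and @p snode is simply set to NULL.
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */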
static int
lyb_parse_schema_hash(const struct lys_node *sparent, const struct lys_module *mod, const char *data, const char *yang_data_name,
                      int options, struct lys_node **snode, struct lyb_state *lybs)
{
    int r, ret = 0;
    struct lys_node *sibling = NULL;
    LYB_HASH hash = 0;
    struct ly_ctx *ctx;

    assert((sparent || mod) && (!sparent || !mod));
    ctx = (sparent ? sparent->module->ctx : mod->ctx);

    /* read the hash */
    ret += (r = lyb_read(data, &hash, sizeof hash, lybs));
    LYB_HAVE_READ_RETURN(r, data, -1);
    if (!hash) {
        return -1;
    }

    while ((sibling = (struct lys_node *)lys_getnext(sibling, sparent, mod, 0))) {
        /* skip schema nodes from models not present during printing */
        if (lyb_has_schema_model(sibling, lybs->models, lybs->mod_count) && lyb_is_schema_hash_match(sibling, hash)) {
            /* match found */
            break;
        }
    }

    *snode = sibling;
    if (!sibling && (options & LYD_OPT_STRICT)) {
        if (mod) {
            LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Failed to find matching hash for a top-level node from \"%s\".", mod->name);
        } else {
            LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, sparent, "Failed to find matching hash for a child of \"%s\".", sparent->name);
        }
        return -1;
    }

    return ret;
}

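/**
 * @brief Parse one subtree (a node and all its descendants). Unknown subtrees (no matching
 * schema node) are skipped. The parsed node is manually linked into @p parent or appended
 * to @p first_sibling, exactly one of which is set.
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */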
static int
lyb_parse_subtree(struct ly_ctx *ctx, const char *data, struct lyd_node *parent, struct lyd_node **first_sibling,
                  const char *yang_data_name, int options, struct unres_data *unres, struct lyb_state *lybs)
{
    int r, ret = 0;
    struct lyd_node *node = NULL, *iter;
    const struct lys_module *mod;
    struct lys_node *sparent, *snode;

    assert((parent && !first_sibling) || (!parent && first_sibling));

    /* register a new subtree */
    ret += (r = lyb_read_start_subtree(data, lybs));
    LYB_HAVE_READ_GOTO(r, data, error);

    if (!parent) {
        /* top-level, read module name */
        ret += (r = lyb_parse_model(ctx, data, &mod, lybs));
        LYB_HAVE_READ_GOTO(r, data, error);

        sparent = NULL;
    } else {
        mod = NULL;
        sparent = parent->schema;
    }

    /* read hash, find the schema node */
    ret += (r = lyb_parse_schema_hash(sparent, mod, data, yang_data_name, options, &snode, lybs));
    LYB_HAVE_READ_GOTO(r, data, error);

    if (!snode) {
        /* unknown data subtree, skip it whole */
        do {
            ret += (r = lyb_read(data, NULL, lybs->written[lybs->used - 1], lybs));
            LYB_HAVE_READ_GOTO(r, data, error);
        } while (lybs->written[lybs->used - 1]);
        goto stop_subtree;
    }

    /*
     * read the node
     */
    node = lyb_new_node(snode);
    if (!node) {
        goto error;
    }

    /* TODO read attributes */

    /* read node content */
    switch (snode->nodetype) {
    case LYS_CONTAINER:
    case LYS_LIST:
        /* nothing to read */
        break;
    case LYS_LEAF:
    case LYS_LEAFLIST:
        ret += (r = lyb_parse_leaf(node, data, unres, lybs));
        LYB_HAVE_READ_GOTO(r, data, error);
        break;
    case LYS_ANYXML:
    case LYS_ANYDATA:
        ret += (r = lyb_parse_anydata(node, data, lybs));
        LYB_HAVE_READ_GOTO(r, data, error);
        break;
    default:
        goto error;
    }

    /* insert into data tree, manually */
    if (parent) {
        if (!parent->child) {
            /* only child */
            parent->child = node;
        } else {
            /* last child */
            parent->child->prev->next = node;
            node->prev = parent->child->prev;
            parent->child->prev = node;
        }
        node->parent = parent;
    } else if (*first_sibling) {
        /* last sibling */
        (*first_sibling)->prev->next = node;
        node->prev = (*first_sibling)->prev;
        (*first_sibling)->prev = node;
    } else {
        /* only sibling */
        *first_sibling = node;
    }

    /* read all descendants */
    while (lybs->written[lybs->used - 1]) {
        ret += (r = lyb_parse_subtree(ctx, data, node, NULL, NULL, options, unres, lybs));
        LYB_HAVE_READ_GOTO(r, data, error);
    }

    /* make containers default if should be */
    if (node->schema->nodetype == LYS_CONTAINER) {
        LY_TREE_FOR(node->child, iter) {
            if (!iter->dflt) {
                break;
            }
        }

        if (!iter) {
            node->dflt = 1;
        }
    }

stop_subtree:
    /* end the subtree */
    lyb_read_stop_subtree(lybs);

    return ret;

error:
    lyd_free(node);
    if (first_sibling && (*first_sibling == node)) {
        *first_sibling = NULL;
    }
    return -1;
}

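/**
 * @brief Parse the list of modules whose data were printed; it is later used for
 * filtering schema nodes while matching hashes.
 *
 * @return Number of bytes consumed from @p data, -1 on error.
 */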
static int
lyb_parse_data_models(struct ly_ctx *ctx, const char *data, struct lyb_state *lybs)
{
    int i, r, ret = 0;

    /* read model count */
    ret += (r = lyb_read(data, (uint8_t *)&lybs->mod_count, 2, lybs));
    LYB_HAVE_READ_RETURN(r, data, -1);

    lybs->models = malloc(lybs->mod_count * sizeof *lybs->models);
    LY_CHECK_ERR_RETURN(!lybs->models, LOGMEM(NULL), -1);

    /* read modules */
    for (i = 0; i < lybs->mod_count; ++i) {
        ret += (r = lyb_parse_model(ctx, data, &lybs->models[i], lybs));
        LYB_HAVE_READ_RETURN(r, data, -1);
    }

    return ret;
}

static int
lyb_parse_header(const char *data, struct lyb_state *lybs)
{
    int ret = 0;
    uint8_t byte = 0;

    /* TODO version, any flags? */
    ret += lyb_read(data, (uint8_t *)&byte, sizeof byte, lybs);

    return ret;
}

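/**
 * @brief Parse LYB data into a data tree.
 *
 * @param[out] parsed If set, filled with the number of bytes consumed from @p data.
 * @return Parsed data tree, NULL on error or if there are no data.
 */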
struct lyd_node *
lyd_parse_lyb(struct ly_ctx *ctx, const char *data, int options, const struct lyd_node *data_tree,
              const char *yang_data_name, int *parsed)
{
    int r = 0, ret = 0;
    struct lyd_node *node = NULL, *next, *act_notif = NULL;
    struct unres_data *unres = NULL;
    struct lyb_state lybs;

    if (!ctx || !data) {
        LOGARG;
        return NULL;
    }

    lybs.written = malloc(LYB_STATE_STEP * sizeof *lybs.written);
    lybs.position = malloc(LYB_STATE_STEP * sizeof *lybs.position);
    lybs.used = 0;
    lybs.size = LYB_STATE_STEP;
    lybs.models = NULL;
    lybs.mod_count = 0;
    LY_CHECK_ERR_GOTO(!lybs.written || !lybs.position, LOGMEM(ctx), finish);

    unres = calloc(1, sizeof *unres);
    LY_CHECK_ERR_GOTO(!unres, LOGMEM(ctx), finish);

    /* read header */
    ret += (r = lyb_parse_header(data, &lybs));
    LYB_HAVE_READ_GOTO(r, data, finish);

    /* read used models */
    ret += (r = lyb_parse_data_models(ctx, data, &lybs));
    LYB_HAVE_READ_GOTO(r, data, finish);

    /* read subtree(s) */
    while (data[0]) {
        ret += (r = lyb_parse_subtree(ctx, data, NULL, &node, yang_data_name, options, unres, &lybs));
        if (r < 0) {
            lyd_free_withsiblings(node);
            node = NULL;
            goto finish;
        }
        data += r;
    }

    /* read the last zero, parsing finished */
    ++ret;
    r = ret;

    if (options & LYD_OPT_DATA_ADD_YANGLIB) {
        if (lyd_merge(node, ly_ctx_info(ctx), LYD_OPT_DESTRUCT | LYD_OPT_EXPLICIT)) {
            LOGERR(ctx, LY_EINT, "Adding ietf-yang-library data failed.");
            lyd_free_withsiblings(node);
            node = NULL;
            goto finish;
        }
    }

    /* resolve any unresolved instance-identifiers */
    if (unres->count) {
        if (options & (LYD_OPT_RPC | LYD_OPT_RPCREPLY | LYD_OPT_NOTIF)) {
            LY_TREE_DFS_BEGIN(node, next, act_notif) {
                if (act_notif->schema->nodetype & (LYS_RPC | LYS_ACTION | LYS_NOTIF)) {
                    break;
                }
                LY_TREE_DFS_END(node, next, act_notif);
            }
        }
        if (lyd_defaults_add_unres(&node, options, ctx, data_tree, act_notif, unres, 0)) {
            lyd_free_withsiblings(node);
            node = NULL;
            goto finish;
        }
    }

finish:
    free(lybs.written);
    free(lybs.position);
    free(lybs.models);
    if (unres) {
        free(unres->node);
        free(unres->type);
        free(unres);
    }

    if (parsed) {
        *parsed = r;
    }
    return node;
}