/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * This file contains routines that merge one tdata_t tree, called the child,
 * into another, called the parent. Note that these names are used mainly for
 * convenience and to represent the direction of the merge. They are not meant
 * to imply any relationship between the tdata_t graphs prior to the merge.
 *
 * tdata_t structures contain two main elements - a hash of iidesc_t nodes, and
 * a directed graph of tdesc_t nodes, pointed to by the iidesc_t nodes. Simply
 * put, we merge the tdesc_t graphs, followed by the iidesc_t nodes, and then we
 * clean up loose ends.
 *
 * The algorithm is as follows:
 *
 * 1. Mapping iidesc_t nodes
 *
 * For each child iidesc_t node, we first try to map its tdesc_t subgraph
 * against the tdesc_t graph in the parent. For each node in the child subgraph
 * that exists in the parent, a mapping between the two (between their type IDs)
 * is established. For the child nodes that cannot be mapped onto existing
 * parent nodes, a mapping is established between the child node ID and a
 * newly-allocated ID that the node will use when it is re-created in the
 * parent. These unmappable nodes are added to the md_tdtba (tdesc_t To Be
 * Added) hash, which tracks nodes that need to be created in the parent.
 *
 * If all of the nodes in the subgraph for an iidesc_t in the child can be
 * mapped to existing nodes in the parent, then we can try to map the child
 * iidesc_t onto an iidesc_t in the parent. If we cannot find an equivalent
 * iidesc_t, or if we were not able to completely map the tdesc_t subgraph(s),
 * then we add this iidesc_t to the md_iitba (iidesc_t To Be Added) list. This
 * list tracks iidesc_t nodes that are to be created in the parent.
 *
 * While visiting the tdesc_t nodes, we may discover a forward declaration (a
 * FORWARD tdesc_t) in the parent that is resolved in the child. That is, there
 * may be a structure or union definition in the child with the same name as the
 * forward declaration in the parent. If we find such a node, we record an
 * association in the md_fdida (Forward => Definition ID Association) list
 * between the parent ID of the forward declaration and the ID that the
 * definition will use when re-created in the parent.
 *
 * 2. Creating new tdesc_t nodes (the md_tdtba hash)
 *
 * We have now attempted to map all tdesc_t nodes from the child into the
 * parent, and have, in md_tdtba, a hash of all tdesc_t nodes that need to be
 * created (or, as we so wittily call it, conjured) in the parent. We iterate
 * through this hash, creating the indicated tdesc_t nodes. For a given tdesc_t
 * node, conjuring requires two steps - the copying of the common tdesc_t data
 * (name, type, etc) from the child node, and the creation of links from the
 * newly-created node to the parent equivalents of other tdesc_t nodes pointed
 * to by the node being conjured. Note that in some cases, the targets of these
 * links will be on the md_tdtba hash themselves, and may not have been created
 * yet. As such, we can't establish the links from these new nodes into the
 * parent graph. We therefore conjure them with links to nodes in the *child*
 * graph, and add pointers to the links to be created to the md_tdtbr (tdesc_t
 * To Be Remapped) list. For example, a POINTER tdesc_t that could not be
 * resolved would have its &tdesc_t->t_tdesc added to md_tdtbr.
 *
 * 3. Creating new iidesc_t nodes (the md_iitba list)
 *
 * When we have completed step 2, all tdesc_t nodes have been created (or
 * already existed) in the parent. Some of them may have incorrect links (the
 * members of the md_tdtbr list), but they've all been created. As such, we can
 * create all of the iidesc_t nodes, as we can attach the tdesc_t subgraph
 * pointers correctly. We create each node, and attach the pointers to the
 * appropriate parts of the parent tdesc_t graph.
 *
 * 4. Resolving newly-created tdesc_t node links (the md_tdtbr list)
 *
 * As in step 3, we rely on the fact that all of the tdesc_t nodes have been
 * created. Each entry in the md_tdtbr list is a pointer to where a link into
 * the parent will be established. As saved in the md_tdtbr list, these
 * pointers point into the child tdesc_t subgraph. We can thus get the target
 * type ID from the child, look at the ID mapping to determine the desired link
 * target, and redirect the link accordingly.
 *
 * 5. Parent => child forward declaration resolution
 *
 * If entries were made in the md_fdida list in step 1, we have forward
 * declarations in the parent that need to be resolved to their definitions
 * re-created in step 2 from the child. Using the md_fdida list, we can locate
 * the definition for the forward declaration, and we can redirect all inbound
 * edges to the forward declaration node to the actual definition.
 *
 * A pox on the house of anyone who changes the algorithm without updating
 * this comment.
 */
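
/*
 * An illustrative walkthrough (hypothetical type names and IDs, for
 * exposition only): suppose the child holds an iidesc_t for a function
 * "foo(struct bar *)", where the child's "struct bar *" has type ID 7 and
 * "struct bar" has type ID 5, and the parent already contains an equivalent
 * "struct bar" as type ID 102 but no matching pointer type. Step 1 records
 * the mapping 5 => 102, assigns a new parent ID for 7, and places the
 * pointer node on md_tdtba. Step 2 conjures the pointer in the parent;
 * since "struct bar" already exists there, the pointer's t_tdesc link can
 * be remapped immediately and nothing is left on md_tdtbr. Step 3 then
 * re-creates the iidesc_t for "foo" with its argument pointing at the
 * newly conjured pointer type.
 */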

#include <stdio.h>
#include <strings.h>
#include <assert.h>
#include <pthread.h>

#include "ctf_headers.h"
#include "ctftools.h"
#include "list.h"
#include "alist.h"
#include "memory.h"
#include "traverse.h"

typedef struct equiv_data equiv_data_t;
typedef struct merge_cb_data merge_cb_data_t;

/*
 * There are two traversals in this file, for equivalency and for tdesc_t
 * re-creation, that do not fit into the tdtraverse() framework. We have our
 * own traversal mechanism and ops vector here for those two cases.
 */
typedef struct tdesc_ops {
    char *name;
    int (*equiv)(tdesc_t *, tdesc_t *, equiv_data_t *);
    tdesc_t *(*conjure)(tdesc_t *, int, merge_cb_data_t *);
} tdesc_ops_t;
extern tdesc_ops_t tdesc_ops[];

/*
 * The workhorse structure of tdata_t merging. Holds all lists of nodes to be
 * processed during various phases of the merge algorithm.
 */
struct merge_cb_data {
    tdata_t *md_parent;
    tdata_t *md_tgt;
    alist_t *md_ta;     /* Type Association */
    alist_t *md_fdida;  /* Forward -> Definition ID Association */
    list_t **md_iitba;  /* iidesc_t nodes To Be Added to the parent */
    hash_t *md_tdtba;   /* tdesc_t nodes To Be Added to the parent */
    list_t **md_tdtbr;  /* tdesc_t nodes To Be Remapped */
    int md_flags;
}; /* merge_cb_data_t */

/*
 * When we first create a tdata_t from stabs data, we will have duplicate nodes.
 * Normal merges, however, assume that the child tdata_t is already self-unique,
 * and for speed reasons do not attempt to self-uniquify. If this flag is set,
 * the merge algorithm will self-uniquify by avoiding the insertion of
 * duplicates in the md_tdtba hash.
 */
#define MCD_F_SELFUNIQUIFY 0x1

/*
 * When we merge the CTF data for the modules, we don't want it to contain any
 * data that can be found in the reference module (usually genunix). If this
 * flag is set, we're doing a merge between the fully merged tdata_t for this
 * module and the tdata_t for the reference module, with the data unique to this
 * module ending up in a third tdata_t. It is this third tdata_t that will end
 * up in the .SUNW_ctf section for the module.
 */
#define MCD_F_REFMERGE 0x2

/*
 * Mapping of child type IDs to parent type IDs
 */

static void
add_mapping(alist_t *ta, tid_t srcid, tid_t tgtid)
{
    debug(3, "Adding mapping %u => %u\n", srcid, tgtid);

    assert(!alist_find(ta, (void *)srcid, NULL));
    assert(srcid != 0 && tgtid != 0);

    alist_add(ta, (void *)srcid, (void *)tgtid);
}

static tid_t
get_mapping(alist_t *ta, int srcid)
{
    long ltgtid;

    if (alist_find(ta, (void *)srcid, (void **)&ltgtid))
        return ((int)ltgtid);
    else
        return (0);
}

/*
 * Determining equivalence of tdesc_t subgraphs
 */

struct equiv_data {
    alist_t *ed_ta;
    tdesc_t *ed_node;
    tdesc_t *ed_tgt;

    int ed_clear_mark;
    int ed_cur_mark;
    int ed_selfuniquify;
}; /* equiv_data_t */

static int equiv_node(tdesc_t *, tdesc_t *, equiv_data_t *);

/*ARGSUSED2*/
static int
equiv_intrinsic(tdesc_t *stdp, tdesc_t *ttdp, equiv_data_t *ed)
{
    intr_t *si = stdp->t_intr;
    intr_t *ti = ttdp->t_intr;

    if (si->intr_type != ti->intr_type ||
        si->intr_signed != ti->intr_signed ||
        si->intr_offset != ti->intr_offset ||
        si->intr_nbits != ti->intr_nbits)
        return (0);

    if (si->intr_type == INTR_INT &&
        si->intr_iformat != ti->intr_iformat)
        return (0);
    else if (si->intr_type == INTR_REAL &&
        si->intr_fformat != ti->intr_fformat)
        return (0);

    return (1);
}

static int
equiv_plain(tdesc_t *stdp, tdesc_t *ttdp, equiv_data_t *ed)
{
    return (equiv_node(stdp->t_tdesc, ttdp->t_tdesc, ed));
}

static int
equiv_function(tdesc_t *stdp, tdesc_t *ttdp, equiv_data_t *ed)
{
    fndef_t *fn1 = stdp->t_fndef, *fn2 = ttdp->t_fndef;
    int i;

    if (fn1->fn_nargs != fn2->fn_nargs ||
        fn1->fn_vargs != fn2->fn_vargs)
        return (0);

    if (!equiv_node(fn1->fn_ret, fn2->fn_ret, ed))
        return (0);

    for (i = 0; i < fn1->fn_nargs; i++) {
        if (!equiv_node(fn1->fn_args[i], fn2->fn_args[i], ed))
            return (0);
    }

    return (1);
}

static int
equiv_array(tdesc_t *stdp, tdesc_t *ttdp, equiv_data_t *ed)
{
    ardef_t *ar1 = stdp->t_ardef, *ar2 = ttdp->t_ardef;

    if (!equiv_node(ar1->ad_contents, ar2->ad_contents, ed) ||
        !equiv_node(ar1->ad_idxtype, ar2->ad_idxtype, ed))
        return (0);

    if (ar1->ad_nelems != ar2->ad_nelems)
        return (0);

    return (1);
}

static int
equiv_su(tdesc_t *stdp, tdesc_t *ttdp, equiv_data_t *ed)
{
    mlist_t *ml1 = stdp->t_members, *ml2 = ttdp->t_members;
    mlist_t *olm1 = NULL;

    while (ml1 && ml2) {
        if (ml1->ml_offset != ml2->ml_offset ||
            strcmp(ml1->ml_name, ml2->ml_name) != 0)
            return (0);

        /*
         * Don't do the recursive equivalency checking more than
         * we have to.
         */
        if (olm1 == NULL || olm1->ml_type->t_id != ml1->ml_type->t_id) {
            if (ml1->ml_size != ml2->ml_size ||
                !equiv_node(ml1->ml_type, ml2->ml_type, ed))
                return (0);
        }

        olm1 = ml1;
        ml1 = ml1->ml_next;
        ml2 = ml2->ml_next;
    }

    if (ml1 || ml2)
        return (0);

    return (1);
}

/*ARGSUSED2*/
static int
equiv_enum(tdesc_t *stdp, tdesc_t *ttdp, equiv_data_t *ed)
{
    elist_t *el1 = stdp->t_emem;
    elist_t *el2 = ttdp->t_emem;

    while (el1 && el2) {
        if (el1->el_number != el2->el_number ||
            strcmp(el1->el_name, el2->el_name) != 0)
            return (0);

        el1 = el1->el_next;
        el2 = el2->el_next;
    }

    if (el1 || el2)
        return (0);

    return (1);
}

/*ARGSUSED*/
static int
equiv_assert(tdesc_t *stdp, tdesc_t *ttdp, equiv_data_t *ed)
{
    /* foul, evil, and very bad - this is a "shouldn't happen" */
    assert(1 == 0);

    return (0);
}

static int
fwd_equiv(tdesc_t *ctdp, tdesc_t *mtdp)
{
    tdesc_t *defn = (ctdp->t_type == FORWARD ? mtdp : ctdp);

    return (defn->t_type == STRUCT || defn->t_type == UNION);
}

static int
equiv_node(tdesc_t *ctdp, tdesc_t *mtdp, equiv_data_t *ed)
{
    int (*equiv)();
    int mapping;

    if (ctdp->t_emark > ed->ed_clear_mark ||
        mtdp->t_emark > ed->ed_clear_mark)
        return (ctdp->t_emark == mtdp->t_emark);

    /*
     * In normal (non-self-uniquify) mode, we don't want to do equivalency
     * checking on a subgraph that has already been checked. If a mapping
     * has already been established for a given child node, we can simply
     * compare the mapping for the child node with the ID of the parent
     * node. If we are in self-uniquify mode, then we're comparing two
     * subgraphs within the child graph, and thus need to ignore any
     * type mappings that have been created, as they are only valid into the
     * parent.
     */
    if ((mapping = get_mapping(ed->ed_ta, ctdp->t_id)) > 0 &&
        mapping == mtdp->t_id && !ed->ed_selfuniquify)
        return (1);

    if (!streq(ctdp->t_name, mtdp->t_name))
        return (0);

    if (ctdp->t_type != mtdp->t_type) {
        if (ctdp->t_type == FORWARD || mtdp->t_type == FORWARD)
            return (fwd_equiv(ctdp, mtdp));
        else
            return (0);
    }

    ctdp->t_emark = ed->ed_cur_mark;
    mtdp->t_emark = ed->ed_cur_mark;
    ed->ed_cur_mark++;

    if ((equiv = tdesc_ops[ctdp->t_type].equiv) != NULL)
        return (equiv(ctdp, mtdp, ed));

    return (1);
}

/*
 * We perform an equivalency check on two subgraphs by traversing through them
 * in lockstep. If a given node is equivalent in both the parent and the child,
 * we mark it in both subgraphs, using the t_emark field, with a monotonically
 * increasing number. If, in the course of the traversal, we reach a node that
 * we have visited and numbered during this equivalency check, we have a cycle.
 * If the previously-visited nodes don't have the same emark, then the edges
 * that brought us to these nodes are not equivalent, and so the check ends.
 * If the emarks are the same, the edges are equivalent. We then backtrack and
 * continue the traversal. If we have exhausted all edges in the subgraph, and
 * have not found any inequivalent nodes, then the subgraphs are equivalent.
 */
static int
equiv_cb(void *bucket, void *arg)
{
    equiv_data_t *ed = arg;
    tdesc_t *mtdp = bucket;
    tdesc_t *ctdp = ed->ed_node;

    ed->ed_clear_mark = ed->ed_cur_mark + 1;
    ed->ed_cur_mark = ed->ed_clear_mark + 1;

    if (equiv_node(ctdp, mtdp, ed)) {
        debug(3, "equiv_node matched %d %d\n", ctdp->t_id, mtdp->t_id);
        ed->ed_tgt = mtdp;
        /* matched. stop looking */
        return (-1);
    }

    return (0);
}

/*ARGSUSED1*/
static int
map_td_tree_pre(tdesc_t *ctdp, tdesc_t **ctdpp, void *private)
{
    merge_cb_data_t *mcd = private;

    if (get_mapping(mcd->md_ta, ctdp->t_id) > 0)
        return (0);

    return (1);
}

/*ARGSUSED1*/
static int
map_td_tree_post(tdesc_t *ctdp, tdesc_t **ctdpp, void *private)
{
    merge_cb_data_t *mcd = private;
    equiv_data_t ed;

    ed.ed_ta = mcd->md_ta;
    ed.ed_clear_mark = mcd->md_parent->td_curemark;
    ed.ed_cur_mark = mcd->md_parent->td_curemark + 1;
    ed.ed_node = ctdp;
    ed.ed_selfuniquify = 0;

    debug(3, "map_td_tree_post on %d %s\n", ctdp->t_id,
        ctdp->t_name == NULL ? "(anon)" : ctdp->t_name);

    if (hash_find_iter(mcd->md_parent->td_layouthash, ctdp,
        equiv_cb, &ed) < 0) {
        /* We found an equivalent node */
        if (ed.ed_tgt->t_type == FORWARD && ctdp->t_type != FORWARD) {
            int id = mcd->md_tgt->td_nextid++;

            debug(3, "Creating new defn type %d\n", id);
            add_mapping(mcd->md_ta, ctdp->t_id, id);
            alist_add(mcd->md_fdida, (void *)(ulong_t)ed.ed_tgt,
                (void *)(ulong_t)id);
            hash_add(mcd->md_tdtba, ctdp);
        } else
            add_mapping(mcd->md_ta, ctdp->t_id, ed.ed_tgt->t_id);

    } else if (debug_level > 1 && hash_iter(mcd->md_parent->td_idhash,
        equiv_cb, &ed) < 0) {
        /*
         * We didn't find an equivalent node by looking through the
         * layout hash, but we somehow found it by performing an
         * exhaustive search through the entire graph. This usually
         * means that the "name" hash function is broken.
         */
        terminate("Second pass for %d (%s) == %d\n", ctdp->t_id,
            (ctdp->t_name ? ctdp->t_name : "(anon)"), ed.ed_tgt->t_id);
    } else {
        int id = mcd->md_tgt->td_nextid++;

        debug(3, "Creating new type %d\n", id);
        add_mapping(mcd->md_ta, ctdp->t_id, id);
        hash_add(mcd->md_tdtba, ctdp);
    }

    mcd->md_parent->td_curemark = ed.ed_cur_mark + 1;

    return (1);
}

/*ARGSUSED1*/
static int
map_td_tree_self_post(tdesc_t *ctdp, tdesc_t **ctdpp, void *private)
{
    merge_cb_data_t *mcd = private;
    equiv_data_t ed;

    ed.ed_ta = mcd->md_ta;
    ed.ed_clear_mark = mcd->md_parent->td_curemark;
    ed.ed_cur_mark = mcd->md_parent->td_curemark + 1;
    ed.ed_node = ctdp;
    ed.ed_selfuniquify = 1;
    ed.ed_tgt = NULL;

    if (hash_find_iter(mcd->md_tdtba, ctdp, equiv_cb, &ed) < 0) {
        debug(3, "Self check found %d in %d\n", ctdp->t_id,
            ed.ed_tgt->t_id);
        add_mapping(mcd->md_ta, ctdp->t_id,
            get_mapping(mcd->md_ta, ed.ed_tgt->t_id));
    } else if (debug_level > 1 && hash_iter(mcd->md_tdtba,
        equiv_cb, &ed) < 0) {
        /*
         * We didn't find an equivalent node using the quick way (going
         * through the hash normally), but we did find it by iterating
         * through the entire hash. This usually means that the hash
         * function is broken.
         */
        terminate("Self-unique second pass for %d (%s) == %d\n",
            ctdp->t_id, (ctdp->t_name ? ctdp->t_name : "(anon)"),
            ed.ed_tgt->t_id);
    } else {
        int id = mcd->md_tgt->td_nextid++;

        debug(3, "Creating new type %d\n", id);
        add_mapping(mcd->md_ta, ctdp->t_id, id);
        hash_add(mcd->md_tdtba, ctdp);
    }

    mcd->md_parent->td_curemark = ed.ed_cur_mark + 1;

    return (1);
}

static tdtrav_cb_f map_pre[] = {
    NULL,
    map_td_tree_pre,    /* intrinsic */
    map_td_tree_pre,    /* pointer */
    map_td_tree_pre,    /* array */
    map_td_tree_pre,    /* function */
    map_td_tree_pre,    /* struct */
    map_td_tree_pre,    /* union */
    map_td_tree_pre,    /* enum */
    map_td_tree_pre,    /* forward */
    map_td_tree_pre,    /* typedef */
    tdtrav_assert,      /* typedef_unres */
    map_td_tree_pre,    /* volatile */
    map_td_tree_pre,    /* const */
    map_td_tree_pre     /* restrict */
};

static tdtrav_cb_f map_post[] = {
    NULL,
    map_td_tree_post,   /* intrinsic */
    map_td_tree_post,   /* pointer */
    map_td_tree_post,   /* array */
    map_td_tree_post,   /* function */
    map_td_tree_post,   /* struct */
    map_td_tree_post,   /* union */
    map_td_tree_post,   /* enum */
    map_td_tree_post,   /* forward */
    map_td_tree_post,   /* typedef */
    tdtrav_assert,      /* typedef_unres */
    map_td_tree_post,   /* volatile */
    map_td_tree_post,   /* const */
    map_td_tree_post    /* restrict */
};

static tdtrav_cb_f map_self_post[] = {
    NULL,
    map_td_tree_self_post,  /* intrinsic */
    map_td_tree_self_post,  /* pointer */
    map_td_tree_self_post,  /* array */
    map_td_tree_self_post,  /* function */
    map_td_tree_self_post,  /* struct */
    map_td_tree_self_post,  /* union */
    map_td_tree_self_post,  /* enum */
    map_td_tree_self_post,  /* forward */
    map_td_tree_self_post,  /* typedef */
    tdtrav_assert,          /* typedef_unres */
    map_td_tree_self_post,  /* volatile */
    map_td_tree_self_post,  /* const */
    map_td_tree_self_post   /* restrict */
};

/*
 * Determining equivalence of iidesc_t nodes
 */

typedef struct iifind_data {
    iidesc_t *iif_template;
    alist_t *iif_ta;
    int iif_newidx;
    int iif_refmerge;
} iifind_data_t;

/*
 * Check to see if this iidesc_t (node) - the current one on the list we're
 * iterating through - matches the target one (iif->iif_template). Return -1
 * if it matches, to stop the iteration.
 */
static int
iidesc_match(void *data, void *arg)
{
    iidesc_t *node = data;
    iifind_data_t *iif = arg;
    int i;

    if (node->ii_type != iif->iif_template->ii_type ||
        !streq(node->ii_name, iif->iif_template->ii_name) ||
        node->ii_dtype->t_id != iif->iif_newidx)
        return (0);

    if ((node->ii_type == II_SVAR || node->ii_type == II_SFUN) &&
        !streq(node->ii_owner, iif->iif_template->ii_owner))
        return (0);

    if (node->ii_nargs != iif->iif_template->ii_nargs)
        return (0);

    for (i = 0; i < node->ii_nargs; i++) {
        if (get_mapping(iif->iif_ta,
            iif->iif_template->ii_args[i]->t_id) !=
            node->ii_args[i]->t_id)
            return (0);
    }

    if (iif->iif_refmerge) {
        switch (iif->iif_template->ii_type) {
        case II_GFUN:
        case II_SFUN:
        case II_GVAR:
        case II_SVAR:
            debug(3, "suppressing duping of %d %s from %s\n",
                iif->iif_template->ii_type,
                iif->iif_template->ii_name,
                (iif->iif_template->ii_owner ?
                iif->iif_template->ii_owner : "NULL"));
            return (0);
        case II_NOT:
        case II_PSYM:
        case II_SOU:
        case II_TYPE:
            break;
        }
    }

    return (-1);
}

static int
merge_type_cb(void *data, void *arg)
{
    iidesc_t *sii = data;
    merge_cb_data_t *mcd = arg;
    iifind_data_t iif;
    tdtrav_cb_f *post;

    post = (mcd->md_flags & MCD_F_SELFUNIQUIFY ? map_self_post : map_post);

    /* Map the tdesc nodes */
    (void) iitraverse(sii, &mcd->md_parent->td_curvgen, NULL, map_pre, post,
        mcd);

    /* Map the iidesc nodes */
    iif.iif_template = sii;
    iif.iif_ta = mcd->md_ta;
    iif.iif_newidx = get_mapping(mcd->md_ta, sii->ii_dtype->t_id);
    iif.iif_refmerge = (mcd->md_flags & MCD_F_REFMERGE);

    if (hash_match(mcd->md_parent->td_iihash, sii, iidesc_match,
        &iif) == 1)
        /* successfully mapped */
        return (1);

    debug(3, "tba %s (%d)\n", (sii->ii_name ? sii->ii_name : "(anon)"),
        sii->ii_type);

    list_add(mcd->md_iitba, sii);

    return (0);
}

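/*
 * Re-creation of tdesc_t nodes in the parent (step 2 of the algorithm).
 * remap_node() points a link in a conjured node at the parent equivalent
 * of its child-graph target, deferring the link to md_tdtbr when that
 * target hasn't been conjured yet. The conjure_* routines each copy the
 * type-specific data for one kind of tdesc_t, using conjure_template()
 * for the fields common to all types.
 */
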
static int
remap_node(tdesc_t **tgtp, tdesc_t *oldtgt, int selftid, tdesc_t *newself,
    merge_cb_data_t *mcd)
{
    tdesc_t *tgt = NULL;
    tdesc_t template;
    int oldid = oldtgt->t_id;

    if (oldid == selftid) {
        *tgtp = newself;
        return (1);
    }

    if ((template.t_id = get_mapping(mcd->md_ta, oldid)) == 0)
        terminate("failed to get mapping for tid %d\n", oldid);

    if (!hash_find(mcd->md_parent->td_idhash, (void *)&template,
        (void *)&tgt) && (!(mcd->md_flags & MCD_F_REFMERGE) ||
        !hash_find(mcd->md_tgt->td_idhash, (void *)&template,
        (void *)&tgt))) {
        debug(3, "Remap couldn't find %d (from %d)\n", template.t_id,
            oldid);
        *tgtp = oldtgt;
        list_add(mcd->md_tdtbr, tgtp);
        return (0);
    }

    *tgtp = tgt;
    return (1);
}

static tdesc_t *
conjure_template(tdesc_t *old, int newselfid)
{
    tdesc_t *new = xcalloc(sizeof (tdesc_t));

    new->t_name = old->t_name ? xstrdup(old->t_name) : NULL;
    new->t_type = old->t_type;
    new->t_size = old->t_size;
    new->t_id = newselfid;
    new->t_flags = old->t_flags;

    return (new);
}

/*ARGSUSED2*/
static tdesc_t *
conjure_intrinsic(tdesc_t *old, int newselfid, merge_cb_data_t *mcd)
{
    tdesc_t *new = conjure_template(old, newselfid);

    new->t_intr = xmalloc(sizeof (intr_t));
    bcopy(old->t_intr, new->t_intr, sizeof (intr_t));

    return (new);
}

static tdesc_t *
conjure_plain(tdesc_t *old, int newselfid, merge_cb_data_t *mcd)
{
    tdesc_t *new = conjure_template(old, newselfid);

    (void) remap_node(&new->t_tdesc, old->t_tdesc, old->t_id, new, mcd);

    return (new);
}

static tdesc_t *
conjure_function(tdesc_t *old, int newselfid, merge_cb_data_t *mcd)
{
    tdesc_t *new = conjure_template(old, newselfid);
    fndef_t *nfn = xmalloc(sizeof (fndef_t));
    fndef_t *ofn = old->t_fndef;
    int i;

    (void) remap_node(&nfn->fn_ret, ofn->fn_ret, old->t_id, new, mcd);

    nfn->fn_nargs = ofn->fn_nargs;
    nfn->fn_vargs = ofn->fn_vargs;

    if (nfn->fn_nargs > 0)
        nfn->fn_args = xcalloc(sizeof (tdesc_t *) * ofn->fn_nargs);

    for (i = 0; i < ofn->fn_nargs; i++) {
        (void) remap_node(&nfn->fn_args[i], ofn->fn_args[i], old->t_id,
            new, mcd);
    }

    new->t_fndef = nfn;

    return (new);
}

static tdesc_t *
conjure_array(tdesc_t *old, int newselfid, merge_cb_data_t *mcd)
{
    tdesc_t *new = conjure_template(old, newselfid);
    ardef_t *nar = xmalloc(sizeof (ardef_t));
    ardef_t *oar = old->t_ardef;

    (void) remap_node(&nar->ad_contents, oar->ad_contents, old->t_id, new,
        mcd);
    (void) remap_node(&nar->ad_idxtype, oar->ad_idxtype, old->t_id, new,
        mcd);

    nar->ad_nelems = oar->ad_nelems;

    new->t_ardef = nar;

    return (new);
}

static tdesc_t *
conjure_su(tdesc_t *old, int newselfid, merge_cb_data_t *mcd)
{
    tdesc_t *new = conjure_template(old, newselfid);
    mlist_t *omem, **nmemp;

    for (omem = old->t_members, nmemp = &new->t_members;
        omem; omem = omem->ml_next, nmemp = &((*nmemp)->ml_next)) {
        *nmemp = xmalloc(sizeof (mlist_t));
        (*nmemp)->ml_offset = omem->ml_offset;
        (*nmemp)->ml_size = omem->ml_size;
        (*nmemp)->ml_name = xstrdup(omem->ml_name);
        (void) remap_node(&((*nmemp)->ml_type), omem->ml_type,
            old->t_id, new, mcd);
    }
    *nmemp = NULL;

    return (new);
}

/*ARGSUSED2*/
static tdesc_t *
conjure_enum(tdesc_t *old, int newselfid, merge_cb_data_t *mcd)
{
    tdesc_t *new = conjure_template(old, newselfid);
    elist_t *oel, **nelp;

    for (oel = old->t_emem, nelp = &new->t_emem;
        oel; oel = oel->el_next, nelp = &((*nelp)->el_next)) {
        *nelp = xmalloc(sizeof (elist_t));
        (*nelp)->el_name = xstrdup(oel->el_name);
        (*nelp)->el_number = oel->el_number;
    }
    *nelp = NULL;

    return (new);
}

/*ARGSUSED2*/
static tdesc_t *
conjure_forward(tdesc_t *old, int newselfid, merge_cb_data_t *mcd)
{
    tdesc_t *new = conjure_template(old, newselfid);

    list_add(&mcd->md_tgt->td_fwdlist, new);

    return (new);
}

/*ARGSUSED*/
static tdesc_t *
conjure_assert(tdesc_t *old, int newselfid, merge_cb_data_t *mcd)
{
    assert(1 == 0);
    return (NULL);
}

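/*
 * Re-creation of iidesc_t nodes in the parent (step 3 of the algorithm):
 * conjure_iidesc() duplicates a child iidesc_t and uses remap_node() to
 * redirect its tdesc_t pointers into the parent graph.
 */
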
static iidesc_t *
conjure_iidesc(iidesc_t *old, merge_cb_data_t *mcd)
{
    iidesc_t *new = iidesc_dup(old);
    int i;

    (void) remap_node(&new->ii_dtype, old->ii_dtype, -1, NULL, mcd);
    for (i = 0; i < new->ii_nargs; i++) {
        (void) remap_node(&new->ii_args[i], old->ii_args[i], -1, NULL,
            mcd);
    }

    return (new);
}

static int
fwd_redir(tdesc_t *fwd, tdesc_t **fwdp, void *private)
{
    alist_t *map = private;
    tdesc_t *defn;

    if (!alist_find(map, (void *)fwd, (void **)&defn))
        return (0);

    debug(3, "Redirecting an edge to %s\n",
        (defn->t_name ? defn->t_name : "(anon)"));

    *fwdp = defn;

    return (1);
}

static tdtrav_cb_f fwd_redir_cbs[] = {
    NULL,
    NULL,           /* intrinsic */
    NULL,           /* pointer */
    NULL,           /* array */
    NULL,           /* function */
    NULL,           /* struct */
    NULL,           /* union */
    NULL,           /* enum */
    fwd_redir,      /* forward */
    NULL,           /* typedef */
    tdtrav_assert,  /* typedef_unres */
    NULL,           /* volatile */
    NULL,           /* const */
    NULL            /* restrict */
};

typedef struct redir_mstr_data {
    tdata_t *rmd_tgt;
    alist_t *rmd_map;
} redir_mstr_data_t;

static int
redir_mstr_fwd_cb(void *name, void *value, void *arg)
{
    tdesc_t *fwd = name;
    int defnid = (int)value;
    redir_mstr_data_t *rmd = arg;
    tdesc_t template;
    tdesc_t *defn;

    template.t_id = defnid;

    if (!hash_find(rmd->rmd_tgt->td_idhash, (void *)&template,
        (void *)&defn)) {
        terminate("Couldn't unforward %d (%s)\n", defnid,
            (defn->t_name ? defn->t_name : "(anon)"));
    }

    debug(3, "Forward map: resolved %d to %s\n",
        defnid, (defn->t_name ? defn->t_name : "(anon)"));

    alist_add(rmd->rmd_map, (void *)fwd, (void *)defn);

    return (1);
}

static void
redir_mstr_fwds(merge_cb_data_t *mcd)
{
    redir_mstr_data_t rmd;
    alist_t *map = alist_new(NULL, NULL);

    rmd.rmd_tgt = mcd->md_tgt;
    rmd.rmd_map = map;

    if (alist_iter(mcd->md_fdida, redir_mstr_fwd_cb, &rmd)) {
        (void) iitraverse_hash(mcd->md_tgt->td_iihash,
            &mcd->md_tgt->td_curvgen, fwd_redir_cbs, NULL, NULL, map);
    }

    alist_free(map);
}

static int
add_iitba_cb(void *data, void *private)
{
    merge_cb_data_t *mcd = private;
    iidesc_t *tba = data;
    iidesc_t *new;
    iifind_data_t iif;
    int newidx;

    newidx = get_mapping(mcd->md_ta, tba->ii_dtype->t_id);
    assert(newidx != -1);

    (void) list_remove(mcd->md_iitba, data, NULL, NULL);

    iif.iif_template = tba;
    iif.iif_ta = mcd->md_ta;
    iif.iif_newidx = newidx;
    iif.iif_refmerge = (mcd->md_flags & MCD_F_REFMERGE);

    if (hash_match(mcd->md_parent->td_iihash, tba, iidesc_match,
        &iif) == 1) {
        debug(3, "iidesc_t %s already exists\n",
            (tba->ii_name ? tba->ii_name : "(anon)"));
        return (1);
    }

    new = conjure_iidesc(tba, mcd);
    hash_add(mcd->md_tgt->td_iihash, new);

    return (1);
}

static int
add_tdesc(tdesc_t *oldtdp, int newid, merge_cb_data_t *mcd)
{
    tdesc_t *newtdp;
    tdesc_t template;

    template.t_id = newid;
    assert(hash_find(mcd->md_parent->td_idhash,
        (void *)&template, NULL) == 0);

    debug(3, "trying to conjure %d %s (%d) as %d\n",
        oldtdp->t_type, (oldtdp->t_name ? oldtdp->t_name : "(anon)"),
        oldtdp->t_id, newid);

    if ((newtdp = tdesc_ops[oldtdp->t_type].conjure(oldtdp, newid,
        mcd)) == NULL)
        /* couldn't map everything */
        return (0);

    debug(3, "succeeded\n");

    hash_add(mcd->md_tgt->td_idhash, newtdp);
    hash_add(mcd->md_tgt->td_layouthash, newtdp);

    return (1);
}

static int
add_tdtba_cb(void *data, void *arg)
{
    tdesc_t *tdp = data;
    merge_cb_data_t *mcd = arg;
    int newid;
    int rc;

    newid = get_mapping(mcd->md_ta, tdp->t_id);
    assert(newid != -1);

    if ((rc = add_tdesc(tdp, newid, mcd)))
        hash_remove(mcd->md_tdtba, (void *)tdp);

    return (rc);
}

static int
add_tdtbr_cb(void *data, void *arg)
{
    tdesc_t **tdpp = data;
    merge_cb_data_t *mcd = arg;

    debug(3, "Remapping %s (%d)\n",
        ((*tdpp)->t_name ? (*tdpp)->t_name : "(anon)"), (*tdpp)->t_id);

    if (!remap_node(tdpp, *tdpp, -1, NULL, mcd))
        return (0);

    (void) list_remove(mcd->md_tdtbr, (void *)tdpp, NULL, NULL);
    return (1);
}

static void
merge_types(hash_t *src, merge_cb_data_t *mcd)
{
    list_t *iitba = NULL;
    list_t *tdtbr = NULL;
    int iirc, tdrc;

    mcd->md_iitba = &iitba;
    mcd->md_tdtba = hash_new(TDATA_LAYOUT_HASH_SIZE, tdesc_layouthash,
        tdesc_layoutcmp);
    mcd->md_tdtbr = &tdtbr;

    (void) hash_iter(src, merge_type_cb, mcd);

    tdrc = hash_iter(mcd->md_tdtba, add_tdtba_cb, (void *)mcd);
    debug(3, "add_tdtba_cb added %d items\n", tdrc);

    iirc = list_iter(*mcd->md_iitba, add_iitba_cb, (void *)mcd);
    debug(3, "add_iitba_cb added %d items\n", iirc);

    assert(list_count(*mcd->md_iitba) == 0 &&
        hash_count(mcd->md_tdtba) == 0);

    tdrc = list_iter(*mcd->md_tdtbr, add_tdtbr_cb, (void *)mcd);
    debug(3, "add_tdtbr_cb added %d items\n", tdrc);

    if (list_count(*mcd->md_tdtbr) != 0)
        terminate("Couldn't remap all nodes\n");

    /*
     * We now have an alist of master forwards and the ids of the new master
     * definitions for those forwards in mcd->md_fdida. By this point,
     * we're guaranteed that all of the master definitions referenced in
     * fdida have been added to the master tree. We now traverse through
     * the master tree, redirecting all edges inbound to forwards that have
     * definitions to those definitions.
     */
    if (mcd->md_parent == mcd->md_tgt) {
        redir_mstr_fwds(mcd);
    }
}

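/*
 * Merge the child tdata_t (cur) into the parent (mstr). If tgt is non-NULL,
 * this is a reference merge: types and iidesc_t nodes unique to cur are
 * created in tgt rather than in mstr. If selfuniquify is set, cur may still
 * contain duplicate nodes (as with a tdata_t freshly created from stabs
 * data), and the merge will avoid re-creating those duplicates.
 */
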
void
merge_into_master(tdata_t *cur, tdata_t *mstr, tdata_t *tgt, int selfuniquify)
{
    merge_cb_data_t mcd;

    cur->td_ref++;
    mstr->td_ref++;
    if (tgt)
        tgt->td_ref++;

    assert(cur->td_ref == 1 && mstr->td_ref == 1 &&
        (tgt == NULL || tgt->td_ref == 1));

    mcd.md_parent = mstr;
    mcd.md_tgt = (tgt ? tgt : mstr);
    mcd.md_ta = alist_new(NULL, NULL);
    mcd.md_fdida = alist_new(NULL, NULL);
    mcd.md_flags = 0;

    if (selfuniquify)
        mcd.md_flags |= MCD_F_SELFUNIQUIFY;
    if (tgt)
        mcd.md_flags |= MCD_F_REFMERGE;

    mstr->td_curvgen = MAX(mstr->td_curvgen, cur->td_curvgen);
    mstr->td_curemark = MAX(mstr->td_curemark, cur->td_curemark);

    merge_types(cur->td_iihash, &mcd);

    if (debug_level >= 3) {
        debug(3, "Type association stats\n");
        alist_stats(mcd.md_ta, 0);
        debug(3, "Layout hash stats\n");
        hash_stats(mcd.md_tgt->td_layouthash, 1);
    }

    alist_free(mcd.md_fdida);
    alist_free(mcd.md_ta);

    cur->td_ref--;
    mstr->td_ref--;
    if (tgt)
        tgt->td_ref--;
}

tdesc_ops_t tdesc_ops[] = {
    { "ERROR! BAD tdesc TYPE", NULL, NULL },
    { "intrinsic", equiv_intrinsic, conjure_intrinsic },
    { "pointer", equiv_plain, conjure_plain },
    { "array", equiv_array, conjure_array },
    { "function", equiv_function, conjure_function },
    { "struct", equiv_su, conjure_su },
    { "union", equiv_su, conjure_su },
    { "enum", equiv_enum, conjure_enum },
    { "forward", NULL, conjure_forward },
    { "typedef", equiv_plain, conjure_plain },
    { "typedef_unres", equiv_assert, conjure_assert },
    { "volatile", equiv_plain, conjure_plain },
    { "const", equiv_plain, conjure_plain },
    { "restrict", equiv_plain, conjure_plain }
};