Changeset View
Changeset View
Standalone View
Standalone View
source/blender/blenloader/intern/readfile.c
- This file is larger than 256 KB, so syntax highlighting is disabled by default.
| Show First 20 Lines • Show All 9,409 Lines • ▼ Show 20 Lines | static BHead *read_libblock(FileData *fd, | ||||
| Main *main, | Main *main, | ||||
| BHead *bhead, | BHead *bhead, | ||||
| const int tag, | const int tag, | ||||
| const bool placeholder_set_indirect_extern, | const bool placeholder_set_indirect_extern, | ||||
| ID **r_id) | ID **r_id) | ||||
| { | { | ||||
| /* This routine reads a libblock and its direct data. Lib link functions will | /* This routine reads a libblock and its direct data. Lib link functions will | ||||
| * set points between datablocks. */ | * set points between datablocks. */ | ||||
| if (r_id) { | |||||
| *r_id = NULL; /* In case of early return. */ | |||||
| } | |||||
| /* Read libblock struct. */ | |||||
| fd->are_memchunks_identical = true; | |||||
| ID *id = read_struct(fd, bhead, "lib block"); | |||||
mont29: putting this before the linked data processing in case of undo is a step back in performances… | |||||
brechtAuthorUnsubmitted Done Inline ActionsThe reason was to simplify the control flow. But after refactoring it's become relatively easy to avoid this memory allocation, also for the case where we are restoring local datablocks. Done in D7336: Cleanup: avoid memory allocation for unchanged datablocks in undo. I'd rather not update this patch because it requires me to update the other 6 patches. brecht: The reason was to simplify the control flow. But after refactoring it's become relatively easy… | |||||
| if (id == NULL) { | |||||
| return blo_bhead_next(fd, bhead); | |||||
| } | |||||
| /* Determine ID type. */ | |||||
| const short idcode = GS(id->name); | |||||
| ListBase *lb = which_libbase(main, idcode); | |||||
| if (lb == NULL) { | |||||
| /* Unknown ID type. */ | |||||
| printf("%s: unknown id code '%c%c'\n", __func__, (idcode & 0xff), (idcode >> 8)); | |||||
| MEM_freeN(id); | |||||
| return blo_bhead_next(fd, bhead); | |||||
| } | |||||
| /* In undo case, most libs and linked data should be kept as is from previous state | /* In undo case, most libs and linked data should be kept as is from previous state | ||||
| * (see BLO_read_from_memfile). | * (see BLO_read_from_memfile). | ||||
| * However, some needed by the snapshot being read may have been removed in previous one, | * However, some needed by the snapshot being read may have been removed in previous one, | ||||
| * and would go missing. | * and would go missing. | ||||
| * This leads e.g. to disappearing objects in some undo/redo case, see T34446. | * This leads e.g. to disappearing objects in some undo/redo case, see T34446. | ||||
| * That means we have to carefully check whether current lib or | * That means we have to carefully check whether current lib or | ||||
| * libdata already exists in old main, if it does we merely copy it over into new main area, | * libdata already exists in old main, if it does we merely copy it over into new main area, | ||||
| * otherwise we have to do a full read of that bhead... */ | * otherwise we have to do a full read of that bhead... */ | ||||
| if (fd->memfile && ELEM(bhead->code, ID_LI, ID_LINK_PLACEHOLDER)) { | if (fd->memfile != NULL && ELEM(bhead->code, ID_LI, ID_LINK_PLACEHOLDER)) { | ||||
| const char *idname = blo_bhead_id_name(fd, bhead); | DEBUG_PRINTF("Checking %s...\n", id->name); | ||||
| DEBUG_PRINTF("Checking %s...\n", idname); | |||||
| if (bhead->code == ID_LI) { | if (bhead->code == ID_LI) { | ||||
| Main *libmain = fd->old_mainlist->first; | Main *libmain = fd->old_mainlist->first; | ||||
| /* Skip oldmain itself... */ | /* Skip oldmain itself... */ | ||||
| for (libmain = libmain->next; libmain; libmain = libmain->next) { | for (libmain = libmain->next; libmain; libmain = libmain->next) { | ||||
| DEBUG_PRINTF("... against %s: ", libmain->curlib ? libmain->curlib->id.name : "<NULL>"); | DEBUG_PRINTF("... against %s: ", libmain->curlib ? libmain->curlib->id.name : "<NULL>"); | ||||
| if (libmain->curlib && STREQ(idname, libmain->curlib->id.name)) { | if (libmain->curlib && STREQ(id->name, libmain->curlib->id.name)) { | ||||
| Main *oldmain = fd->old_mainlist->first; | Main *oldmain = fd->old_mainlist->first; | ||||
| DEBUG_PRINTF("FOUND!\n"); | DEBUG_PRINTF("FOUND!\n"); | ||||
| /* In case of a library, we need to re-add its main to fd->mainlist, | /* In case of a library, we need to re-add its main to fd->mainlist, | ||||
| * because if we have later a missing ID_LINK_PLACEHOLDER, | * because if we have later a missing ID_LINK_PLACEHOLDER, | ||||
| * we need to get the correct lib it is linked to! | * we need to get the correct lib it is linked to! | ||||
| * Order is crucial, we cannot bulk-add it in BLO_read_from_memfile() | * Order is crucial, we cannot bulk-add it in BLO_read_from_memfile() | ||||
| * like it used to be. */ | * like it used to be. */ | ||||
| BLI_remlink(fd->old_mainlist, libmain); | BLI_remlink(fd->old_mainlist, libmain); | ||||
| BLI_remlink_safe(&oldmain->libraries, libmain->curlib); | BLI_remlink_safe(&oldmain->libraries, libmain->curlib); | ||||
| BLI_addtail(fd->mainlist, libmain); | BLI_addtail(fd->mainlist, libmain); | ||||
| BLI_addtail(&main->libraries, libmain->curlib); | BLI_addtail(&main->libraries, libmain->curlib); | ||||
| if (r_id) { | MEM_freeN(id); | ||||
| *r_id = NULL; /* Just in case... */ | |||||
| } | |||||
| return blo_bhead_next(fd, bhead); | return blo_bhead_next(fd, bhead); | ||||
| } | } | ||||
| DEBUG_PRINTF("nothing...\n"); | DEBUG_PRINTF("nothing...\n"); | ||||
| } | } | ||||
| } | } | ||||
| else { | else { | ||||
| DEBUG_PRINTF("... in %s (%s): ", | DEBUG_PRINTF("... in %s (%s): ", | ||||
| main->curlib ? main->curlib->id.name : "<NULL>", | main->curlib ? main->curlib->id.name : "<NULL>", | ||||
| main->curlib ? main->curlib->name : "<NULL>"); | main->curlib ? main->curlib->name : "<NULL>"); | ||||
| ID *id = BKE_libblock_find_name(main, GS(idname), idname + 2); | ID *existing_id = BKE_libblock_find_name(main, GS(id->name), id->name + 2); | ||||
| if (id != NULL) { | if (existing_id != NULL) { | ||||
| DEBUG_PRINTF("FOUND!\n"); | DEBUG_PRINTF("FOUND!\n"); | ||||
| /* Even though we found our linked ID, | /* Even though we found our linked ID, | ||||
| * there is no guarantee its address is still the same. */ | * there is no guarantee its address is still the same. */ | ||||
| if (id != bhead->old) { | if (existing_id != bhead->old) { | ||||
| oldnewmap_insert(fd->libmap, bhead->old, id, GS(id->name)); | oldnewmap_insert(fd->libmap, bhead->old, existing_id, GS(existing_id->name)); | ||||
| } | } | ||||
| /* No need to do anything else for ID_LINK_PLACEHOLDER, | /* No need to do anything else for ID_LINK_PLACEHOLDER, | ||||
| * it's assumed already present in its lib's main. */ | * it's assumed already present in its lib's main. */ | ||||
| if (r_id) { | MEM_freeN(id); | ||||
| *r_id = NULL; /* Just in case... */ | |||||
| } | |||||
| return blo_bhead_next(fd, bhead); | return blo_bhead_next(fd, bhead); | ||||
| } | } | ||||
| DEBUG_PRINTF("nothing...\n"); | DEBUG_PRINTF("nothing...\n"); | ||||
| } | } | ||||
| } | } | ||||
| /* read libblock */ | /* read libblock */ | ||||
| fd->are_memchunks_identical = true; | |||||
| ID *id = read_struct(fd, bhead, "lib block"); | |||||
| const short idcode = id != NULL ? GS(id->name) : 0; | |||||
| BHead *id_bhead = bhead; | BHead *id_bhead = bhead; | ||||
| /* Used when undoing from memfile, we swap changed IDs into their old addresses when found. */ | |||||
| ID *id_old = NULL; | |||||
| bool do_id_swap = false; | |||||
| if (id != NULL) { | |||||
| const bool do_partial_undo = (fd->skip_flags & BLO_READ_SKIP_UNDO_OLD_MAIN) == 0; | |||||
| if (id_bhead->code != ID_LINK_PLACEHOLDER) { | if (id_bhead->code != ID_LINK_PLACEHOLDER) { | ||||
| /* need a name for the mallocN, just for debugging and sane prints on leaks */ | /* need a name for the mallocN, just for debugging and sane prints on leaks */ | ||||
| const char *allocname = dataname(idcode); | const char *allocname = dataname(idcode); | ||||
| /* read all data into fd->datamap */ | /* read all data into fd->datamap */ | ||||
| /* TODO: instead of building oldnewmap here we could just quickly check the bheads... could | /* TODO: for the undo case instead of building oldnewmap here we could just quickly check the | ||||
| * save some more ticks. Probably not worth it though, bottleneck is full depsgraph rebuild | * bheads... could save some more ticks. Probably not worth it though, bottleneck is full | ||||
| * and evaluate, not actual file reading. */ | * depsgraph rebuild and evaluate, not actual file reading. */ | ||||
| bhead = read_data_into_oldnewmap(fd, id_bhead, allocname); | bhead = read_data_into_oldnewmap(fd, id_bhead, allocname); | ||||
| } | |||||
| /* Restore existing datablocks for undo. */ | |||||
| const bool do_partial_undo = (fd->skip_flags & BLO_READ_SKIP_UNDO_OLD_MAIN) == 0; | |||||
| /* Used when undoing from memfile, we swap changed IDs into their old addresses when found. */ | |||||
| ID *id_old = NULL; | |||||
| bool do_id_swap = false; | |||||
| if (fd->memfile != NULL) { | |||||
| if (id_bhead->code != ID_LINK_PLACEHOLDER) { | |||||
| DEBUG_PRINTF( | DEBUG_PRINTF( | ||||
| "%s: ID %s is unchanged: %d\n", __func__, id->name, fd->are_memchunks_identical); | "%s: ID %s is unchanged: %d\n", __func__, id->name, fd->are_memchunks_identical); | ||||
| if (fd->memfile != NULL) { | |||||
| BLI_assert(fd->old_idmap != NULL || !do_partial_undo); | BLI_assert(fd->old_idmap != NULL || !do_partial_undo); | ||||
| /* This code should only ever be reached for local data-blocks. */ | /* This code should only ever be reached for local data-blocks. */ | ||||
| BLI_assert(main->curlib == NULL); | BLI_assert(main->curlib == NULL); | ||||
| /* Find the 'current' existing ID we want to reuse instead of the one we would read from | /* Find the 'current' existing ID we want to reuse instead of the one we would read from | ||||
| * the undo memfile. */ | * the undo memfile. */ | ||||
| DEBUG_PRINTF("\t Looking for ID %s with uuid %u instead of newly read one\n", | DEBUG_PRINTF("\t Looking for ID %s with uuid %u instead of newly read one\n", | ||||
| id->name, | id->name, | ||||
| id->session_uuid); | id->session_uuid); | ||||
| id_old = do_partial_undo ? BKE_main_idmap_lookup_uuid(fd->old_idmap, id->session_uuid) : | id_old = do_partial_undo ? BKE_main_idmap_lookup_uuid(fd->old_idmap, id->session_uuid) : | ||||
| NULL; | NULL; | ||||
| bool can_finalize_and_return = false; | bool can_finalize_and_return = false; | ||||
| if (ELEM(idcode, ID_WM, ID_SCR, ID_WS)) { | if (ELEM(idcode, ID_WM, ID_SCR, ID_WS)) { | ||||
| /* Read WindowManager, Screen and WorkSpace IDs are never actually used during undo (see | /* Read WindowManager, Screen and WorkSpace IDs are never actually used during undo (see | ||||
| * `setup_app_data()` in `blendfile.c`). | * `setup_app_data()` in `blendfile.c`). | ||||
| * So we can just abort here, just ensuring libmapping is set accordingly. */ | * So we can just abort here, just ensuring libmapping is set accordingly. */ | ||||
| can_finalize_and_return = true; | can_finalize_and_return = true; | ||||
| } | } | ||||
| else if (id_old != NULL && fd->are_memchunks_identical) { | else if (id_old != NULL && fd->are_memchunks_identical) { | ||||
| /* Do not add LIB_TAG_NEW here, this should not be needed/used in undo case anyway (as | /* Do not add LIB_TAG_NEW here, this should not be needed/used in undo case anyway (as | ||||
| * this is only for do_version-like code), but for sake of consistency, and also because | * this is only for do_version-like code), but for sake of consistency, and also because | ||||
| * it will tell us which ID is re-used from old Main, and which one is actually new. */ | * it will tell us which ID is re-used from old Main, and which one is actually new. */ | ||||
| id_old->tag = tag | LIB_TAG_NEED_LINK | LIB_TAG_UNDO_OLD_ID_REUSED; | id_old->tag = tag | LIB_TAG_NEED_LINK | LIB_TAG_UNDO_OLD_ID_REUSED; | ||||
| id_old->lib = main->curlib; | id_old->lib = main->curlib; | ||||
| id_old->us = ID_FAKE_USERS(id_old); | id_old->us = ID_FAKE_USERS(id_old); | ||||
| /* Do not reset id->icon_id here, memory allocated for it remains valid. */ | /* Do not reset id->icon_id here, memory allocated for it remains valid. */ | ||||
| /* Needed because .blend may have been saved with crap value here... */ | /* Needed because .blend may have been saved with crap value here... */ | ||||
| id_old->newid = NULL; | id_old->newid = NULL; | ||||
| id_old->orig_id = NULL; | id_old->orig_id = NULL; | ||||
| /* About recalc: since that ID did not change at all, we know that its recalc fields also | /* About recalc: since that ID did not change at all, we know that its recalc fields also | ||||
| * remained unchanged, so no need to handle neither recalc nor recalc_undo_future here. | * remained unchanged, so no need to handle neither recalc nor recalc_undo_future here. | ||||
| */ | */ | ||||
| Main *old_bmain = fd->old_mainlist->first; | Main *old_bmain = fd->old_mainlist->first; | ||||
| ListBase *old_lb = which_libbase(old_bmain, idcode); | ListBase *old_lb = which_libbase(old_bmain, idcode); | ||||
| ListBase *new_lb = which_libbase(main, idcode); | ListBase *new_lb = which_libbase(main, idcode); | ||||
| BLI_remlink(old_lb, id_old); | BLI_remlink(old_lb, id_old); | ||||
| BLI_addtail(new_lb, id_old); | BLI_addtail(new_lb, id_old); | ||||
| can_finalize_and_return = true; | can_finalize_and_return = true; | ||||
| } | } | ||||
| if (can_finalize_and_return) { | if (can_finalize_and_return) { | ||||
| DEBUG_PRINTF("Re-using existing ID %s instead of newly read one\n", id_old->name); | DEBUG_PRINTF("Re-using existing ID %s instead of newly read one\n", id_old->name); | ||||
| oldnewmap_insert(fd->libmap, id_bhead->old, id_old, id_bhead->code); | oldnewmap_insert(fd->libmap, id_bhead->old, id_old, id_bhead->code); | ||||
| oldnewmap_insert(fd->libmap, id_old, id_old, id_bhead->code); | oldnewmap_insert(fd->libmap, id_old, id_old, id_bhead->code); | ||||
| if (r_id) { | if (r_id) { | ||||
| *r_id = id_old; | *r_id = id_old; | ||||
| } | } | ||||
| if (do_partial_undo) { | if (do_partial_undo) { | ||||
| /* Even though we re-use the old ID as-is, it does not mean that we are 100% safe from | /* Even though we re-use the old ID as-is, it does not mean that we are 100% safe from | ||||
| * needing some depsgraph updates for it (it could depend on another ID which address | * needing some depsgraph updates for it (it could depend on another ID which address | ||||
| * did not change, but which actual content might have been re-read from the memfile). | * did not change, but which actual content might have been re-read from the memfile). | ||||
| * IMPORTANT: Do not fully overwrite recalc flag here, depsgraph may not have been run | * IMPORTANT: Do not fully overwrite recalc flag here, depsgraph may not have been run | ||||
| * yet for previous undo step(s), we do not want to erase flags set by those. | * yet for previous undo step(s), we do not want to erase flags set by those. | ||||
| */ | */ | ||||
| if (fd->undo_direction < 0) { | if (fd->undo_direction < 0) { | ||||
| /* We are coming from the future (i.e. do an actual undo, and not a redo), we use our | /* We are coming from the future (i.e. do an actual undo, and not a redo), we use our | ||||
| * old reused ID's 'accumulated recalc flags since last memfile undo step saving' as | * old reused ID's 'accumulated recalc flags since last memfile undo step saving' as | ||||
| * recalc flags. */ | * recalc flags. */ | ||||
| id_old->recalc |= id_old->recalc_undo_accumulated; | id_old->recalc |= id_old->recalc_undo_accumulated; | ||||
| } | } | ||||
| else { | else { | ||||
| /* We are coming from the past (i.e. do a redo), we use the saved 'accumulated recalc | /* We are coming from the past (i.e. do a redo), we use the saved 'accumulated recalc | ||||
| * flags since last memfile undo step saving' from the newly read ID as recalc flags. | * flags since last memfile undo step saving' from the newly read ID as recalc flags. | ||||
| */ | */ | ||||
| id_old->recalc |= id->recalc_undo_accumulated; | id_old->recalc |= id->recalc_undo_accumulated; | ||||
| } | } | ||||
| /* There is no need to flush the depsgraph's CoWs here, since that ID's data itself did | /* There is no need to flush the depsgraph's CoWs here, since that ID's data itself did | ||||
| * not change. */ | * not change. */ | ||||
| /* We need to 'accumulate' the accumulated recalc flags of all undo steps until we | /* We need to 'accumulate' the accumulated recalc flags of all undo steps until we | ||||
| * actually perform a depsgraph update, otherwise we'd only ever use the flags from one | * actually perform a depsgraph update, otherwise we'd only ever use the flags from one | ||||
| * of the steps, and never get proper flags matching all others. */ | * of the steps, and never get proper flags matching all others. */ | ||||
| id_old->recalc_undo_accumulated |= id->recalc_undo_accumulated; | id_old->recalc_undo_accumulated |= id->recalc_undo_accumulated; | ||||
| } | } | ||||
| MEM_freeN(id); | MEM_freeN(id); | ||||
| oldnewmap_clear(fd->datamap); | oldnewmap_clear(fd->datamap); | ||||
| return bhead; | return bhead; | ||||
| } | } | ||||
| } | } | ||||
| } | |||||
| /* do after read_struct, for dna reconstruct */ | |||||
| ListBase *lb = which_libbase(main, idcode); | |||||
| if (lb) { | |||||
| /* Some re-used old IDs might also use newly read ones, so we have to check for old memory | /* Some re-used old IDs might also use newly read ones, so we have to check for old memory | ||||
| * addresses for those as well. */ | * addresses for those as well. */ | ||||
| if (fd->memfile != NULL && do_partial_undo && id->lib == NULL) { | if (do_partial_undo && id->lib == NULL) { | ||||
| BLI_assert(fd->old_idmap != NULL); | BLI_assert(fd->old_idmap != NULL); | ||||
| DEBUG_PRINTF("\t Looking for ID %s with uuid %u instead of newly read one\n", | DEBUG_PRINTF("\t Looking for ID %s with uuid %u instead of newly read one\n", | ||||
| id->name, | id->name, | ||||
| id->session_uuid); | id->session_uuid); | ||||
| id_old = BKE_main_idmap_lookup_uuid(fd->old_idmap, id->session_uuid); | id_old = BKE_main_idmap_lookup_uuid(fd->old_idmap, id->session_uuid); | ||||
| if (id_old != NULL) { | if (id_old != NULL) { | ||||
| BLI_assert(MEM_allocN_len(id) == MEM_allocN_len(id_old)); | BLI_assert(MEM_allocN_len(id) == MEM_allocN_len(id_old)); | ||||
| /* UI IDs are always re-used from old bmain at higher-level calling code, so never swap | /* UI IDs are always re-used from old bmain at higher-level calling code, so never swap | ||||
| * those. Besides maybe custom properties, no other ID should have pointers to those | * those. Besides maybe custom properties, no other ID should have pointers to those | ||||
| * anyway... | * anyway... | ||||
| * And linked IDs are handled separately as well. */ | * And linked IDs are handled separately as well. */ | ||||
| do_id_swap = !ELEM(idcode, ID_WM, ID_SCR, ID_WS) && | do_id_swap = !ELEM(idcode, ID_WM, ID_SCR, ID_WS) && | ||||
| !(id_bhead->code == ID_LINK_PLACEHOLDER); | !(id_bhead->code == ID_LINK_PLACEHOLDER); | ||||
| } | } | ||||
| } | } | ||||
| /* At this point, we know we are going to keep that newly read & allocated ID, so we need to | /* At this point, we know we are going to keep that newly read & allocated ID, so we need to | ||||
| * reallocate it to ensure we actually get a unique memory address for it. */ | * reallocate it to ensure we actually get a unique memory address for it. */ | ||||
| if (!do_id_swap) { | if (!do_id_swap) { | ||||
| DEBUG_PRINTF("using newly-read ID %s to a new mem address\n", id->name); | DEBUG_PRINTF("using newly-read ID %s to a new mem address\n", id->name); | ||||
| } | } | ||||
| else { | else { | ||||
| DEBUG_PRINTF("using newly-read ID %s to its old, already existing address\n", id->name); | DEBUG_PRINTF("using newly-read ID %s to its old, already existing address\n", id->name); | ||||
| } | } | ||||
| } | |||||
| /* for ID_LINK_PLACEHOLDER check */ | /* for ID_LINK_PLACEHOLDER check */ | ||||
| ID *id_target = do_id_swap ? id_old : id; | ID *id_target = do_id_swap ? id_old : id; | ||||
| oldnewmap_insert(fd->libmap, id_bhead->old, id_target, id_bhead->code); | oldnewmap_insert(fd->libmap, id_bhead->old, id_target, id_bhead->code); | ||||
| oldnewmap_insert(fd->libmap, id_old, id_target, id_bhead->code); | oldnewmap_insert(fd->libmap, id_old, id_target, id_bhead->code); | ||||
| BLI_addtail(lb, id); | BLI_addtail(lb, id); | ||||
| } | |||||
| else { | |||||
| /* unknown ID type */ | |||||
| printf("%s: unknown id code '%c%c'\n", __func__, (idcode & 0xff), (idcode >> 8)); | |||||
| MEM_freeN(id); | |||||
| id = NULL; | |||||
| } | |||||
| } | |||||
| if (r_id) { | if (r_id) { | ||||
| *r_id = do_id_swap ? id_old : id; | *r_id = do_id_swap ? id_old : id; | ||||
| } | } | ||||
| if (!id) { | |||||
| return blo_bhead_next(fd, id_bhead); | |||||
| } | |||||
| /* Set tag for new datablock to indicate lib linking and versioning needs | /* Set tag for new datablock to indicate lib linking and versioning needs | ||||
| * to be done still. */ | * to be done still. */ | ||||
| int id_tag = tag | LIB_TAG_NEED_LINK | LIB_TAG_NEW; | int id_tag = tag | LIB_TAG_NEED_LINK | LIB_TAG_NEW; | ||||
| if (id_bhead->code == ID_LINK_PLACEHOLDER) { | if (id_bhead->code == ID_LINK_PLACEHOLDER) { | ||||
| /* Tag to get replaced by the actual linked datablock. */ | /* Tag to get replaced by the actual linked datablock. */ | ||||
| id_tag |= LIB_TAG_ID_LINK_PLACEHOLDER; | id_tag |= LIB_TAG_ID_LINK_PLACEHOLDER; | ||||
| ▲ Show 20 Lines • Show All 2,797 Lines • Show Last 20 Lines | |||||
putting this before the linked data processing in case of undo is a step back in performance, since just getting the name of the ID is much cheaper than actually reading the whole struct.
Not saying this is of huge importance — we know file reading is not really performance-critical currently in undo — but still, why?