Merge branch 'js/misc-fixes'

Assorted fixes for issues found with CodeQL.

* js/misc-fixes:
  sequencer: stop pretending that an assignment is a condition
  bundle-uri: avoid using undefined output of `sscanf()`
  commit-graph: avoid using stale stack addresses
  trace2: avoid "futile conditional"
  Avoid redundant conditions
  fetch: avoid unnecessary work when there is no current branch
  has_dir_name(): make code more obvious
  upload-pack: rename `enum` to reflect the operation
  commit-graph: avoid malloc'ing a local variable
  fetch: carefully clear local variable's address after use
  commit: simplify code
Junio C Hamano
2025-05-27 13:59:10 -07:00
10 changed files with 130 additions and 161 deletions

@@ -1022,7 +1022,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
 		for (i = 0; i < the_repository->index->cache_nr; i++)
 			if (ce_intent_to_add(the_repository->index->cache[i]))
 				ita_nr++;
-		committable = the_repository->index->cache_nr - ita_nr > 0;
+		committable = the_repository->index->cache_nr > ita_nr;
 	} else {
 		/*
 		 * Unless the user did explicitly request a submodule

@@ -1728,7 +1728,7 @@ static int do_fetch(struct transport *transport,
 		if (transport->remote->follow_remote_head != FOLLOW_REMOTE_NEVER)
 			do_set_head = 1;
 	}
-	if (branch_has_merge_config(branch) &&
+	if (branch && branch_has_merge_config(branch) &&
 	    !strcmp(branch->remote_name, transport->remote->name)) {
 		int i;
 		for (i = 0; i < branch->merge_nr; i++) {
@@ -2560,6 +2560,7 @@ int cmd_fetch(int argc,
 		if (server_options.nr)
 			gtransport->server_options = &server_options;
 		result = transport_fetch_refs(gtransport, NULL);
+		gtransport->smart_options->acked_commits = NULL;
 
 		oidset_iter_init(&acked_commits, &iter);
 		while ((oid = oidset_iter_next(&iter)))

@@ -532,12 +532,14 @@ static int fetch_bundles_by_token(struct repository *r,
 	 */
 	if (!repo_config_get_value(r,
 				   "fetch.bundlecreationtoken",
-				   &creationTokenStr) &&
-	    sscanf(creationTokenStr, "%"PRIu64, &maxCreationToken) == 1 &&
-	    bundles.items[0]->creationToken <= maxCreationToken) {
-		free(bundles.items);
-		return 0;
+				   &creationTokenStr)) {
+		if (sscanf(creationTokenStr, "%"PRIu64, &maxCreationToken) != 1)
+			maxCreationToken = 0;
+		if (bundles.items[0]->creationToken <= maxCreationToken) {
+			free(bundles.items);
+			return 0;
+		}
 	}
 
 	/*
 	 * Attempt to download and unbundle the minimum number of bundles by

@@ -2511,7 +2511,17 @@ int write_commit_graph(struct object_directory *odb,
 		       const struct commit_graph_opts *opts)
 {
 	struct repository *r = the_repository;
-	struct write_commit_graph_context *ctx;
+	struct write_commit_graph_context ctx = {
+		.r = r,
+		.odb = odb,
+		.append = flags & COMMIT_GRAPH_WRITE_APPEND ? 1 : 0,
+		.report_progress = flags & COMMIT_GRAPH_WRITE_PROGRESS ? 1 : 0,
+		.split = flags & COMMIT_GRAPH_WRITE_SPLIT ? 1 : 0,
+		.opts = opts,
+		.total_bloom_filter_data_size = 0,
+		.write_generation_data = (get_configured_generation_version(r) == 2),
+		.num_generation_data_overflows = 0,
+	};
 	uint32_t i;
 	int res = 0;
 	int replace = 0;
@@ -2533,17 +2543,6 @@ int write_commit_graph(struct object_directory *odb,
 		return 0;
 	}
 
-	CALLOC_ARRAY(ctx, 1);
-	ctx->r = r;
-	ctx->odb = odb;
-	ctx->append = flags & COMMIT_GRAPH_WRITE_APPEND ? 1 : 0;
-	ctx->report_progress = flags & COMMIT_GRAPH_WRITE_PROGRESS ? 1 : 0;
-	ctx->split = flags & COMMIT_GRAPH_WRITE_SPLIT ? 1 : 0;
-	ctx->opts = opts;
-	ctx->total_bloom_filter_data_size = 0;
-	ctx->write_generation_data = (get_configured_generation_version(r) == 2);
-	ctx->num_generation_data_overflows = 0;
-
 	bloom_settings.hash_version = r->settings.commit_graph_changed_paths_version;
 	bloom_settings.bits_per_entry = git_env_ulong("GIT_TEST_BLOOM_SETTINGS_BITS_PER_ENTRY",
 						      bloom_settings.bits_per_entry);
@@ -2551,14 +2550,14 @@ int write_commit_graph(struct object_directory *odb,
 						  bloom_settings.num_hashes);
 	bloom_settings.max_changed_paths = git_env_ulong("GIT_TEST_BLOOM_SETTINGS_MAX_CHANGED_PATHS",
 							 bloom_settings.max_changed_paths);
-	ctx->bloom_settings = &bloom_settings;
+	ctx.bloom_settings = &bloom_settings;
 
 	init_topo_level_slab(&topo_levels);
-	ctx->topo_levels = &topo_levels;
+	ctx.topo_levels = &topo_levels;
 
-	prepare_commit_graph(ctx->r);
-	if (ctx->r->objects->commit_graph) {
-		struct commit_graph *g = ctx->r->objects->commit_graph;
+	prepare_commit_graph(ctx.r);
+	if (ctx.r->objects->commit_graph) {
+		struct commit_graph *g = ctx.r->objects->commit_graph;
 
 		while (g) {
 			g->topo_levels = &topo_levels;
@@ -2567,15 +2566,15 @@ int write_commit_graph(struct object_directory *odb,
 	}
 
 	if (flags & COMMIT_GRAPH_WRITE_BLOOM_FILTERS)
-		ctx->changed_paths = 1;
+		ctx.changed_paths = 1;
 	if (!(flags & COMMIT_GRAPH_NO_WRITE_BLOOM_FILTERS)) {
 		struct commit_graph *g;
 
-		g = ctx->r->objects->commit_graph;
+		g = ctx.r->objects->commit_graph;
 
 		/* We have changed-paths already. Keep them in the next graph */
 		if (g && g->bloom_filter_settings) {
-			ctx->changed_paths = 1;
+			ctx.changed_paths = 1;
 
 			/* don't propagate the hash_version unless unspecified */
 			if (bloom_settings.hash_version == -1)
@@ -2588,116 +2587,123 @@ int write_commit_graph(struct object_directory *odb,
 	bloom_settings.hash_version = bloom_settings.hash_version == 2 ? 2 : 1;
 
-	if (ctx->split) {
-		struct commit_graph *g = ctx->r->objects->commit_graph;
+	if (ctx.split) {
+		struct commit_graph *g = ctx.r->objects->commit_graph;
 
 		while (g) {
-			ctx->num_commit_graphs_before++;
+			ctx.num_commit_graphs_before++;
 			g = g->base_graph;
 		}
 
-		if (ctx->num_commit_graphs_before) {
-			ALLOC_ARRAY(ctx->commit_graph_filenames_before, ctx->num_commit_graphs_before);
-			i = ctx->num_commit_graphs_before;
-			g = ctx->r->objects->commit_graph;
+		if (ctx.num_commit_graphs_before) {
+			ALLOC_ARRAY(ctx.commit_graph_filenames_before, ctx.num_commit_graphs_before);
+			i = ctx.num_commit_graphs_before;
+			g = ctx.r->objects->commit_graph;
 
 			while (g) {
-				ctx->commit_graph_filenames_before[--i] = xstrdup(g->filename);
+				ctx.commit_graph_filenames_before[--i] = xstrdup(g->filename);
 				g = g->base_graph;
 			}
 		}
 
-		if (ctx->opts)
-			replace = ctx->opts->split_flags & COMMIT_GRAPH_SPLIT_REPLACE;
+		if (ctx.opts)
+			replace = ctx.opts->split_flags & COMMIT_GRAPH_SPLIT_REPLACE;
 	}
 
-	ctx->approx_nr_objects = repo_approximate_object_count(the_repository);
+	ctx.approx_nr_objects = repo_approximate_object_count(the_repository);
 
-	if (ctx->append && ctx->r->objects->commit_graph) {
-		struct commit_graph *g = ctx->r->objects->commit_graph;
+	if (ctx.append && ctx.r->objects->commit_graph) {
+		struct commit_graph *g = ctx.r->objects->commit_graph;
 
 		for (i = 0; i < g->num_commits; i++) {
 			struct object_id oid;
 			oidread(&oid, g->chunk_oid_lookup + st_mult(g->hash_len, i),
 				the_repository->hash_algo);
-			oid_array_append(&ctx->oids, &oid);
+			oid_array_append(&ctx.oids, &oid);
 		}
 	}
 
 	if (pack_indexes) {
-		ctx->order_by_pack = 1;
-		if ((res = fill_oids_from_packs(ctx, pack_indexes)))
+		ctx.order_by_pack = 1;
+		if ((res = fill_oids_from_packs(&ctx, pack_indexes)))
 			goto cleanup;
 	}
 
 	if (commits) {
-		if ((res = fill_oids_from_commits(ctx, commits)))
+		if ((res = fill_oids_from_commits(&ctx, commits)))
 			goto cleanup;
 	}
 
 	if (!pack_indexes && !commits) {
-		ctx->order_by_pack = 1;
-		fill_oids_from_all_packs(ctx);
+		ctx.order_by_pack = 1;
+		fill_oids_from_all_packs(&ctx);
 	}
 
-	close_reachable(ctx);
+	close_reachable(&ctx);
 
-	copy_oids_to_commits(ctx);
+	copy_oids_to_commits(&ctx);
 
-	if (ctx->commits.nr >= GRAPH_EDGE_LAST_MASK) {
+	if (ctx.commits.nr >= GRAPH_EDGE_LAST_MASK) {
 		error(_("too many commits to write graph"));
 		res = -1;
 		goto cleanup;
 	}
 
-	if (!ctx->commits.nr && !replace)
+	if (!ctx.commits.nr && !replace)
 		goto cleanup;
 
-	if (ctx->split) {
-		split_graph_merge_strategy(ctx);
+	if (ctx.split) {
+		split_graph_merge_strategy(&ctx);
 
 		if (!replace)
-			merge_commit_graphs(ctx);
+			merge_commit_graphs(&ctx);
 	} else
-		ctx->num_commit_graphs_after = 1;
+		ctx.num_commit_graphs_after = 1;
 
-	ctx->trust_generation_numbers = validate_mixed_generation_chain(ctx->r->objects->commit_graph);
+	ctx.trust_generation_numbers = validate_mixed_generation_chain(ctx.r->objects->commit_graph);
 
-	compute_topological_levels(ctx);
-	if (ctx->write_generation_data)
-		compute_generation_numbers(ctx);
+	compute_topological_levels(&ctx);
+	if (ctx.write_generation_data)
+		compute_generation_numbers(&ctx);
 
-	if (ctx->changed_paths)
-		compute_bloom_filters(ctx);
+	if (ctx.changed_paths)
+		compute_bloom_filters(&ctx);
 
-	res = write_commit_graph_file(ctx);
+	res = write_commit_graph_file(&ctx);
 
-	if (ctx->changed_paths)
+	if (ctx.changed_paths)
 		deinit_bloom_filters();
 
-	if (ctx->split)
-		mark_commit_graphs(ctx);
+	if (ctx.split)
+		mark_commit_graphs(&ctx);
 
-	expire_commit_graphs(ctx);
+	expire_commit_graphs(&ctx);
 
 cleanup:
-	free(ctx->graph_name);
-	free(ctx->base_graph_name);
-	free(ctx->commits.list);
-	oid_array_clear(&ctx->oids);
+	free(ctx.graph_name);
+	free(ctx.base_graph_name);
+	free(ctx.commits.list);
+	oid_array_clear(&ctx.oids);
 	clear_topo_level_slab(&topo_levels);
 
-	for (i = 0; i < ctx->num_commit_graphs_before; i++)
-		free(ctx->commit_graph_filenames_before[i]);
-	free(ctx->commit_graph_filenames_before);
-
-	for (i = 0; i < ctx->num_commit_graphs_after; i++) {
-		free(ctx->commit_graph_filenames_after[i]);
-		free(ctx->commit_graph_hash_after[i]);
-	}
-	free(ctx->commit_graph_filenames_after);
-	free(ctx->commit_graph_hash_after);
-
-	free(ctx);
+	if (ctx.r->objects->commit_graph) {
+		struct commit_graph *g = ctx.r->objects->commit_graph;
+
+		while (g) {
+			g->topo_levels = NULL;
+			g = g->base_graph;
+		}
+	}
+
+	for (i = 0; i < ctx.num_commit_graphs_before; i++)
+		free(ctx.commit_graph_filenames_before[i]);
+	free(ctx.commit_graph_filenames_before);
+
+	for (i = 0; i < ctx.num_commit_graphs_after; i++) {
+		free(ctx.commit_graph_filenames_after[i]);
+		free(ctx.commit_graph_hash_after[i]);
+	}
+	free(ctx.commit_graph_filenames_after);
+	free(ctx.commit_graph_hash_after);
 
 	return res;
 }

help.c

@@ -214,7 +214,7 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
 		else if (cmp == 0) {
 			ei++;
 			free(cmds->names[ci++]);
-		} else if (cmp > 0)
+		} else
 			ei++;
 	}

@@ -1117,48 +1117,19 @@ static int has_dir_name(struct index_state *istate,
 	 *
 	 * Compare the entry's full path with the last path in the index.
 	 */
-	if (istate->cache_nr > 0) {
-		cmp_last = strcmp_offset(name,
-			istate->cache[istate->cache_nr - 1]->name,
-			&len_eq_last);
-		if (cmp_last > 0) {
-			if (name[len_eq_last] != '/') {
-				/*
-				 * The entry sorts AFTER the last one in the
-				 * index.
-				 *
-				 * If there were a conflict with "file", then our
-				 * name would start with "file/" and the last index
-				 * entry would start with "file" but not "file/".
-				 *
-				 * The next character after common prefix is
-				 * not '/', so there can be no conflict.
-				 */
-				return retval;
-			} else {
-				/*
-				 * The entry sorts AFTER the last one in the
-				 * index, and the next character after common
-				 * prefix is '/'.
-				 *
-				 * Either the last index entry is a file in
-				 * conflict with this entry, or it has a name
-				 * which sorts between this entry and the
-				 * potential conflicting file.
-				 *
-				 * In both cases, we fall through to the loop
-				 * below and let the regular search code handle it.
-				 */
-			}
-		} else if (cmp_last == 0) {
-			/*
-			 * The entry exactly matches the last one in the
-			 * index, but because of multiple stage and CE_REMOVE
-			 * items, we fall through and let the regular search
-			 * code handle it.
-			 */
-		}
-	}
+	if (!istate->cache_nr)
+		return 0;
+
+	cmp_last = strcmp_offset(name,
+				 istate->cache[istate->cache_nr - 1]->name,
+				 &len_eq_last);
+	if (cmp_last > 0 && name[len_eq_last] != '/')
+		/*
+		 * The entry sorts AFTER the last one in the
+		 * index and their paths have no common prefix,
+		 * so there cannot be a F/D conflict.
+		 */
+		return 0;
 
 	for (;;) {
 		size_t len;

@@ -2636,9 +2636,12 @@ static int is_command(enum todo_command command, const char **bol)
 	const char nick = todo_command_info[command].c;
 	const char *p = *bol;
 
-	return (skip_prefix(p, str, &p) || (nick && *p++ == nick)) &&
-		(*p == ' ' || *p == '\t' || *p == '\n' || *p == '\r' || !*p) &&
-		(*bol = p);
+	if ((skip_prefix(p, str, &p) || (nick && *p++ == nick)) &&
+	    (*p == ' ' || *p == '\t' || *p == '\n' || *p == '\r' || !*p)) {
+		*bol = p;
+		return 1;
+	}
+	return 0;
 }
 
 static int check_label_or_ref_arg(enum todo_command command, const char *arg)

@@ -102,25 +102,11 @@ void tr2_update_final_timers(void)
 		struct tr2_timer *t_final = &final_timer_block.timer[tid];
 		struct tr2_timer *t = &ctx->timer_block.timer[tid];
 
-		if (t->recursion_count) {
-			/*
-			 * The current thread is exiting with
-			 * timer[tid] still running.
-			 *
-			 * Technically, this is a bug, but I'm going
-			 * to ignore it.
-			 *
-			 * I don't think it is worth calling die()
-			 * for. I don't think it is worth killing the
-			 * process for this bookkeeping error. We
-			 * might want to call warning(), but I'm going
-			 * to wait on that.
-			 *
-			 * The downside here is that total_ns won't
-			 * include the current open interval (now -
-			 * start_ns). I can live with that.
-			 */
-		}
+		/*
+		 * `t->recursion_count` could technically be non-zero, which
+		 * would constitute a bug. Reporting the bug would potentially
+		 * cause an infinite recursion, though, so let's ignore it.
+		 */
 
 		if (!t->interval_count)
 			continue; /* this timer was not used by this thread */

@@ -1437,7 +1437,7 @@ static int udt_do_read(struct unidirectional_transfer *t)
 		transfer_debug("%s EOF (with %i bytes in buffer)",
			       t->src_name, (int)t->bufuse);
 		t->state = SSTATE_FLUSHING;
-	} else if (bytes > 0) {
+	} else {
 		t->bufuse += bytes;
 		transfer_debug("Read %i bytes from %s (buffer now at %i)",
			       (int)bytes, t->src_name, (int)t->bufuse);

@@ -1780,16 +1780,16 @@ static void send_shallow_info(struct upload_pack_data *data)
 	packet_delim(1);
 }
 
-enum fetch_state {
-	FETCH_PROCESS_ARGS = 0,
-	FETCH_SEND_ACKS,
-	FETCH_SEND_PACK,
-	FETCH_DONE,
+enum upload_state {
+	UPLOAD_PROCESS_ARGS = 0,
+	UPLOAD_SEND_ACKS,
+	UPLOAD_SEND_PACK,
+	UPLOAD_DONE,
 };
 
 int upload_pack_v2(struct repository *r, struct packet_reader *request)
 {
-	enum fetch_state state = FETCH_PROCESS_ARGS;
+	enum upload_state state = UPLOAD_PROCESS_ARGS;
 	struct upload_pack_data data;
 
 	clear_object_flags(the_repository, ALL_FLAGS);
@@ -1798,9 +1798,9 @@ int upload_pack_v2(struct repository *r, struct packet_reader *request)
 	data.use_sideband = LARGE_PACKET_MAX;
 	get_upload_pack_config(r, &data);
 
-	while (state != FETCH_DONE) {
+	while (state != UPLOAD_DONE) {
 		switch (state) {
-		case FETCH_PROCESS_ARGS:
+		case UPLOAD_PROCESS_ARGS:
 			process_args(request, &data);
 
 			if (!data.want_obj.nr && !data.wait_for_done) {
@@ -1811,27 +1811,27 @@ int upload_pack_v2(struct repository *r, struct packet_reader *request)
 				 * to just send 'have's without 'want's); guess
 				 * they didn't want anything.
 				 */
-				state = FETCH_DONE;
+				state = UPLOAD_DONE;
 			} else if (data.seen_haves) {
 				/*
 				 * Request had 'have' lines, so lets ACK them.
 				 */
-				state = FETCH_SEND_ACKS;
+				state = UPLOAD_SEND_ACKS;
 			} else {
 				/*
 				 * Request had 'want's but no 'have's so we can
 				 * immediately go to construct and send a pack.
 				 */
-				state = FETCH_SEND_PACK;
+				state = UPLOAD_SEND_PACK;
 			}
 			break;
-		case FETCH_SEND_ACKS:
+		case UPLOAD_SEND_ACKS:
 			if (process_haves_and_send_acks(&data))
-				state = FETCH_SEND_PACK;
+				state = UPLOAD_SEND_PACK;
 			else
-				state = FETCH_DONE;
+				state = UPLOAD_DONE;
 			break;
-		case FETCH_SEND_PACK:
+		case UPLOAD_SEND_PACK:
 			send_wanted_ref_info(&data);
 			send_shallow_info(&data);
@@ -1841,9 +1841,9 @@ int upload_pack_v2(struct repository *r, struct packet_reader *request)
 				packet_writer_write(&data.writer, "packfile\n");
 				create_pack_file(&data, NULL);
 			}
-			state = FETCH_DONE;
+			state = UPLOAD_DONE;
 			break;
-		case FETCH_DONE:
+		case UPLOAD_DONE:
 			continue;
 		}
 	}