Merge branch 'ps/reftable-libgit2-cleanup'

Code clean-ups.

* ps/reftable-libgit2-cleanup:
  refs/reftable: always reload stacks when creating lock
  reftable: don't second-guess errors from flock interface
  reftable/stack: handle outdated stacks when compacting
  reftable/stack: allow passing flags to `reftable_stack_add()`
  reftable/stack: fix compiler warning due to missing braces
  reftable/stack: reorder code to avoid forward declarations
  reftable/writer: drop Git-specific `QSORT()` macro
  reftable/writer: fix type used for number of records
This commit is contained in:
Junio C Hamano
2025-08-29 09:44:36 -07:00
8 changed files with 283 additions and 286 deletions

View File

@@ -1012,10 +1012,6 @@ static int prepare_transaction_update(struct write_transaction_table_arg **out,
if (!arg) {
struct reftable_addition *addition;
ret = reftable_stack_reload(be->stack);
if (ret)
return ret;
ret = reftable_stack_new_addition(&addition, be->stack,
REFTABLE_STACK_NEW_ADDITION_RELOAD);
if (ret) {
@@ -1974,7 +1970,8 @@ static int reftable_be_rename_ref(struct ref_store *ref_store,
ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
if (ret)
goto done;
ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg,
REFTABLE_STACK_NEW_ADDITION_RELOAD);
done:
assert(ret != REFTABLE_API_ERROR);
@@ -2003,7 +2000,8 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
if (ret)
goto done;
ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg,
REFTABLE_STACK_NEW_ADDITION_RELOAD);
done:
assert(ret != REFTABLE_API_ERROR);
@@ -2375,7 +2373,8 @@ static int reftable_be_create_reflog(struct ref_store *ref_store,
goto done;
arg.stack = be->stack;
ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg);
ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg,
REFTABLE_STACK_NEW_ADDITION_RELOAD);
done:
return ret;
@@ -2446,7 +2445,8 @@ static int reftable_be_delete_reflog(struct ref_store *ref_store,
return ret;
arg.stack = be->stack;
ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg);
ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg,
REFTABLE_STACK_NEW_ADDITION_RELOAD);
assert(ret != REFTABLE_API_ERROR);
return ret;
@@ -2567,6 +2567,11 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
if (ret < 0)
goto done;
ret = reftable_stack_new_addition(&add, be->stack,
REFTABLE_STACK_NEW_ADDITION_RELOAD);
if (ret < 0)
goto done;
ret = reftable_stack_init_log_iterator(be->stack, &it);
if (ret < 0)
goto done;
@@ -2575,10 +2580,6 @@ static int reftable_be_reflog_expire(struct ref_store *ref_store,
if (ret < 0)
goto done;
ret = reftable_stack_new_addition(&add, be->stack, 0);
if (ret < 0)
goto done;
ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
if (ret < 0)
goto done;

View File

@@ -68,12 +68,15 @@ int reftable_addition_commit(struct reftable_addition *add);
* transaction. Releases the lock if held. */
void reftable_addition_destroy(struct reftable_addition *add);
/* add a new table to the stack. The write_table function must call
* reftable_writer_set_limits, add refs and return an error value. */
/*
* Add a new table to the stack. The write_table function must call
* reftable_writer_set_limits, add refs and return an error value.
* The flags are passed through to `reftable_stack_new_addition()`.
*/
int reftable_stack_add(struct reftable_stack *st,
int (*write_table)(struct reftable_writer *wr,
void *write_arg),
void *write_arg);
void *write_arg, unsigned flags);
struct reftable_iterator;

View File

@@ -156,7 +156,7 @@ int reftable_writer_add_ref(struct reftable_writer *w,
the records before adding them, reordering the records array passed in.
*/
int reftable_writer_add_refs(struct reftable_writer *w,
struct reftable_ref_record *refs, int n);
struct reftable_ref_record *refs, size_t n);
/*
adds reftable_log_records. Log records are keyed by (refname, decreasing
@@ -171,7 +171,7 @@ int reftable_writer_add_log(struct reftable_writer *w,
the records before adding them, reordering the records array passed in.
*/
int reftable_writer_add_logs(struct reftable_writer *w,
struct reftable_log_record *logs, int n);
struct reftable_log_record *logs, size_t n);
/* reftable_writer_close finalizes the reftable. The writer is retained so
* statistics can be inspected. */

View File

@@ -17,18 +17,6 @@
#include "table.h"
#include "writer.h"
static int stack_try_add(struct reftable_stack *st,
int (*write_table)(struct reftable_writer *wr,
void *arg),
void *arg);
static int stack_write_compact(struct reftable_stack *st,
struct reftable_writer *wr,
size_t first, size_t last,
struct reftable_log_expiry_config *config);
static void reftable_addition_close(struct reftable_addition *add);
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
int reuse_open);
static int stack_filename(struct reftable_buf *dest, struct reftable_stack *st,
const char *name)
{
@@ -84,54 +72,6 @@ static int fd_writer_flush(void *arg)
return stack_fsync(writer->opts, writer->fd);
}
int reftable_new_stack(struct reftable_stack **dest, const char *dir,
const struct reftable_write_options *_opts)
{
struct reftable_buf list_file_name = REFTABLE_BUF_INIT;
struct reftable_write_options opts = { 0 };
struct reftable_stack *p;
int err;
p = reftable_calloc(1, sizeof(*p));
if (!p) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto out;
}
if (_opts)
opts = *_opts;
if (opts.hash_id == 0)
opts.hash_id = REFTABLE_HASH_SHA1;
*dest = NULL;
reftable_buf_reset(&list_file_name);
if ((err = reftable_buf_addstr(&list_file_name, dir)) < 0 ||
(err = reftable_buf_addstr(&list_file_name, "/tables.list")) < 0)
goto out;
p->list_file = reftable_buf_detach(&list_file_name);
p->list_fd = -1;
p->opts = opts;
p->reftable_dir = reftable_strdup(dir);
if (!p->reftable_dir) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto out;
}
err = reftable_stack_reload_maybe_reuse(p, 1);
if (err < 0)
goto out;
*dest = p;
err = 0;
out:
if (err < 0)
reftable_stack_destroy(p);
return err;
}
static int fd_read_lines(int fd, char ***namesp)
{
char *buf = NULL;
@@ -591,9 +531,59 @@ out:
return err;
}
/* -1 = error
0 = up to date
1 = changed. */
int reftable_new_stack(struct reftable_stack **dest, const char *dir,
const struct reftable_write_options *_opts)
{
struct reftable_buf list_file_name = REFTABLE_BUF_INIT;
struct reftable_write_options opts = { 0 };
struct reftable_stack *p;
int err;
p = reftable_calloc(1, sizeof(*p));
if (!p) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto out;
}
if (_opts)
opts = *_opts;
if (opts.hash_id == 0)
opts.hash_id = REFTABLE_HASH_SHA1;
*dest = NULL;
reftable_buf_reset(&list_file_name);
if ((err = reftable_buf_addstr(&list_file_name, dir)) < 0 ||
(err = reftable_buf_addstr(&list_file_name, "/tables.list")) < 0)
goto out;
p->list_file = reftable_buf_detach(&list_file_name);
p->list_fd = -1;
p->opts = opts;
p->reftable_dir = reftable_strdup(dir);
if (!p->reftable_dir) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto out;
}
err = reftable_stack_reload_maybe_reuse(p, 1);
if (err < 0)
goto out;
*dest = p;
err = 0;
out:
if (err < 0)
reftable_stack_destroy(p);
return err;
}
/*
* Check whether the given stack is up-to-date with what we have in memory.
* Returns 0 if so, 1 if the stack is out-of-date or a negative error code
* otherwise.
*/
static int stack_uptodate(struct reftable_stack *st)
{
char **names = NULL;
@@ -667,34 +657,6 @@ int reftable_stack_reload(struct reftable_stack *st)
return err;
}
int reftable_stack_add(struct reftable_stack *st,
int (*write)(struct reftable_writer *wr, void *arg),
void *arg)
{
int err = stack_try_add(st, write, arg);
if (err < 0) {
if (err == REFTABLE_OUTDATED_ERROR) {
/* Ignore error return, we want to propagate
REFTABLE_OUTDATED_ERROR.
*/
reftable_stack_reload(st);
}
return err;
}
return 0;
}
static int format_name(struct reftable_buf *dest, uint64_t min, uint64_t max)
{
char buf[100];
uint32_t rnd = reftable_rand();
snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x",
min, max, rnd);
reftable_buf_reset(dest);
return reftable_buf_addstr(dest, buf);
}
struct reftable_addition {
struct reftable_flock tables_list_lock;
struct reftable_stack *stack;
@@ -704,7 +666,25 @@ struct reftable_addition {
uint64_t next_update_index;
};
#define REFTABLE_ADDITION_INIT {0}
static void reftable_addition_close(struct reftable_addition *add)
{
struct reftable_buf nm = REFTABLE_BUF_INIT;
size_t i;
for (i = 0; i < add->new_tables_len; i++) {
if (!stack_filename(&nm, add->stack, add->new_tables[i]))
unlink(nm.buf);
reftable_free(add->new_tables[i]);
add->new_tables[i] = NULL;
}
reftable_free(add->new_tables);
add->new_tables = NULL;
add->new_tables_len = 0;
add->new_tables_cap = 0;
flock_release(&add->tables_list_lock);
reftable_buf_release(&nm);
}
static int reftable_stack_init_addition(struct reftable_addition *add,
struct reftable_stack *st,
@@ -713,18 +693,14 @@ static int reftable_stack_init_addition(struct reftable_addition *add,
struct reftable_buf lock_file_name = REFTABLE_BUF_INIT;
int err;
memset(add, 0, sizeof(*add));
add->stack = st;
err = flock_acquire(&add->tables_list_lock, st->list_file,
st->opts.lock_timeout_ms);
if (err < 0) {
if (errno == EEXIST) {
err = REFTABLE_LOCK_ERROR;
} else {
err = REFTABLE_IO_ERROR;
}
if (err < 0)
goto done;
}
if (st->opts.default_permissions) {
if (chmod(add->tables_list_lock.path,
st->opts.default_permissions) < 0) {
@@ -754,24 +730,54 @@ done:
return err;
}
static void reftable_addition_close(struct reftable_addition *add)
static int stack_try_add(struct reftable_stack *st,
int (*write_table)(struct reftable_writer *wr,
void *arg),
void *arg, unsigned flags)
{
struct reftable_buf nm = REFTABLE_BUF_INIT;
size_t i;
struct reftable_addition add;
int err;
for (i = 0; i < add->new_tables_len; i++) {
if (!stack_filename(&nm, add->stack, add->new_tables[i]))
unlink(nm.buf);
reftable_free(add->new_tables[i]);
add->new_tables[i] = NULL;
err = reftable_stack_init_addition(&add, st, flags);
if (err < 0)
goto done;
err = reftable_addition_add(&add, write_table, arg);
if (err < 0)
goto done;
err = reftable_addition_commit(&add);
done:
reftable_addition_close(&add);
return err;
}
int reftable_stack_add(struct reftable_stack *st,
int (*write)(struct reftable_writer *wr, void *arg),
void *arg, unsigned flags)
{
int err = stack_try_add(st, write, arg, flags);
if (err < 0) {
if (err == REFTABLE_OUTDATED_ERROR) {
/* Ignore error return, we want to propagate
REFTABLE_OUTDATED_ERROR.
*/
reftable_stack_reload(st);
}
return err;
}
reftable_free(add->new_tables);
add->new_tables = NULL;
add->new_tables_len = 0;
add->new_tables_cap = 0;
flock_release(&add->tables_list_lock);
reftable_buf_release(&nm);
return 0;
}
static int format_name(struct reftable_buf *dest, uint64_t min, uint64_t max)
{
char buf[100];
uint32_t rnd = reftable_rand();
snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x",
min, max, rnd);
reftable_buf_reset(dest);
return reftable_buf_addstr(dest, buf);
}
void reftable_addition_destroy(struct reftable_addition *add)
@@ -841,10 +847,13 @@ int reftable_addition_commit(struct reftable_addition *add)
* control. It is possible that a concurrent writer is already
* trying to compact parts of the stack, which would lead to a
* `REFTABLE_LOCK_ERROR` because parts of the stack are locked
* already. This is a benign error though, so we ignore it.
* already. Similarly, the stack may have been rewritten by a
* concurrent writer, which causes `REFTABLE_OUTDATED_ERROR`.
* Both of these errors are benign, so we simply ignore them.
*/
err = reftable_stack_auto_compact(add->stack);
if (err < 0 && err != REFTABLE_LOCK_ERROR)
if (err < 0 && err != REFTABLE_LOCK_ERROR &&
err != REFTABLE_OUTDATED_ERROR)
goto done;
err = 0;
}
@@ -858,39 +867,18 @@ int reftable_stack_new_addition(struct reftable_addition **dest,
struct reftable_stack *st,
unsigned int flags)
{
int err = 0;
struct reftable_addition empty = REFTABLE_ADDITION_INIT;
int err;
REFTABLE_CALLOC_ARRAY(*dest, 1);
if (!*dest)
return REFTABLE_OUT_OF_MEMORY_ERROR;
**dest = empty;
err = reftable_stack_init_addition(*dest, st, flags);
if (err) {
reftable_free(*dest);
*dest = NULL;
}
return err;
}
static int stack_try_add(struct reftable_stack *st,
int (*write_table)(struct reftable_writer *wr,
void *arg),
void *arg)
{
struct reftable_addition add = REFTABLE_ADDITION_INIT;
int err = reftable_stack_init_addition(&add, st, 0);
if (err < 0)
goto done;
err = reftable_addition_add(&add, write_table, arg);
if (err < 0)
goto done;
err = reftable_addition_commit(&add);
done:
reftable_addition_close(&add);
return err;
}
@@ -1007,72 +995,6 @@ uint64_t reftable_stack_next_update_index(struct reftable_stack *st)
return 1;
}
static int stack_compact_locked(struct reftable_stack *st,
size_t first, size_t last,
struct reftable_log_expiry_config *config,
struct reftable_tmpfile *tab_file_out)
{
struct reftable_buf next_name = REFTABLE_BUF_INIT;
struct reftable_buf tab_file_path = REFTABLE_BUF_INIT;
struct reftable_writer *wr = NULL;
struct fd_writer writer= {
.opts = &st->opts,
};
struct reftable_tmpfile tab_file = REFTABLE_TMPFILE_INIT;
int err = 0;
err = format_name(&next_name, reftable_table_min_update_index(st->tables[first]),
reftable_table_max_update_index(st->tables[last]));
if (err < 0)
goto done;
err = stack_filename(&tab_file_path, st, next_name.buf);
if (err < 0)
goto done;
err = reftable_buf_addstr(&tab_file_path, ".temp.XXXXXX");
if (err < 0)
goto done;
err = tmpfile_from_pattern(&tab_file, tab_file_path.buf);
if (err < 0)
goto done;
if (st->opts.default_permissions &&
chmod(tab_file.path, st->opts.default_permissions) < 0) {
err = REFTABLE_IO_ERROR;
goto done;
}
writer.fd = tab_file.fd;
err = reftable_writer_new(&wr, fd_writer_write, fd_writer_flush,
&writer, &st->opts);
if (err < 0)
goto done;
err = stack_write_compact(st, wr, first, last, config);
if (err < 0)
goto done;
err = reftable_writer_close(wr);
if (err < 0)
goto done;
err = tmpfile_close(&tab_file);
if (err < 0)
goto done;
*tab_file_out = tab_file;
tab_file = REFTABLE_TMPFILE_INIT;
done:
tmpfile_delete(&tab_file);
reftable_writer_free(wr);
reftable_buf_release(&next_name);
reftable_buf_release(&tab_file_path);
return err;
}
static int stack_write_compact(struct reftable_stack *st,
struct reftable_writer *wr,
size_t first, size_t last,
@@ -1172,6 +1094,72 @@ done:
return err;
}
static int stack_compact_locked(struct reftable_stack *st,
size_t first, size_t last,
struct reftable_log_expiry_config *config,
struct reftable_tmpfile *tab_file_out)
{
struct reftable_buf next_name = REFTABLE_BUF_INIT;
struct reftable_buf tab_file_path = REFTABLE_BUF_INIT;
struct reftable_writer *wr = NULL;
struct fd_writer writer= {
.opts = &st->opts,
};
struct reftable_tmpfile tab_file = REFTABLE_TMPFILE_INIT;
int err = 0;
err = format_name(&next_name, reftable_table_min_update_index(st->tables[first]),
reftable_table_max_update_index(st->tables[last]));
if (err < 0)
goto done;
err = stack_filename(&tab_file_path, st, next_name.buf);
if (err < 0)
goto done;
err = reftable_buf_addstr(&tab_file_path, ".temp.XXXXXX");
if (err < 0)
goto done;
err = tmpfile_from_pattern(&tab_file, tab_file_path.buf);
if (err < 0)
goto done;
if (st->opts.default_permissions &&
chmod(tab_file.path, st->opts.default_permissions) < 0) {
err = REFTABLE_IO_ERROR;
goto done;
}
writer.fd = tab_file.fd;
err = reftable_writer_new(&wr, fd_writer_write, fd_writer_flush,
&writer, &st->opts);
if (err < 0)
goto done;
err = stack_write_compact(st, wr, first, last, config);
if (err < 0)
goto done;
err = reftable_writer_close(wr);
if (err < 0)
goto done;
err = tmpfile_close(&tab_file);
if (err < 0)
goto done;
*tab_file_out = tab_file;
tab_file = REFTABLE_TMPFILE_INIT;
done:
tmpfile_delete(&tab_file);
reftable_writer_free(wr);
reftable_buf_release(&next_name);
reftable_buf_release(&tab_file_path);
return err;
}
enum stack_compact_range_flags {
/*
* Perform a best-effort compaction. That is, even if we cannot lock
@@ -1219,18 +1207,28 @@ static int stack_compact_range(struct reftable_stack *st,
* which are part of the user-specified range.
*/
err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms);
if (err < 0) {
if (errno == EEXIST)
err = REFTABLE_LOCK_ERROR;
else
err = REFTABLE_IO_ERROR;
if (err < 0)
goto done;
/*
* Check whether the stack is up-to-date. We unfortunately cannot
* handle the situation gracefully in case it's _not_ up-to-date
* because the range of tables that the user has requested us to
* compact may have been changed. So instead we abort.
*
* We could in theory improve the situation by having the caller not
* pass in a range, but instead the list of tables to compact. If so,
* we could check that relevant tables still exist. But for now it's
* good enough to just abort.
*/
err = stack_uptodate(st);
if (err < 0)
goto done;
if (err > 0) {
err = REFTABLE_OUTDATED_ERROR;
goto done;
}
err = stack_uptodate(st);
if (err)
goto done;
/*
* Lock all tables in the user-provided range. This is the slice of our
* stack which we'll compact.
@@ -1264,7 +1262,7 @@ static int stack_compact_range(struct reftable_stack *st,
* tables, otherwise there would be nothing to compact.
* In that case, we return a lock error to our caller.
*/
if (errno == EEXIST && last - (i - 1) >= 2 &&
if (err == REFTABLE_LOCK_ERROR && last - (i - 1) >= 2 &&
flags & STACK_COMPACT_RANGE_BEST_EFFORT) {
err = 0;
/*
@@ -1276,13 +1274,9 @@ static int stack_compact_range(struct reftable_stack *st,
*/
first = (i - 1) + 1;
break;
} else if (errno == EEXIST) {
err = REFTABLE_LOCK_ERROR;
goto done;
} else {
err = REFTABLE_IO_ERROR;
goto done;
}
goto done;
}
/*
@@ -1291,10 +1285,8 @@ static int stack_compact_range(struct reftable_stack *st,
* of tables.
*/
err = flock_close(&table_locks[nlocks++]);
if (err < 0) {
err = REFTABLE_IO_ERROR;
if (err < 0)
goto done;
}
}
/*
@@ -1326,13 +1318,8 @@ static int stack_compact_range(struct reftable_stack *st,
* the new table.
*/
err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms);
if (err < 0) {
if (errno == EEXIST)
err = REFTABLE_LOCK_ERROR;
else
err = REFTABLE_IO_ERROR;
if (err < 0)
goto done;
}
if (st->opts.default_permissions) {
if (chmod(tables_list_lock.path,

View File

@@ -72,7 +72,7 @@ int flock_acquire(struct reftable_flock *l, const char *target_path,
reftable_free(lockfile);
if (errno == EEXIST)
return REFTABLE_LOCK_ERROR;
return -1;
return REFTABLE_IO_ERROR;
}
l->fd = get_lock_file_fd(lockfile);

View File

@@ -81,7 +81,9 @@ struct reftable_flock {
* to acquire the lock. If `timeout_ms` is 0 we don't wait, if it is negative
* we block indefinitely.
*
 * Return 0 on success, a reftable error code on error.
 * Return 0 on success, a reftable error code on error. Specifically,
 * `REFTABLE_LOCK_ERROR` should be returned in case the target path is already
 * locked.
*/
int flock_acquire(struct reftable_flock *l, const char *target_path,
long timeout_ms);

View File

@@ -395,14 +395,16 @@ out:
}
int reftable_writer_add_refs(struct reftable_writer *w,
struct reftable_ref_record *refs, int n)
struct reftable_ref_record *refs, size_t n)
{
int err = 0;
int i = 0;
QSORT(refs, n, reftable_ref_record_compare_name);
for (i = 0; err == 0 && i < n; i++) {
if (n)
qsort(refs, n, sizeof(*refs), reftable_ref_record_compare_name);
for (size_t i = 0; err == 0 && i < n; i++)
err = reftable_writer_add_ref(w, &refs[i]);
}
return err;
}
@@ -486,15 +488,16 @@ done:
}
int reftable_writer_add_logs(struct reftable_writer *w,
struct reftable_log_record *logs, int n)
struct reftable_log_record *logs, size_t n)
{
int err = 0;
int i = 0;
QSORT(logs, n, reftable_log_record_compare_key);
for (i = 0; err == 0 && i < n; i++) {
if (n)
qsort(logs, n, sizeof(*logs), reftable_log_record_compare_key);
for (size_t i = 0; err == 0 && i < n; i++)
err = reftable_writer_add_log(w, &logs[i]);
}
return err;
}

View File

@@ -128,7 +128,7 @@ static void write_n_ref_tables(struct reftable_stack *st,
cl_reftable_set_hash(ref.value.val1, i, REFTABLE_HASH_SHA1);
cl_assert_equal_i(reftable_stack_add(st,
&write_test_ref, &ref), 0);
&write_test_ref, &ref, 0), 0);
}
st->opts.disable_auto_compact = disable_auto_compact;
@@ -171,7 +171,7 @@ void test_reftable_stack__add_one(void)
err = reftable_new_stack(&st, dir, &opts);
cl_assert(!err);
err = reftable_stack_add(st, write_test_ref, &ref);
err = reftable_stack_add(st, write_test_ref, &ref, 0);
cl_assert(!err);
err = reftable_stack_read_ref(st, ref.refname, &dest);
@@ -235,12 +235,12 @@ void test_reftable_stack__uptodate(void)
cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0);
cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0);
cl_assert_equal_i(reftable_stack_add(st1, write_test_ref,
&ref1), 0);
&ref1, 0), 0);
cl_assert_equal_i(reftable_stack_add(st2, write_test_ref,
&ref2), REFTABLE_OUTDATED_ERROR);
&ref2, 0), REFTABLE_OUTDATED_ERROR);
cl_assert_equal_i(reftable_stack_reload(st2), 0);
cl_assert_equal_i(reftable_stack_add(st2, write_test_ref,
&ref2), 0);
&ref2, 0), 0);
reftable_stack_destroy(st1);
reftable_stack_destroy(st2);
clear_dir(dir);
@@ -406,7 +406,7 @@ void test_reftable_stack__auto_compaction_fails_gracefully(void)
cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
&ref), 0);
&ref, 0), 0);
cl_assert_equal_i(st->merged->tables_len, 1);
cl_assert_equal_i(st->stats.attempts, 0);
cl_assert_equal_i(st->stats.failures, 0);
@@ -424,7 +424,7 @@ void test_reftable_stack__auto_compaction_fails_gracefully(void)
write_file_buf(table_path.buf, "", 0);
ref.update_index = 2;
err = reftable_stack_add(st, write_test_ref, &ref);
err = reftable_stack_add(st, write_test_ref, &ref, 0);
cl_assert(!err);
cl_assert_equal_i(st->merged->tables_len, 2);
cl_assert_equal_i(st->stats.attempts, 1);
@@ -460,9 +460,9 @@ void test_reftable_stack__update_index_check(void)
cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
&ref1), 0);
&ref1, 0), 0);
cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
&ref2), REFTABLE_API_ERROR);
&ref2, 0), REFTABLE_API_ERROR);
reftable_stack_destroy(st);
clear_dir(dir);
}
@@ -477,7 +477,7 @@ void test_reftable_stack__lock_failure(void)
cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
for (i = -1; i != REFTABLE_EMPTY_TABLE_ERROR; i--)
cl_assert_equal_i(reftable_stack_add(st, write_error,
&i), i);
&i, 0), i);
reftable_stack_destroy(st);
clear_dir(dir);
@@ -521,7 +521,7 @@ void test_reftable_stack__add(void)
for (i = 0; i < N; i++)
cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
&refs[i]), 0);
&refs[i], 0), 0);
for (i = 0; i < N; i++) {
struct write_log_arg arg = {
@@ -529,7 +529,7 @@ void test_reftable_stack__add(void)
.update_index = reftable_stack_next_update_index(st),
};
cl_assert_equal_i(reftable_stack_add(st, write_test_log,
&arg), 0);
&arg, 0), 0);
}
cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0);
@@ -612,8 +612,8 @@ void test_reftable_stack__iterator(void)
}
for (i = 0; i < N; i++)
cl_assert_equal_i(reftable_stack_add(st,
write_test_ref, &refs[i]), 0);
cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
&refs[i], 0), 0);
for (i = 0; i < N; i++) {
struct write_log_arg arg = {
@@ -621,8 +621,8 @@ void test_reftable_stack__iterator(void)
.update_index = reftable_stack_next_update_index(st),
};
cl_assert_equal_i(reftable_stack_add(st,
write_test_log, &arg), 0);
cl_assert_equal_i(reftable_stack_add(st, write_test_log,
&arg, 0), 0);
}
reftable_stack_init_ref_iterator(st, &it);
@@ -697,11 +697,11 @@ void test_reftable_stack__log_normalize(void)
input.value.update.message = (char *) "one\ntwo";
cl_assert_equal_i(reftable_stack_add(st, write_test_log,
&arg), REFTABLE_API_ERROR);
&arg, 0), REFTABLE_API_ERROR);
input.value.update.message = (char *) "one";
cl_assert_equal_i(reftable_stack_add(st, write_test_log,
&arg), 0);
&arg, 0), 0);
cl_assert_equal_i(reftable_stack_read_log(st, input.refname,
&dest), 0);
cl_assert_equal_s(dest.value.update.message, "one\n");
@@ -709,7 +709,7 @@ void test_reftable_stack__log_normalize(void)
input.value.update.message = (char *) "two\n";
arg.update_index = 2;
cl_assert_equal_i(reftable_stack_add(st, write_test_log,
&arg), 0);
&arg, 0), 0);
cl_assert_equal_i(reftable_stack_read_log(st, input.refname,
&dest), 0);
cl_assert_equal_s(dest.value.update.message, "two\n");
@@ -759,15 +759,16 @@ void test_reftable_stack__tombstone(void)
}
}
for (i = 0; i < N; i++)
cl_assert_equal_i(reftable_stack_add(st, write_test_ref, &refs[i]), 0);
cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
&refs[i], 0), 0);
for (i = 0; i < N; i++) {
struct write_log_arg arg = {
.log = &logs[i],
.update_index = reftable_stack_next_update_index(st),
};
cl_assert_equal_i(reftable_stack_add(st,
write_test_log, &arg), 0);
cl_assert_equal_i(reftable_stack_add(st, write_test_log,
&arg, 0), 0);
}
cl_assert_equal_i(reftable_stack_read_ref(st, "branch",
@@ -815,7 +816,7 @@ void test_reftable_stack__hash_id(void)
cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
&ref), 0);
&ref, 0), 0);
/* can't read it with the wrong hash ID. */
cl_assert_equal_i(reftable_new_stack(&st32, dir,
@@ -884,7 +885,7 @@ void test_reftable_stack__reflog_expire(void)
.update_index = reftable_stack_next_update_index(st),
};
cl_assert_equal_i(reftable_stack_add(st, write_test_log,
&arg), 0);
&arg, 0), 0);
}
cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0);
@@ -924,7 +925,7 @@ void test_reftable_stack__empty_add(void)
cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0);
cl_assert_equal_i(reftable_stack_add(st, write_nothing,
NULL), 0);
NULL, 0), 0);
cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0);
clear_dir(dir);
reftable_stack_destroy(st);
@@ -963,7 +964,7 @@ void test_reftable_stack__auto_compaction(void)
};
snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i);
err = reftable_stack_add(st, write_test_ref, &ref);
err = reftable_stack_add(st, write_test_ref, &ref, 0);
cl_assert(!err);
err = reftable_stack_auto_compact(st);
@@ -999,7 +1000,7 @@ void test_reftable_stack__auto_compaction_factor(void)
};
xsnprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i);
err = reftable_stack_add(st, &write_test_ref, &ref);
err = reftable_stack_add(st, &write_test_ref, &ref, 0);
cl_assert(!err);
cl_assert(i < 5 || st->merged->tables_len < 5 * fastlogN(i, 5));
@@ -1078,8 +1079,8 @@ void test_reftable_stack__add_performs_auto_compaction(void)
snprintf(buf, sizeof(buf), "branch-%04"PRIuMAX, (uintmax_t)i);
ref.refname = buf;
cl_assert_equal_i(reftable_stack_add(st,
write_test_ref, &ref), 0);
cl_assert_equal_i(reftable_stack_add(st, write_test_ref,
&ref, 0), 0);
/*
* The stack length should grow continuously for all runs where