17
#include "reftable-error.h"
20
static int writer_flush_block(struct reftable_writer *w);
23
static void writer_clear_index(struct reftable_writer *w);
26
static int writer_finish_public_section(struct reftable_writer *w);
28
/*
 * Return the per-block-type statistics slot in w->stats for block type
 * `typ` (ref, obj, index, or log stats).
 * NOTE(review): this chunk is a lossy extraction; the dispatch on `typ`
 * (switch/if lines and braces) is not visible here, only the four
 * return statements. Code left byte-identical.
 */
static struct reftable_block_stats *
29
writer_reftable_block_stats(struct reftable_writer *w, uint8_t typ)
33
return &w->stats.ref_stats;
35
return &w->stats.obj_stats;
37
return &w->stats.idx_stats;
39
return &w->stats.log_stats;
47
/*
 * Write `len` bytes from `data` through the writer's output callback,
 * first emitting any zero padding queued by a previous call, then
 * queuing `padding` zero bytes to be written before the *next* write.
 * Deferring padding this way lets the final block of the file stay
 * unpadded.
 * NOTE(review): extraction dropped interior lines (error checks,
 * braces, return). Code left byte-identical.
 */
static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
51
if (w->pending_padding > 0) {
52
uint8_t *zeroed = reftable_calloc(w->pending_padding, sizeof(*zeroed));
53
int n = w->write(w->write_arg, zeroed, w->pending_padding);
57
w->pending_padding = 0;
58
reftable_free(zeroed);
61
w->pending_padding = padding;
62
n = w->write(w->write_arg, data, len);
69
/*
 * Fill in defaults for any zero-valued (unset) write options:
 * restart interval 16, SHA-1 hash format, and the default block size.
 * NOTE(review): closing braces are missing from this extraction.
 */
static void options_set_defaults(struct reftable_write_options *opts)
71
if (opts->restart_interval == 0) {
72
opts->restart_interval = 16;
75
if (opts->hash_id == 0) {
76
opts->hash_id = GIT_SHA1_FORMAT_ID;
78
if (opts->block_size == 0) {
79
opts->block_size = DEFAULT_BLOCK_SIZE;
83
/*
 * Reftable format version for this writer: SHA-1 (or unset) hash uses
 * one version, any other hash the other.
 * NOTE(review): the two arms of the `?:` are not visible in this
 * extraction — presumably `1 : 2`; confirm against the full source.
 */
static int writer_version(struct reftable_writer *w)
85
return (w->opts.hash_id == 0 || w->opts.hash_id == GIT_SHA1_FORMAT_ID) ?
90
/*
 * Serialize the reftable file header into `dest`:
 *   bytes 0-3   "REFT" magic
 *   byte  4     format version
 *   bytes 5-7   block size (24-bit big-endian)
 *   bytes 8-15  min update index (64-bit big-endian)
 *   bytes 16-23 max update index (64-bit big-endian)
 *   bytes 24-27 hash format id (version 2 only)
 * Returns the header size for the writer's format version.
 */
static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
92
memcpy(dest, "REFT", 4);
94
dest[4] = writer_version(w);
96
put_be24(dest + 5, w->opts.block_size);
97
put_be64(dest + 8, w->min_update_index);
98
put_be64(dest + 16, w->max_update_index);
99
if (writer_version(w) == 2) {
100
put_be32(dest + 24, w->opts.hash_id);
102
return header_size(writer_version(w));
105
/*
 * Reset the embedded block writer to start a fresh block of type `typ`
 * over w->block. The first block of the file must leave room for the
 * file header, hence block_start = header_size(...) in that case
 * (the conditional choosing between 0 and header size appears to be
 * missing from this extraction). Also clears the last-key tracker used
 * for key-ordering checks.
 */
static void writer_reinit_block_writer(struct reftable_writer *w, uint8_t typ)
109
block_start = header_size(writer_version(w));
112
strbuf_reset(&w->last_key);
113
block_writer_init(&w->block_writer_data, typ, w->block,
114
w->opts.block_size, block_start,
115
hash_size(w->opts.hash_id));
116
w->block_writer = &w->block_writer_data;
117
w->block_writer->restart_interval = w->opts.restart_interval;
120
/*
 * Allocate and initialize a new reftable writer. Output is produced
 * through `writer_func`/`flush_func` with `writer_arg` as their opaque
 * argument; `_opts` (copied, may be NULL judging by the local copy)
 * configures block size, hash, etc., with zero fields defaulted.
 * Block sizes are stored as 24-bit values in the header, so anything
 * >= 16MB is a caller bug.
 * Writing starts in a ref block, as the ref section always comes first
 * in the reftable format.
 * NOTE(review): extraction dropped interior lines (opts copy, return).
 */
struct reftable_writer *
121
reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
122
int (*flush_func)(void *),
123
void *writer_arg, const struct reftable_write_options *_opts)
125
struct reftable_writer *wp = reftable_calloc(1, sizeof(*wp));
126
struct reftable_write_options opts = {0};
130
options_set_defaults(&opts);
131
if (opts.block_size >= (1 << 24))
132
BUG("configured block size exceeds 16MB");
134
strbuf_init(&wp->block_writer_data.last_key, 0);
135
strbuf_init(&wp->last_key, 0);
136
REFTABLE_CALLOC_ARRAY(wp->block, opts.block_size);
137
wp->write = writer_func;
138
wp->write_arg = writer_arg;
140
wp->flush = flush_func;
141
writer_reinit_block_writer(wp, BLOCK_TYPE_REF);
146
/*
 * Set the [min, max] update-index range for records that will be added
 * to this writer; these bounds are written into the file header and
 * enforced by reftable_writer_add_ref().
 */
void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
149
w->min_update_index = min;
150
w->max_update_index = max;
153
/*
 * Release all resources owned by the writer: the block buffer, the
 * embedded block writer, the pending index records, and the last-key
 * tracker. Presumably guarded by a NULL check on `w` that is missing
 * from this extraction.
 */
static void writer_release(struct reftable_writer *w)
156
reftable_free(w->block);
158
block_writer_release(&w->block_writer_data);
159
w->block_writer = NULL;
160
writer_clear_index(w);
161
strbuf_release(&w->last_key);
165
/*
 * Free the writer and everything it owns.
 * NOTE(review): the body is entirely missing from this extraction —
 * presumably writer_release(w) followed by reftable_free(w); confirm
 * against the full source.
 */
void reftable_writer_free(struct reftable_writer *w)
171
/*
 * Node of the object index tree: maps an object hash to the list of
 * block offsets where refs pointing at that object were written.
 * NOTE(review): the struct's field lines (offsets array, lengths) are
 * missing from this extraction; only the opening brace and part of the
 * initializer macro are visible.
 */
struct obj_index_tree_node {
178
#define OBJ_INDEX_TREE_NODE_INIT \
180
.hash = STRBUF_INIT \
183
/*
 * Ordering callback for the object index tree: compare two nodes by
 * their hash strbufs. Arguments arrive as void* from the generic tree
 * API, hence the casts.
 */
static int obj_index_tree_node_compare(const void *a, const void *b)
185
return strbuf_cmp(&((const struct obj_index_tree_node *)a)->hash,
186
&((const struct obj_index_tree_node *)b)->hash);
189
/*
 * Record that an object with the given `hash` is referenced at the
 * current write offset (w->next). Looks the hash up in the obj index
 * tree; on a miss, allocates and inserts a fresh node (copying the
 * hash). Skips the append when the most recent offset for this hash
 * already equals `off` (same block referenced twice), otherwise grows
 * the offsets array and appends.
 * NOTE(review): extraction dropped interior lines (node-hit branch,
 * braces). Code left byte-identical.
 */
static void writer_index_hash(struct reftable_writer *w, struct strbuf *hash)
191
uint64_t off = w->next;
193
struct obj_index_tree_node want = { .hash = *hash };
195
struct tree_node *node = tree_search(&want, &w->obj_index_tree,
196
&obj_index_tree_node_compare, 0);
197
struct obj_index_tree_node *key = NULL;
199
struct obj_index_tree_node empty = OBJ_INDEX_TREE_NODE_INIT;
200
key = reftable_malloc(sizeof(struct obj_index_tree_node));
203
strbuf_reset(&key->hash);
204
strbuf_addbuf(&key->hash, hash);
205
tree_search((void *)key, &w->obj_index_tree,
206
&obj_index_tree_node_compare, 1);
211
if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off) {
215
REFTABLE_ALLOC_GROW(key->offsets, key->offset_len + 1, key->offset_cap);
216
key->offsets[key->offset_len++] = off;
219
/*
 * Append a single record to the current block, enforcing that records
 * arrive in strictly increasing key order (REFTABLE_API_ERROR
 * otherwise). Lazily (re)initializes the block writer for the record's
 * type; adding a record of a different type than the active block is a
 * caller bug. When the current block is full, flushes it and retries
 * in a fresh block; a record that still does not fit yields
 * REFTABLE_ENTRY_TOO_BIG_ERROR.
 * NOTE(review): extraction dropped interior lines (goto/done labels,
 * error checks). Code left byte-identical.
 */
static int writer_add_record(struct reftable_writer *w,
220
struct reftable_record *rec)
222
struct strbuf key = STRBUF_INIT;
225
reftable_record_key(rec, &key);
226
if (strbuf_cmp(&w->last_key, &key) >= 0) {
227
err = REFTABLE_API_ERROR;
231
strbuf_reset(&w->last_key);
232
strbuf_addbuf(&w->last_key, &key);
233
if (!w->block_writer)
234
writer_reinit_block_writer(w, reftable_record_type(rec));
236
if (block_writer_type(w->block_writer) != reftable_record_type(rec))
237
BUG("record of type %d added to writer of type %d",
238
reftable_record_type(rec), block_writer_type(w->block_writer));
245
if (!block_writer_add(w->block_writer, rec)) {
254
err = writer_flush_block(w);
257
writer_reinit_block_writer(w, reftable_record_type(rec));
267
err = block_writer_add(w->block_writer, rec);
269
err = REFTABLE_ENTRY_TOO_BIG_ERROR;
274
strbuf_release(&key);
278
/*
 * Add one ref record. The ref's update_index must lie within the
 * [min, max] range previously set via reftable_writer_set_limits();
 * out-of-range (or an earlier missing-refname check whose lines are
 * not visible here) is REFTABLE_API_ERROR. The stored update index is
 * made relative to min_update_index to compress better on disk.
 * Unless skip_index_objects is set, both the primary value hash (val1)
 * and the peeled hash (val2) are registered in the object index so the
 * obj section can later map objects back to refs.
 * NOTE(review): extraction dropped interior lines (error checks,
 * strbuf releases, return). Code left byte-identical.
 */
int reftable_writer_add_ref(struct reftable_writer *w,
279
struct reftable_ref_record *ref)
281
struct reftable_record rec = {
282
.type = BLOCK_TYPE_REF,
290
return REFTABLE_API_ERROR;
291
if (ref->update_index < w->min_update_index ||
292
ref->update_index > w->max_update_index)
293
return REFTABLE_API_ERROR;
295
rec.u.ref.update_index -= w->min_update_index;
297
err = writer_add_record(w, &rec);
301
if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) {
302
struct strbuf h = STRBUF_INIT;
303
strbuf_add(&h, (char *)reftable_ref_record_val1(ref),
304
hash_size(w->opts.hash_id));
305
writer_index_hash(w, &h);
309
if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) {
310
struct strbuf h = STRBUF_INIT;
311
strbuf_add(&h, reftable_ref_record_val2(ref),
312
hash_size(w->opts.hash_id));
313
writer_index_hash(w, &h);
319
/*
 * Add `n` ref records in one call. Sorts the caller's array by refname
 * in place first (the writer requires ascending key order), then adds
 * each record, stopping at the first error.
 */
int reftable_writer_add_refs(struct reftable_writer *w,
320
struct reftable_ref_record *refs, int n)
324
QSORT(refs, n, reftable_ref_record_compare_name);
325
for (i = 0; err == 0 && i < n; i++) {
326
err = reftable_writer_add_ref(w, &refs[i]);
331
/*
 * Add a log record without touching its message. If the writer is
 * still in the ref section, that public section is finished first —
 * log blocks always follow ref blocks in the file. Pending padding is
 * cancelled (and w->next rolled back accordingly) because the log
 * section starts immediately after the last ref/index block.
 */
static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
332
struct reftable_log_record *log)
334
struct reftable_record rec = {
335
.type = BLOCK_TYPE_LOG,
340
if (w->block_writer &&
341
block_writer_type(w->block_writer) == BLOCK_TYPE_REF) {
342
int err = writer_finish_public_section(w);
347
w->next -= w->pending_padding;
348
w->pending_padding = 0;
349
return writer_add_record(w, &rec);
352
/*
 * Add one log record, normalizing its message first. Deletion records
 * carry no message and are passed through verbatim. Unless
 * exact_log_message is set, trailing newlines are stripped, an
 * embedded newline is rejected with REFTABLE_API_ERROR (a reflog
 * message must be a single line), and exactly one trailing '\n' is
 * appended. The caller's message pointer is swapped for the cleaned
 * buffer only for the duration of the write and restored afterwards,
 * so the caller's record is left unmodified.
 * NOTE(review): extraction dropped interior lines (refname check,
 * braces, return). Code left byte-identical.
 */
int reftable_writer_add_log(struct reftable_writer *w,
353
struct reftable_log_record *log)
355
char *input_log_message = NULL;
356
struct strbuf cleaned_message = STRBUF_INIT;
359
if (log->value_type == REFTABLE_LOG_DELETION)
360
return reftable_writer_add_log_verbatim(w, log);
363
return REFTABLE_API_ERROR;
365
input_log_message = log->value.update.message;
366
if (!w->opts.exact_log_message && log->value.update.message) {
367
strbuf_addstr(&cleaned_message, log->value.update.message);
368
while (cleaned_message.len &&
369
cleaned_message.buf[cleaned_message.len - 1] == '\n')
370
strbuf_setlen(&cleaned_message,
371
cleaned_message.len - 1);
372
if (strchr(cleaned_message.buf, '\n')) {
374
err = REFTABLE_API_ERROR;
377
strbuf_addstr(&cleaned_message, "\n");
378
log->value.update.message = cleaned_message.buf;
381
err = reftable_writer_add_log_verbatim(w, log);
382
log->value.update.message = input_log_message;
384
strbuf_release(&cleaned_message);
388
/*
 * Add `n` log records in one call. Sorts the caller's array by key in
 * place first (the writer requires ascending key order), then adds
 * each record, stopping at the first error.
 */
int reftable_writer_add_logs(struct reftable_writer *w,
389
struct reftable_log_record *logs, int n)
393
QSORT(logs, n, reftable_log_record_compare_key);
395
for (i = 0; err == 0 && i < n; i++) {
396
err = reftable_writer_add_log(w, &logs[i]);
401
/*
 * Finish the section for the current block type: flush the partially
 * filled block, then repeatedly write index blocks over the collected
 * index records until at most `threshold` top-level entries remain
 * (threshold is 1 for unpadded tables, 3 for padded ones — a tiny
 * index is not worth a block of padding), building a multi-level
 * index. Finally records index stats (block count since
 * `before_blocks`, start offset, max level) into this type's stats
 * bucket and resets the key tracker for the next section.
 * NOTE(review): substantial interior lines (loop bookkeeping, index
 * swap, max_level update, error checks) are missing from this
 * extraction. Code left byte-identical.
 */
static int writer_finish_section(struct reftable_writer *w)
403
struct reftable_block_stats *bstats = NULL;
404
uint8_t typ = block_writer_type(w->block_writer);
405
uint64_t index_start = 0;
407
size_t threshold = w->opts.unpadded ? 1 : 3;
408
int before_blocks = w->stats.idx_stats.blocks;
411
err = writer_flush_block(w);
433
while (w->index_len > threshold) {
434
struct reftable_index_record *idx = NULL;
438
index_start = w->next;
439
writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);
442
idx_len = w->index_len;
447
for (i = 0; i < idx_len; i++) {
448
struct reftable_record rec = {
449
.type = BLOCK_TYPE_INDEX,
455
err = writer_add_record(w, &rec);
460
err = writer_flush_block(w);
464
for (i = 0; i < idx_len; i++)
465
strbuf_release(&idx[i].last_key);
474
writer_clear_index(w);
476
bstats = writer_reftable_block_stats(w, typ);
477
bstats->index_blocks = w->stats.idx_stats.blocks - before_blocks;
478
bstats->index_offset = index_start;
479
bstats->max_index_level = max_level;
482
strbuf_reset(&w->last_key);
487
/*
 * Walk state for computing the longest common prefix among all object
 * hashes in the obj index tree (used to pick the abbreviated object id
 * length). NOTE(review): the struct's fields (`max`, `last`) are not
 * visible in this extraction.
 */
struct common_prefix_arg {
492
/*
 * infix_walk() callback: for each tree entry in hash order, measure
 * the common-prefix length with the previously visited hash and track
 * the maximum (the max-update line is missing from this extraction);
 * remembers the current hash as `last` for the next call.
 */
static void update_common(void *void_arg, void *key)
494
struct common_prefix_arg *arg = void_arg;
495
struct obj_index_tree_node *entry = key;
497
int n = common_prefix_size(&entry->hash, arg->last);
502
arg->last = &entry->hash;
505
/*
 * Walk state for writing obj records: the writer plus an `err` field
 * (not visible in this extraction) carrying the first failure out of
 * the infix_walk() callback.
 */
struct write_record_arg {
506
struct reftable_writer *w;
510
/*
 * infix_walk() callback: emit one obj record mapping an abbreviated
 * object hash (object_id_len bytes of prefix) to the block offsets
 * where refs for that object live. On a full block, flushes and
 * retries in a fresh obj block; if the record still does not fit, its
 * offset list is dropped (offset_len = 0) — readers then fall back to
 * scanning — and that reduced record must always fit, hence the
 * assert. Errors are stashed in arg->err; earlier failures short-
 * circuit (the guard lines are missing from this extraction).
 */
static void write_object_record(void *void_arg, void *key)
512
struct write_record_arg *arg = void_arg;
513
struct obj_index_tree_node *entry = key;
514
struct reftable_record
515
rec = { .type = BLOCK_TYPE_OBJ,
517
.hash_prefix = (uint8_t *)entry->hash.buf,
518
.hash_prefix_len = arg->w->stats.object_id_len,
519
.offsets = entry->offsets,
520
.offset_len = entry->offset_len,
525
arg->err = block_writer_add(arg->w->block_writer, &rec);
529
arg->err = writer_flush_block(arg->w);
533
writer_reinit_block_writer(arg->w, BLOCK_TYPE_OBJ);
534
arg->err = block_writer_add(arg->w->block_writer, &rec);
538
rec.u.obj.offset_len = 0;
539
arg->err = block_writer_add(arg->w->block_writer, &rec);
542
assert(arg->err == 0);
547
/*
 * infix_walk() callback: free one obj index tree node — its offsets
 * array, its hash buffer, then the node itself.
 */
static void object_record_free(void *void_arg UNUSED, void *key)
549
struct obj_index_tree_node *entry = key;
551
FREE_AND_NULL(entry->offsets);
552
strbuf_release(&entry->hash);
553
reftable_free(entry);
556
/*
 * Write the obj section: first walk the obj index tree to find the
 * longest common hash prefix, and use one byte more than that as the
 * abbreviated object id length (shortest length that still uniquely
 * identifies every object). Then walk again emitting one obj record
 * per object, and finish the section (flush + index).
 * NOTE(review): error propagation lines between the walks appear to be
 * missing from this extraction.
 */
static int writer_dump_object_index(struct reftable_writer *w)
558
struct write_record_arg closure = { .w = w };
559
struct common_prefix_arg common = {
562
if (w->obj_index_tree) {
563
infix_walk(w->obj_index_tree, &update_common, &common);
565
w->stats.object_id_len = common.max + 1;
567
writer_reinit_block_writer(w, BLOCK_TYPE_OBJ);
569
if (w->obj_index_tree) {
570
infix_walk(w->obj_index_tree, &write_object_record, &closure);
575
return writer_finish_section(w);
578
/*
 * Finish the currently open public section (ref or log). No-op when no
 * block is in progress. After finishing a ref section that needed an
 * index (index_blocks > 0) and unless skip_index_objects is set, also
 * writes the obj section mapping objects back to refs. The obj index
 * tree is then freed in all cases, and the writer is left with no
 * active block so the next record starts a fresh section.
 * NOTE(review): extraction dropped interior lines (error checks,
 * return). Code left byte-identical.
 */
static int writer_finish_public_section(struct reftable_writer *w)
583
if (!w->block_writer)
586
typ = block_writer_type(w->block_writer);
587
err = writer_finish_section(w);
590
if (typ == BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
591
w->stats.ref_stats.index_blocks > 0) {
592
err = writer_dump_object_index(w);
597
if (w->obj_index_tree) {
598
infix_walk(w->obj_index_tree, &object_record_free, NULL);
599
tree_free(w->obj_index_tree);
600
w->obj_index_tree = NULL;
603
w->block_writer = NULL;
607
/*
 * Finalize the table: finish any open section, then emit the footer.
 * An empty table (nothing written, w->next == 0) still gets a header
 * written before the footer but reports REFTABLE_EMPTY_TABLE_ERROR.
 * The footer repeats the header, then records big-endian offsets of
 * the ref index, the obj section (offset<<5 | abbreviated object id
 * length — the obj offset is always block-aligned so the low 5 bits
 * are free), the obj index, the log section and the log index,
 * followed by a CRC-32 over the footer bytes. Finally the flush
 * callback is invoked; a flush failure maps to REFTABLE_IO_ERROR.
 * NOTE(review): substantial interior lines (footer buffer setup,
 * error checks, cleanup/return) are missing from this extraction.
 */
int reftable_writer_close(struct reftable_writer *w)
611
int err = writer_finish_public_section(w);
612
int empty_table = w->next == 0;
615
w->pending_padding = 0;
619
int n = writer_write_header(w, header);
620
err = padded_write(w, header, n, 0);
625
p += writer_write_header(w, footer);
626
put_be64(p, w->stats.ref_stats.index_offset);
628
put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
630
put_be64(p, w->stats.obj_stats.index_offset);
633
put_be64(p, w->stats.log_stats.offset);
635
put_be64(p, w->stats.log_stats.index_offset);
638
put_be32(p, crc32(0, footer, p - footer));
641
err = w->flush(w->write_arg);
643
err = REFTABLE_IO_ERROR;
647
err = padded_write(w, footer, footer_size(writer_version(w)), 0);
652
err = REFTABLE_EMPTY_TABLE_ERROR;
661
/*
 * Release every pending index record's key buffer and free the index
 * array itself (index_len/index_cap resets appear to be on lines not
 * visible in this extraction). Safe when w->index is NULL.
 */
static void writer_clear_index(struct reftable_writer *w)
663
for (size_t i = 0; w->index && i < w->index_len; i++)
664
strbuf_release(&w->index[i].last_key);
665
FREE_AND_NULL(w->index);
670
/*
 * Serialize the current (non-empty) block and write it out. Steps:
 * finish the block writer to get the encoded byte count; compute zero
 * padding up to the block size (log blocks and unpadded tables are
 * never padded — log blocks are zlib-compressed so their tail is
 * unused anyway); update this block type's stats, recording the
 * section's start offset on its first block; stamp the file header
 * into the buffer (presumably only for the very first block — the
 * guard is not visible here); write buffer + deferred padding; queue
 * an index record (current offset + block's last key) so a parent
 * index block can point at this one; advance w->next and clear the
 * active block writer.
 * NOTE(review): extraction dropped interior lines (error checks,
 * index_len increment, return). Code left byte-identical.
 */
static int writer_flush_nonempty_block(struct reftable_writer *w)
672
struct reftable_index_record index_record = {
673
.last_key = STRBUF_INIT,
675
uint8_t typ = block_writer_type(w->block_writer);
676
struct reftable_block_stats *bstats;
677
int raw_bytes, padding = 0, err;
678
uint64_t block_typ_off;
687
raw_bytes = block_writer_finish(w->block_writer);
695
if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG)
696
padding = w->opts.block_size - raw_bytes;
698
bstats = writer_reftable_block_stats(w, typ);
699
block_typ_off = (bstats->blocks == 0) ? w->next : 0;
700
if (block_typ_off > 0)
701
bstats->offset = block_typ_off;
702
bstats->entries += w->block_writer->entries;
703
bstats->restarts += w->block_writer->restart_len;
712
writer_write_header(w, w->block);
714
err = padded_write(w, w->block, raw_bytes, padding);
728
REFTABLE_ALLOC_GROW(w->index, w->index_len + 1, w->index_cap);
729
index_record.offset = w->next;
730
strbuf_reset(&index_record.last_key);
731
strbuf_addbuf(&index_record.last_key, &w->block_writer->last_key);
732
w->index[w->index_len] = index_record;
735
w->next += padding + raw_bytes;
736
w->block_writer = NULL;
741
/*
 * Flush the active block if there is one and it holds at least one
 * entry; a missing or empty block writer is a successful no-op (the
 * early `return 0;` lines are not visible in this extraction).
 */
static int writer_flush_block(struct reftable_writer *w)
743
if (!w->block_writer)
745
if (w->block_writer->entries == 0)
747
return writer_flush_nonempty_block(w);
750
const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w)