Diffstat (limited to 'libbcachefs/move.c')
-rw-r--r-- | libbcachefs/move.c | 812
1 file changed, 225 insertions, 587 deletions
diff --git a/libbcachefs/move.c b/libbcachefs/move.c
index 36d20dc8..093efb09 100644
--- a/libbcachefs/move.c
+++ b/libbcachefs/move.c
@@ -1,14 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
-#include "alloc_background.h"
 #include "alloc_foreground.h"
 #include "backpointers.h"
 #include "bkey_buf.h"
 #include "btree_gc.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
-#include "buckets.h"
 #include "disk_groups.h"
 #include "ec.h"
 #include "error.h"
@@ -17,7 +15,6 @@
 #include "journal_reclaim.h"
 #include "move.h"
 #include "replicas.h"
-#include "subvolume.h"
 #include "super-io.h"
 
 #include "keylist.h"
@@ -26,7 +23,19 @@
 
 #include <trace/events/bcachefs.h>
 
-#define SECTORS_IN_FLIGHT_PER_DEVICE	2048
+static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
+{
+	mutex_lock(&c->data_progress_lock);
+	list_add(&stats->list, &c->data_progress_list);
+	mutex_unlock(&c->data_progress_lock);
+}
+
+static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats)
+{
+	mutex_lock(&c->data_progress_lock);
+	list_del(&stats->list);
+	mutex_unlock(&c->data_progress_lock);
+}
 
 struct moving_io {
 	struct list_head	list;
@@ -38,414 +47,30 @@ struct moving_io {
 
 	struct bch_read_bio	rbio;
 
-	struct migrate_write	write;
+	struct data_update	write;
 	/* Must be last since it is variable size */
 	struct bio_vec		bi_inline_vecs[0];
 };
 
-struct moving_context {
-	/* Closure for waiting on all reads and writes to complete */
-	struct closure		cl;
-
-	struct bch_move_stats	*stats;
-
-	struct list_head	reads;
-
-	/* in flight sectors: */
-	atomic_t		read_sectors;
-	atomic_t		write_sectors;
-
-	wait_queue_head_t	wait;
-};
-
-static int insert_snapshot_whiteouts(struct btree_trans *trans,
-				     enum btree_id id,
-				     struct bpos old_pos,
-				     struct bpos new_pos)
-{
-	struct bch_fs *c = trans->c;
-	struct btree_iter iter, update_iter;
-	struct bkey_s_c k;
-	struct snapshots_seen s;
-	int ret;
-
-	if (!btree_type_has_snapshots(id))
-		return 0;
-
-	snapshots_seen_init(&s);
-
-	if (!bkey_cmp(old_pos, new_pos))
-		return 0;
-
-	if (!snapshot_t(c, old_pos.snapshot)->children[0])
-		return 0;
-
-	bch2_trans_iter_init(trans, &iter, id, old_pos,
-			     BTREE_ITER_NOT_EXTENTS|
-			     BTREE_ITER_ALL_SNAPSHOTS);
-	while (1) {
-next:
-		k = bch2_btree_iter_prev(&iter);
-		ret = bkey_err(k);
-		if (ret)
-			break;
-
-		if (bkey_cmp(old_pos, k.k->p))
-			break;
-
-		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
-			struct bkey_i *update;
-			u32 *i;
-
-			darray_for_each(s.ids, i)
-				if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, *i))
-					goto next;
-
-			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
-
-			ret = PTR_ERR_OR_ZERO(update);
-			if (ret)
-				break;
-
-			bkey_init(&update->k);
-			update->k.p = new_pos;
-			update->k.p.snapshot = k.k->p.snapshot;
-
-			bch2_trans_iter_init(trans, &update_iter, id, update->k.p,
-					     BTREE_ITER_NOT_EXTENTS|
-					     BTREE_ITER_ALL_SNAPSHOTS|
-					     BTREE_ITER_INTENT);
-			ret   = bch2_btree_iter_traverse(&update_iter) ?:
-				bch2_trans_update(trans, &update_iter, update,
-					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
-			bch2_trans_iter_exit(trans, &update_iter);
-			if (ret)
-				break;
-
-			ret = snapshots_seen_add(c, &s, k.k->p.snapshot);
-			if (ret)
-				break;
-		}
-	}
-	bch2_trans_iter_exit(trans, &iter);
-	darray_exit(&s.ids);
-
-	return ret;
-}
-
-static int bch2_migrate_index_update(struct bch_write_op *op)
-{
-	struct bch_fs *c = op->c;
-	struct btree_trans trans;
-	struct btree_iter iter;
-	struct migrate_write *m =
-		container_of(op, struct migrate_write, op);
-	struct open_bucket *ec_ob = ec_open_bucket(c, &op->open_buckets);
-	struct keylist *keys = &op->insert_keys;
-	struct bkey_buf _new, _insert;
-	int ret = 0;
-
-	bch2_bkey_buf_init(&_new);
-	bch2_bkey_buf_init(&_insert);
-	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
-
-	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
-
-	bch2_trans_iter_init(&trans, &iter, m->btree_id,
-			     bkey_start_pos(&bch2_keylist_front(keys)->k),
-			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-
-	while (1) {
-		struct bkey_s_c k;
-		struct bkey_i *insert;
-		struct bkey_i_extent *new;
-		const union bch_extent_entry *entry;
-		struct extent_ptr_decoded p;
-		struct bpos next_pos;
-		bool did_work = false;
-		bool should_check_enospc;
-		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
-
-		bch2_trans_begin(&trans);
-
-		k = bch2_btree_iter_peek_slot(&iter);
-		ret = bkey_err(k);
-		if (ret)
-			goto err;
-
-		new = bkey_i_to_extent(bch2_keylist_front(keys));
-
-		if (bversion_cmp(k.k->version, new->k.version) ||
-		    !bch2_bkey_matches_ptr(c, k, m->ptr, m->offset))
-			goto nomatch;
-
-		bkey_reassemble(_insert.k, k);
-		insert = _insert.k;
-
-		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
-		new = bkey_i_to_extent(_new.k);
-		bch2_cut_front(iter.pos, &new->k_i);
-
-		bch2_cut_front(iter.pos,	insert);
-		bch2_cut_back(new->k.p,		insert);
-		bch2_cut_back(insert->k.p,	&new->k_i);
-
-		if (m->data_cmd == DATA_REWRITE) {
-			struct bch_extent_ptr *new_ptr, *old_ptr = (void *)
-				bch2_bkey_has_device(bkey_i_to_s_c(insert),
-						     m->data_opts.rewrite_dev);
-			if (!old_ptr)
-				goto nomatch;
-
-			if (old_ptr->cached)
-				extent_for_each_ptr(extent_i_to_s(new), new_ptr)
-					new_ptr->cached = true;
-
-			__bch2_bkey_drop_ptr(bkey_i_to_s(insert), old_ptr);
-		}
-
-		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
-			if (bch2_bkey_has_device(bkey_i_to_s_c(insert), p.ptr.dev)) {
-				/*
-				 * raced with another move op? extent already
-				 * has a pointer to the device we just wrote
-				 * data to
-				 */
-				continue;
-			}
-
-			bch2_extent_ptr_decoded_append(insert, &p);
-			did_work = true;
-		}
-
-		if (!did_work)
-			goto nomatch;
-
-		bch2_bkey_narrow_crcs(insert,
-				(struct bch_extent_crc_unpacked) { 0 });
-		bch2_extent_normalize(c, bkey_i_to_s(insert));
-		bch2_bkey_mark_replicas_cached(c, bkey_i_to_s(insert),
-					       op->opts.background_target,
-					       op->opts.data_replicas);
-
-		ret = bch2_sum_sector_overwrites(&trans, &iter, insert,
-						 &should_check_enospc,
-						 &i_sectors_delta,
-						 &disk_sectors_delta);
-		if (ret)
-			goto err;
-
-		if (disk_sectors_delta > (s64) op->res.sectors) {
-			ret = bch2_disk_reservation_add(c, &op->res,
-						disk_sectors_delta - op->res.sectors,
-						!should_check_enospc
-						? BCH_DISK_RESERVATION_NOFAIL : 0);
-			if (ret)
-				goto out;
-		}
-
-		next_pos = insert->k.p;
-
-		ret   = insert_snapshot_whiteouts(&trans, m->btree_id,
-						  k.k->p, insert->k.p) ?:
-			bch2_trans_update(&trans, &iter, insert,
-				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
-			bch2_trans_commit(&trans, &op->res,
-				op_journal_seq(op),
-				BTREE_INSERT_NOFAIL|
-				m->data_opts.btree_insert_flags);
-		if (!ret) {
-			bch2_btree_iter_set_pos(&iter, next_pos);
-			atomic_long_inc(&c->extent_migrate_done);
-			if (ec_ob)
-				bch2_ob_add_backpointer(c, ec_ob, &insert->k);
-		}
-err:
-		if (ret == -EINTR)
-			ret = 0;
-		if (ret)
-			break;
-next:
-		while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
-			bch2_keylist_pop_front(keys);
-			if (bch2_keylist_empty(keys))
-				goto out;
-		}
-		continue;
-nomatch:
-		if (m->ctxt) {
-			BUG_ON(k.k->p.offset <= iter.pos.offset);
-			atomic64_inc(&m->ctxt->stats->keys_raced);
-			atomic64_add(k.k->p.offset - iter.pos.offset,
-				     &m->ctxt->stats->sectors_raced);
-		}
-		atomic_long_inc(&c->extent_migrate_raced);
-		trace_move_race(&new->k);
-		bch2_btree_iter_advance(&iter);
-		goto next;
-	}
-out:
-	bch2_trans_iter_exit(&trans, &iter);
-	bch2_trans_exit(&trans);
-	bch2_bkey_buf_exit(&_insert, c);
-	bch2_bkey_buf_exit(&_new, c);
-	BUG_ON(ret == -EINTR);
-	return ret;
-}
-
-void bch2_migrate_read_done(struct migrate_write *m, struct bch_read_bio *rbio)
-{
-	/* write bio must own pages: */
-	BUG_ON(!m->op.wbio.bio.bi_vcnt);
-
-	m->ptr		= rbio->pick.ptr;
-	m->offset	= rbio->data_pos.offset - rbio->pick.crc.offset;
-	m->op.devs_have	= rbio->devs_have;
-	m->op.pos	= rbio->data_pos;
-	m->op.version	= rbio->version;
-	m->op.crc	= rbio->pick.crc;
-	m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;
-
-	if (m->data_cmd == DATA_REWRITE)
-		bch2_dev_list_drop_dev(&m->op.devs_have, m->data_opts.rewrite_dev);
-}
-
-int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
-			    struct write_point_specifier wp,
-			    struct bch_io_opts io_opts,
-			    enum data_cmd data_cmd,
-			    struct data_opts data_opts,
-			    enum btree_id btree_id,
-			    struct bkey_s_c k)
-{
-	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-	const union bch_extent_entry *entry;
-	struct bch_extent_crc_unpacked crc;
-	struct extent_ptr_decoded p;
-	int ret;
-
-	m->btree_id	= btree_id;
-	m->data_cmd	= data_cmd;
-	m->data_opts	= data_opts;
-	m->nr_ptrs_reserved = 0;
-
-	bch2_write_op_init(&m->op, c, io_opts);
-
-	if (!bch2_bkey_is_incompressible(k))
-		m->op.compression_type =
-			bch2_compression_opt_to_type[io_opts.background_compression ?:
-						     io_opts.compression];
-	else
-		m->op.incompressible = true;
-
-	m->op.target	= data_opts.target,
-	m->op.write_point = wp;
-
-	/*
-	 * op->csum_type is normally initialized from the fs/file's current
-	 * options - but if an extent is encrypted, we require that it stays
-	 * encrypted:
-	 */
-	bkey_for_each_crc(k.k, ptrs, crc, entry)
-		if (bch2_csum_type_is_encryption(crc.csum_type)) {
-			m->op.nonce	= crc.nonce + crc.offset;
-			m->op.csum_type = crc.csum_type;
-			break;
-		}
-
-	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
-		m->op.alloc_reserve = RESERVE_movinggc;
-	} else {
-		/* XXX: this should probably be passed in */
-		m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
-	}
-
-	m->op.flags |= BCH_WRITE_PAGES_STABLE|
-		BCH_WRITE_PAGES_OWNED|
-		BCH_WRITE_DATA_ENCODED|
-		BCH_WRITE_FROM_INTERNAL;
-
-	m->op.nr_replicas	= data_opts.nr_replicas;
-	m->op.nr_replicas_required = data_opts.nr_replicas;
-	m->op.index_update_fn	= bch2_migrate_index_update;
-
-	switch (data_cmd) {
-	case DATA_ADD_REPLICAS: {
-		/*
-		 * DATA_ADD_REPLICAS is used for moving data to a different
-		 * device in the background, and due to compression the new copy
-		 * might take up more space than the old copy:
-		 */
-#if 0
-		int nr = (int) io_opts.data_replicas -
-			bch2_bkey_nr_ptrs_allocated(k);
-#endif
-		int nr = (int) io_opts.data_replicas;
-
-		if (nr > 0) {
-			m->op.nr_replicas = m->nr_ptrs_reserved = nr;
-
-			ret = bch2_disk_reservation_get(c, &m->op.res,
-					k.k->size, m->op.nr_replicas, 0);
-			if (ret)
-				return ret;
-		}
-		break;
-	}
-	case DATA_REWRITE: {
-		unsigned compressed_sectors = 0;
-
-		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-			if (p.ptr.dev == data_opts.rewrite_dev) {
-				if (p.ptr.cached)
-					m->op.flags |= BCH_WRITE_CACHED;
-
-				if (!p.ptr.cached &&
-				    crc_is_compressed(p.crc))
-					compressed_sectors += p.crc.compressed_size;
-			}
-
-		if (compressed_sectors) {
-			ret = bch2_disk_reservation_add(c, &m->op.res,
-					k.k->size * m->op.nr_replicas,
-					BCH_DISK_RESERVATION_NOFAIL);
-			if (ret)
-				return ret;
-		}
-		break;
-	}
-	case DATA_PROMOTE:
-		m->op.flags	|= BCH_WRITE_ALLOC_NOWAIT;
-		m->op.flags	|= BCH_WRITE_CACHED;
-		break;
-	default:
-		BUG();
-	}
-
-	return 0;
-}
-
 static void move_free(struct closure *cl)
 {
 	struct moving_io *io = container_of(cl, struct moving_io, cl);
 	struct moving_context *ctxt = io->write.ctxt;
-	struct bvec_iter_all iter;
-	struct bio_vec *bv;
-
-	bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);
-
-	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, iter)
-		if (bv->bv_page)
-			__free_page(bv->bv_page);
+	struct bch_fs *c = ctxt->c;
 
+	bch2_data_update_exit(&io->write);
 	wake_up(&ctxt->wait);
-
+	percpu_ref_put(&c->writes);
 	kfree(io);
 }
 
 static void move_write_done(struct closure *cl)
 {
 	struct moving_io *io = container_of(cl, struct moving_io, cl);
+	struct moving_context *ctxt = io->write.ctxt;
+
+	if (io->write.op.error)
+		ctxt->write_error = true;
 
 	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
 	closure_return_with_destructor(cl, move_free);
@@ -460,10 +85,9 @@ static void move_write(struct closure *cl)
 		return;
 	}
 
-	bch2_migrate_read_done(&io->write, &io->rbio);
-
 	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
-	closure_call(&io->write.op.cl, bch2_write, NULL, cl);
+
+	bch2_data_update_read_done(&io->write, io->rbio.pick.crc, cl);
 	continue_at(cl, move_write_done, NULL);
 }
 
@@ -520,14 +144,55 @@ static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
 		atomic_read(&ctxt->write_sectors) != sectors_pending);
 }
 
+void bch2_moving_ctxt_exit(struct moving_context *ctxt)
+{
+	move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
+	closure_sync(&ctxt->cl);
+	progress_list_del(ctxt->c, ctxt->stats);
+
+	EBUG_ON(atomic_read(&ctxt->write_sectors));
+
+	trace_move_data(ctxt->c,
+			atomic64_read(&ctxt->stats->sectors_moved),
+			atomic64_read(&ctxt->stats->keys_moved));
+}
+
+void bch2_moving_ctxt_init(struct moving_context *ctxt,
+			   struct bch_fs *c,
+			   struct bch_ratelimit *rate,
+			   struct bch_move_stats *stats,
+			   struct write_point_specifier wp,
+			   bool wait_on_copygc)
+{
+	memset(ctxt, 0, sizeof(*ctxt));
+
+	ctxt->c		= c;
+	ctxt->rate	= rate;
+	ctxt->stats	= stats;
+	ctxt->wp	= wp;
+	ctxt->wait_on_copygc = wait_on_copygc;
+
+	progress_list_add(c, stats);
+	closure_init_stack(&ctxt->cl);
+	INIT_LIST_HEAD(&ctxt->reads);
+	init_waitqueue_head(&ctxt->wait);
+
+	if (stats)
+		stats->data_type = BCH_DATA_user;
+}
+
+void bch_move_stats_init(struct bch_move_stats *stats, char *name)
+{
+	memset(stats, 0, sizeof(*stats));
+	scnprintf(stats->name, sizeof(stats->name),
+		  "%s", name);
+}
+
 static int bch2_move_extent(struct btree_trans *trans,
 			    struct moving_context *ctxt,
-			    struct write_point_specifier wp,
 			    struct bch_io_opts io_opts,
 			    enum btree_id btree_id,
 			    struct bkey_s_c k,
-			    enum data_cmd data_cmd,
-			    struct data_opts data_opts)
+			    struct data_update_opts data_opts)
 {
 	struct bch_fs *c = trans->c;
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
@@ -537,6 +202,9 @@ static int bch2_move_extent(struct btree_trans *trans,
 	unsigned sectors = k.k->size, pages;
 	int ret = -ENOMEM;
 
+	if (!percpu_ref_tryget_live(&c->writes))
+		return -EROFS;
+
 	/* write path might have to decompress data: */
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
@@ -570,11 +238,13 @@ static int bch2_move_extent(struct btree_trans *trans,
 	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(k.k);
 	io->rbio.bio.bi_end_io		= move_read_endio;
 
-	ret = bch2_migrate_write_init(c, &io->write, wp, io_opts,
-				      data_cmd, data_opts, btree_id, k);
+	ret = bch2_data_update_init(c, &io->write, ctxt->wp, io_opts,
+				    data_opts, btree_id, k);
 	if (ret)
 		goto err_free_pages;
 
+	io->write.ctxt = ctxt;
+
 	atomic64_inc(&ctxt->stats->keys_moved);
 	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
 	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
@@ -600,6 +270,7 @@ err_free_pages:
 err_free:
 	kfree(io);
 err:
+	percpu_ref_put(&c->writes);
 	trace_move_alloc_mem_fail(k.k);
 	return ret;
 }
@@ -636,13 +307,20 @@ err:
 }
 
 static int move_ratelimit(struct btree_trans *trans,
-			  struct moving_context *ctxt,
-			  struct bch_ratelimit *rate)
+			  struct moving_context *ctxt)
 {
+	struct bch_fs *c = trans->c;
 	u64 delay;
 
+	if (ctxt->wait_on_copygc) {
+		bch2_trans_unlock(trans);
+		wait_event_killable(c->copygc_running_wq,
+				    !c->copygc_running ||
+				    kthread_should_stop());
+	}
+
 	do {
-		delay = rate ? bch2_ratelimit_delay(rate) : 0;
+		delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
 
 		if (delay) {
 			bch2_trans_unlock(trans);
@@ -665,11 +343,11 @@ static int move_ratelimit(struct btree_trans *trans,
 
 	move_ctxt_wait_event(ctxt, trans,
 		atomic_read(&ctxt->write_sectors) <
-		SECTORS_IN_FLIGHT_PER_DEVICE);
+		c->opts.move_bytes_in_flight >> 9);
 
 	move_ctxt_wait_event(ctxt, trans,
 		atomic_read(&ctxt->read_sectors) <
-		SECTORS_IN_FLIGHT_PER_DEVICE);
+		c->opts.move_bytes_in_flight >> 9);
 
 	return 0;
 }
@@ -699,41 +377,37 @@ static int move_get_io_opts(struct btree_trans *trans,
 	return 0;
 }
 
-static int __bch2_move_data(struct bch_fs *c,
-			    struct moving_context *ctxt,
-			    struct bch_ratelimit *rate,
-			    struct write_point_specifier wp,
-			    struct bpos start,
-			    struct bpos end,
-			    move_pred_fn pred, void *arg,
-			    struct bch_move_stats *stats,
-			    enum btree_id btree_id)
+static int __bch2_move_data(struct moving_context *ctxt,
+			    struct bpos start,
+			    struct bpos end,
+			    move_pred_fn pred, void *arg,
+			    enum btree_id btree_id)
 {
+	struct bch_fs *c = ctxt->c;
 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
 	struct bkey_buf sk;
 	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
-	struct data_opts data_opts;
-	enum data_cmd data_cmd;
+	struct data_update_opts data_opts;
 	u64 cur_inum = U64_MAX;
 	int ret = 0, ret2;
 
 	bch2_bkey_buf_init(&sk);
 	bch2_trans_init(&trans, c, 0, 0);
 
-	stats->data_type = BCH_DATA_user;
-	stats->btree_id	= btree_id;
-	stats->pos	= start;
+	ctxt->stats->data_type	= BCH_DATA_user;
+	ctxt->stats->btree_id	= btree_id;
+	ctxt->stats->pos	= start;
 
 	bch2_trans_iter_init(&trans, &iter, btree_id, start,
 			     BTREE_ITER_PREFETCH|
 			     BTREE_ITER_ALL_SNAPSHOTS);
 
-	if (rate)
-		bch2_ratelimit_reset(rate);
+	if (ctxt->rate)
+		bch2_ratelimit_reset(ctxt->rate);
 
-	while (!move_ratelimit(&trans, ctxt, rate)) {
+	while (!move_ratelimit(&trans, ctxt)) {
 		bch2_trans_begin(&trans);
 
 		k = bch2_btree_iter_peek(&iter);
@@ -749,7 +423,7 @@ static int __bch2_move_data(struct bch_fs *c,
 		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
 			break;
 
-		stats->pos = iter.pos;
+		ctxt->stats->pos = iter.pos;
 
 		if (!bkey_extent_is_direct_data(k.k))
 			goto next_nondata;
@@ -758,18 +432,9 @@ static int __bch2_move_data(struct bch_fs *c,
 		if (ret)
 			continue;
 
-		switch ((data_cmd = pred(c, arg, k, &io_opts, &data_opts))) {
-		case DATA_SKIP:
+		memset(&data_opts, 0, sizeof(data_opts));
+		if (!pred(c, arg, k, &io_opts, &data_opts))
 			goto next;
-		case DATA_SCRUB:
-			BUG();
-		case DATA_ADD_REPLICAS:
-		case DATA_REWRITE:
-		case DATA_PROMOTE:
-			break;
-		default:
-			BUG();
-		}
 
 		/*
 		 * The iterator gets unlocked by __bch2_read_extent - need to
@@ -778,8 +443,8 @@ static int __bch2_move_data(struct bch_fs *c,
 		bch2_bkey_buf_reassemble(&sk, c, k);
 		k = bkey_i_to_s_c(sk.k);
 
-		ret2 = bch2_move_extent(&trans, ctxt, wp, io_opts, btree_id, k,
-					data_cmd, data_opts);
+		ret2 = bch2_move_extent(&trans, ctxt, io_opts,
+					btree_id, k, data_opts);
 		if (ret2) {
 			if (ret2 == -EINTR)
 				continue;
@@ -794,10 +459,10 @@ static int __bch2_move_data(struct bch_fs *c,
 			goto next;
 		}
 
-		if (rate)
-			bch2_ratelimit_increment(rate, k.k->size);
+		if (ctxt->rate)
+			bch2_ratelimit_increment(ctxt->rate, k.k->size);
next:
-		atomic64_add(k.k->size, &stats->sectors_seen);
+		atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
 		bch2_btree_iter_advance(&iter);
 	}
@@ -809,48 +474,20 @@ next_nondata:
 	return ret;
 }
 
-inline void bch_move_stats_init(struct bch_move_stats *stats, char *name)
-{
-	memset(stats, 0, sizeof(*stats));
-
-	scnprintf(stats->name, sizeof(stats->name),
-		  "%s", name);
-}
-
-static inline void progress_list_add(struct bch_fs *c,
-				     struct bch_move_stats *stats)
-{
-	mutex_lock(&c->data_progress_lock);
-	list_add(&stats->list, &c->data_progress_list);
-	mutex_unlock(&c->data_progress_lock);
-}
-
-static inline void progress_list_del(struct bch_fs *c,
-				     struct bch_move_stats *stats)
-{
-	mutex_lock(&c->data_progress_lock);
-	list_del(&stats->list);
-	mutex_unlock(&c->data_progress_lock);
-}
-
 int bch2_move_data(struct bch_fs *c,
 		   enum btree_id start_btree_id, struct bpos start_pos,
 		   enum btree_id end_btree_id,   struct bpos end_pos,
 		   struct bch_ratelimit *rate,
+		   struct bch_move_stats *stats,
 		   struct write_point_specifier wp,
-		   move_pred_fn pred, void *arg,
-		   struct bch_move_stats *stats)
+		   bool wait_on_copygc,
+		   move_pred_fn pred, void *arg)
 {
-	struct moving_context ctxt = { .stats = stats };
+	struct moving_context ctxt;
 	enum btree_id id;
 	int ret;
 
-	progress_list_add(c, stats);
-	closure_init_stack(&ctxt.cl);
-	INIT_LIST_HEAD(&ctxt.reads);
-	init_waitqueue_head(&ctxt.wait);
-
-	stats->data_type = BCH_DATA_user;
+	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
 
 	for (id = start_btree_id;
 	     id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1);
@@ -861,24 +498,16 @@ int bch2_move_data(struct bch_fs *c,
 		    id != BTREE_ID_reflink)
 			continue;
 
-		ret = __bch2_move_data(c, &ctxt, rate, wp,
+		ret = __bch2_move_data(&ctxt,
 				       id == start_btree_id ? start_pos : POS_MIN,
 				       id == end_btree_id   ? end_pos   : POS_MAX,
-				       pred, arg, stats, id);
+				       pred, arg, id);
 		if (ret)
 			break;
 	}
 
-	move_ctxt_wait_event(&ctxt, NULL, list_empty(&ctxt.reads));
-	closure_sync(&ctxt.cl);
-
-	EBUG_ON(atomic_read(&ctxt.write_sectors));
+	bch2_moving_ctxt_exit(&ctxt);
 
-	trace_move_data(c,
-			atomic64_read(&stats->sectors_moved),
-			atomic64_read(&stats->keys_moved));
-
-	progress_list_del(c, stats);
 	return ret;
 }
 
@@ -891,6 +520,7 @@ static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket
 
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
 			     bucket, BTREE_ITER_CACHED);
+again:
 	k = bch2_btree_iter_peek_slot(&iter);
 	ret = bkey_err(k);
 
@@ -901,10 +531,16 @@ static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket
 		    a.v->dirty_sectors) {
 			struct printbuf buf = PRINTBUF;
 
+			if (a.v->data_type == BCH_DATA_btree) {
+				bch2_trans_unlock(trans);
+				if (bch2_btree_interior_updates_flush(c))
+					goto again;
+			}
+
 			prt_str(&buf, "failed to evacuate bucket ");
 			bch2_bkey_val_to_text(&buf, c, k);
 
-			bch_err_ratelimited(c, "%s", buf.buf);
+			bch2_trans_inconsistent(trans, "%s", buf.buf);
 			printbuf_exit(&buf);
 		}
 	}
@@ -913,33 +549,24 @@ static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket
 	return ret;
 }
 
-int bch2_evacuate_bucket(struct bch_fs *c,
-			 struct bpos bucket, int gen,
-			 struct bch_ratelimit *rate,
-			 struct write_point_specifier wp,
-			 enum data_cmd data_cmd,
-			 struct data_opts *data_opts,
-			 struct bch_move_stats *stats)
+int __bch2_evacuate_bucket(struct moving_context *ctxt,
+			   struct bpos bucket, int gen,
+			   struct data_update_opts _data_opts)
 {
+	struct bch_fs *c = ctxt->c;
 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
-	struct moving_context ctxt = { .stats = stats };
 	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_buf sk;
 	struct bch_backpointer bp;
+	struct data_update_opts data_opts;
 	u64 bp_offset = 0, cur_inum = U64_MAX;
 	int ret = 0;
 
 	bch2_bkey_buf_init(&sk);
 	bch2_trans_init(&trans, c, 0, 0);
 
-	progress_list_add(c, stats);
-	closure_init_stack(&ctxt.cl);
-	INIT_LIST_HEAD(&ctxt.reads);
-	init_waitqueue_head(&ctxt.wait);
-
-	stats->data_type = BCH_DATA_user;
-
-	while (!(ret = move_ratelimit(&trans, &ctxt, rate))) {
+	while (!(ret = move_ratelimit(&trans, ctxt))) {
 		bch2_trans_begin(&trans);
 
 		ret = bch2_get_next_backpointer(&trans, bucket, gen,
@@ -952,7 +579,9 @@ int bch2_evacuate_bucket(struct bch_fs *c,
 			break;
 
 		if (!bp.level) {
+			const struct bch_extent_ptr *ptr;
 			struct bkey_s_c k;
+			unsigned i = 0;
 
 			k = bch2_backpointer_get_key(&trans, &iter,
 						     bucket, bp_offset, bp);
@@ -972,24 +601,31 @@ int bch2_evacuate_bucket(struct bch_fs *c,
 			if (ret)
 				continue;
 
-			data_opts->target	= io_opts.background_target;
-			data_opts->rewrite_dev	= bucket.inode;
+			data_opts = _data_opts;
+			data_opts.target	= io_opts.background_target;
+			data_opts.rewrite_ptrs	= 0;
+
+			bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
+				if (ptr->dev == bucket.inode)
+					data_opts.rewrite_ptrs |= 1U << i;
+				i++;
+			}
 
-			ret = bch2_move_extent(&trans, &ctxt, wp, io_opts, bp.btree_id, k,
-					       data_cmd, *data_opts);
+			ret = bch2_move_extent(&trans, ctxt, io_opts,
					       bp.btree_id, k, data_opts);
 			if (ret == -EINTR)
 				continue;
 			if (ret == -ENOMEM) {
 				/* memory allocation failure, wait for some IO to finish */
-				bch2_move_ctxt_wait_for_io(&ctxt, &trans);
+				bch2_move_ctxt_wait_for_io(ctxt, &trans);
 				continue;
 			}
 			if (ret)
 				goto err;
 
-			if (rate)
-				bch2_ratelimit_increment(rate, k.k->size);
-			atomic64_add(k.k->size, &stats->sectors_seen);
+			if (ctxt->rate)
+				bch2_ratelimit_increment(ctxt->rate, k.k->size);
+			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 		} else {
 			struct btree *b;
 
@@ -1011,10 +647,11 @@ int bch2_evacuate_bucket(struct bch_fs *c,
 			if (ret)
 				goto err;
 
-			if (rate)
-				bch2_ratelimit_increment(rate, c->opts.btree_node_size >> 9);
-			atomic64_add(c->opts.btree_node_size >> 9, &stats->sectors_seen);
-			atomic64_add(c->opts.btree_node_size >> 9, &stats->sectors_moved);
+			if (ctxt->rate)
+				bch2_ratelimit_increment(ctxt->rate,
+							 c->opts.btree_node_size >> 9);
+			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
+			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
 		}
 
 		bp_offset++;
@@ -1022,30 +659,38 @@ int bch2_evacuate_bucket(struct bch_fs *c,
 
 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && gen >= 0) {
 		bch2_trans_unlock(&trans);
-		move_ctxt_wait_event(&ctxt, NULL, list_empty(&ctxt.reads));
-		closure_sync(&ctxt.cl);
-		lockrestart_do(&trans, verify_bucket_evacuated(&trans, bucket, gen));
+		move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
+		closure_sync(&ctxt->cl);
+		if (!ctxt->write_error)
+			lockrestart_do(&trans, verify_bucket_evacuated(&trans, bucket, gen));
 	}
err:
 	bch2_trans_exit(&trans);
 	bch2_bkey_buf_exit(&sk, c);
+	return ret;
+}
 
-	move_ctxt_wait_event(&ctxt, NULL, list_empty(&ctxt.reads));
-	closure_sync(&ctxt.cl);
-	progress_list_del(c, stats);
-
-	EBUG_ON(atomic_read(&ctxt.write_sectors));
+int bch2_evacuate_bucket(struct bch_fs *c,
+			 struct bpos bucket, int gen,
+			 struct data_update_opts data_opts,
+			 struct bch_ratelimit *rate,
+			 struct bch_move_stats *stats,
+			 struct write_point_specifier wp,
+			 bool wait_on_copygc)
+{
+	struct moving_context ctxt;
+	int ret;
 
-	trace_move_data(c,
-			atomic64_read(&stats->sectors_moved),
-			atomic64_read(&stats->keys_moved));
+	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+	ret = __bch2_evacuate_bucket(&ctxt, bucket, gen, data_opts);
+	bch2_moving_ctxt_exit(&ctxt);
 
 	return ret;
 }
 
-typedef enum data_cmd (*move_btree_pred)(struct bch_fs *, void *,
-					 struct btree *, struct bch_io_opts *,
-					 struct data_opts *);
+typedef bool (*move_btree_pred)(struct bch_fs *, void *,
+				struct btree *, struct bch_io_opts *,
+				struct data_update_opts *);
 
 static int bch2_move_btree(struct bch_fs *c,
 			   enum btree_id start_btree_id, struct bpos start_pos,
@@ -1059,8 +704,7 @@ static int bch2_move_btree(struct bch_fs *c,
 	struct btree_iter iter;
 	struct btree *b;
 	enum btree_id id;
-	struct data_opts data_opts;
-	enum data_cmd cmd;
+	struct data_update_opts data_opts;
 	int ret = 0;
 
 	bch2_trans_init(&trans, c, 0, 0);
@@ -1089,17 +733,8 @@ retry:
 
 			stats->pos = iter.pos;
 
-			switch ((cmd = pred(c, arg, b, &io_opts, &data_opts))) {
-			case DATA_SKIP:
+			if (!pred(c, arg, b, &io_opts, &data_opts))
 				goto next;
-			case DATA_SCRUB:
-				BUG();
-			case DATA_ADD_REPLICAS:
-			case DATA_REWRITE:
-				break;
-			default:
-				BUG();
-			}
 
 			ret = bch2_btree_node_rewrite(&trans, &iter, b, 0) ?: ret;
 			if (ret == -EINTR)
@@ -1129,20 +764,10 @@ next:
 	return ret;
 }
 
-#if 0
-static enum data_cmd scrub_pred(struct bch_fs *c, void *arg,
-				struct bkey_s_c k,
-				struct bch_io_opts *io_opts,
-				struct data_opts *data_opts)
-{
-	return DATA_SCRUB;
-}
-#endif
-
-static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
-				      struct bkey_s_c k,
-				      struct bch_io_opts *io_opts,
-				      struct data_opts *data_opts)
+static bool rereplicate_pred(struct bch_fs *c, void *arg,
+			     struct bkey_s_c k,
+			     struct bch_io_opts *io_opts,
+			     struct data_update_opts *data_opts)
 {
 	unsigned nr_good = bch2_bkey_durability(c, k);
 	unsigned replicas = bkey_is_btree_ptr(k.k)
@@ -1150,43 +775,50 @@ static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
 		: io_opts->data_replicas;
 
 	if (!nr_good || nr_good >= replicas)
-		return DATA_SKIP;
+		return false;
 
 	data_opts->target		= 0;
-	data_opts->nr_replicas		= 1;
+	data_opts->extra_replicas	= replicas - nr_good;
 	data_opts->btree_insert_flags	= 0;
-	return DATA_ADD_REPLICAS;
+	return true;
 }
 
-static enum data_cmd migrate_pred(struct bch_fs *c, void *arg,
-				  struct bkey_s_c k,
-				  struct bch_io_opts *io_opts,
-				  struct data_opts *data_opts)
+static bool migrate_pred(struct bch_fs *c, void *arg,
+			 struct bkey_s_c k,
+			 struct bch_io_opts *io_opts,
+			 struct data_update_opts *data_opts)
 {
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const struct bch_extent_ptr *ptr;
 	struct bch_ioctl_data *op = arg;
+	unsigned i = 0;
 
-	if (!bch2_bkey_has_device(k, op->migrate.dev))
-		return DATA_SKIP;
-
+	data_opts->rewrite_ptrs		= 0;
 	data_opts->target		= 0;
-	data_opts->nr_replicas		= 1;
+	data_opts->extra_replicas	= 0;
 	data_opts->btree_insert_flags	= 0;
-	data_opts->rewrite_dev		= op->migrate.dev;
-	return DATA_REWRITE;
+
+	bkey_for_each_ptr(ptrs, ptr) {
+		if (ptr->dev == op->migrate.dev)
+			data_opts->rewrite_ptrs |= 1U << i;
+		i++;
+	}
+
+	return data_opts->rewrite_ptrs != 0;
 }
 
-static enum data_cmd rereplicate_btree_pred(struct bch_fs *c, void *arg,
-					    struct btree *b,
-					    struct bch_io_opts *io_opts,
-					    struct data_opts *data_opts)
+static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
+				   struct btree *b,
+				   struct bch_io_opts *io_opts,
+				   struct data_update_opts *data_opts)
 {
 	return rereplicate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
 }
 
-static enum data_cmd migrate_btree_pred(struct bch_fs *c, void *arg,
-					struct btree *b,
-					struct bch_io_opts *io_opts,
-					struct data_opts *data_opts)
+static bool migrate_btree_pred(struct bch_fs *c, void *arg,
+			       struct btree *b,
+			       struct bch_io_opts *io_opts,
+			       struct data_update_opts *data_opts)
 {
 	return migrate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
 }
@@ -1215,21 +847,21 @@ static bool bformat_needs_redo(struct bkey_format *f)
 	return false;
 }
 
-static enum data_cmd rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
-					    struct btree *b,
-					    struct bch_io_opts *io_opts,
-					    struct data_opts *data_opts)
+static bool rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
+				   struct btree *b,
+				   struct bch_io_opts *io_opts,
+				   struct data_update_opts *data_opts)
 {
 	if (b->version_ondisk != c->sb.version ||
 	    btree_node_need_rewrite(b) ||
 	    bformat_needs_redo(&b->format)) {
 		data_opts->target		= 0;
-		data_opts->nr_replicas		= 1;
+		data_opts->extra_replicas	= 0;
 		data_opts->btree_insert_flags	= 0;
-		return DATA_REWRITE;
+		return true;
 	}
 
-	return DATA_SKIP;
+	return false;
 }
 
 int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
@@ -1273,8 +905,11 @@ int bch2_data_job(struct bch_fs *c,
 		ret = bch2_move_data(c,
 				     op.start_btree,	op.start_pos,
 				     op.end_btree,	op.end_pos,
-				     NULL, writepoint_hashed((unsigned long) current),
-				     rereplicate_pred, c, stats) ?: ret;
+				     NULL,
+				     stats,
+				     writepoint_hashed((unsigned long) current),
+				     true,
+				     rereplicate_pred, c) ?: ret;
 		ret = bch2_replicas_gc2(c) ?: ret;
 		break;
 	case BCH_DATA_OP_MIGRATE:
@@ -1294,8 +929,11 @@ int bch2_data_job(struct bch_fs *c,
 		ret = bch2_move_data(c,
 				     op.start_btree,	op.start_pos,
 				     op.end_btree,	op.end_pos,
-				     NULL, writepoint_hashed((unsigned long) current),
-				     migrate_pred, &op, stats) ?: ret;
+				     NULL,
+				     stats,
+				     writepoint_hashed((unsigned long) current),
+				     true,
+				     migrate_pred, &op) ?: ret;
 		ret = bch2_replicas_gc2(c) ?: ret;
 		break;
 	case BCH_DATA_OP_REWRITE_OLD_NODES:
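
Illustration (not part of the patch): a minimal sketch of how a caller drives the reworked interface, modeled on migrate_pred() and the bch2_data_job() call sites above. Predicates now return bool instead of enum data_cmd, and mark individual replicas to rewrite via the data_update_opts.rewrite_ptrs bitmask (one bit per pointer, in bkey_for_each_ptr() order); __bch2_move_data() zeroes data_opts before each call. The names evacuate_dev_pred/evacuate_dev, the btree range, and the stats name are hypothetical, and the sketch assumes the post-patch declarations from move.h.

	/* Rewrite every replica that lives on device 'dev': */
	static bool evacuate_dev_pred(struct bch_fs *c, void *arg,
				      struct bkey_s_c k,
				      struct bch_io_opts *io_opts,
				      struct data_update_opts *data_opts)
	{
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const struct bch_extent_ptr *ptr;
		unsigned dev = *(unsigned *) arg;
		unsigned i = 0;

		/* data_opts was zeroed by __bch2_move_data() before this call: */
		bkey_for_each_ptr(ptrs, ptr) {
			if (ptr->dev == dev)
				data_opts->rewrite_ptrs |= 1U << i;
			i++;
		}

		/* returning false is the old DATA_SKIP */
		return data_opts->rewrite_ptrs != 0;
	}

	static int evacuate_dev(struct bch_fs *c, unsigned dev)
	{
		struct bch_move_stats stats;

		bch_move_stats_init(&stats, "evacuate_example");

		/*
		 * rate, stats, write point and wait_on_copygc now travel in
		 * the moving_context that bch2_move_data() sets up internally:
		 */
		return bch2_move_data(c,
				      BTREE_ID_extents,	POS_MIN,
				      BTREE_ID_reflink,	POS_MAX,
				      NULL,		/* no rate limit */
				      &stats,
				      writepoint_hashed((unsigned long) current),
				      true,		/* wait_on_copygc */
				      evacuate_dev_pred, &dev);
	}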