drbd: Move list of epochs from mdev to tconn
This is necessary since the transfer_log on the sending side is also per tconn. Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com> Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
This commit is contained in:
parent
1d2783d532
commit
12038a3a71
4 changed files with 42 additions and 43 deletions
|
@ -859,6 +859,9 @@ struct drbd_tconn { /* is a resource from the config file */
|
||||||
void *int_dig_in;
|
void *int_dig_in;
|
||||||
void *int_dig_vv;
|
void *int_dig_vv;
|
||||||
|
|
||||||
|
struct drbd_epoch *current_epoch;
|
||||||
|
spinlock_t epoch_lock;
|
||||||
|
unsigned int epochs;
|
||||||
enum write_ordering_e write_ordering;
|
enum write_ordering_e write_ordering;
|
||||||
|
|
||||||
struct drbd_thread receiver;
|
struct drbd_thread receiver;
|
||||||
|
@ -962,9 +965,6 @@ struct drbd_conf {
|
||||||
|
|
||||||
int open_cnt;
|
int open_cnt;
|
||||||
u64 *p_uuid;
|
u64 *p_uuid;
|
||||||
struct drbd_epoch *current_epoch;
|
|
||||||
spinlock_t epoch_lock;
|
|
||||||
unsigned int epochs;
|
|
||||||
|
|
||||||
struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
|
struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
|
||||||
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
|
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
|
||||||
|
|
|
@ -2082,7 +2082,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
|
||||||
|
|
||||||
spin_lock_init(&mdev->al_lock);
|
spin_lock_init(&mdev->al_lock);
|
||||||
spin_lock_init(&mdev->peer_seq_lock);
|
spin_lock_init(&mdev->peer_seq_lock);
|
||||||
spin_lock_init(&mdev->epoch_lock);
|
|
||||||
|
|
||||||
INIT_LIST_HEAD(&mdev->active_ee);
|
INIT_LIST_HEAD(&mdev->active_ee);
|
||||||
INIT_LIST_HEAD(&mdev->sync_ee);
|
INIT_LIST_HEAD(&mdev->sync_ee);
|
||||||
|
@ -2142,9 +2141,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
|
||||||
dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
|
dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
|
||||||
mdev->tconn->receiver.t_state);
|
mdev->tconn->receiver.t_state);
|
||||||
|
|
||||||
/* no need to lock it, I'm the only thread alive */
|
|
||||||
if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
|
|
||||||
dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
|
|
||||||
mdev->al_writ_cnt =
|
mdev->al_writ_cnt =
|
||||||
mdev->bm_writ_cnt =
|
mdev->bm_writ_cnt =
|
||||||
mdev->read_cnt =
|
mdev->read_cnt =
|
||||||
|
@ -2377,7 +2373,6 @@ void drbd_minor_destroy(struct kref *kref)
|
||||||
kfree(mdev->p_uuid);
|
kfree(mdev->p_uuid);
|
||||||
/* mdev->p_uuid = NULL; */
|
/* mdev->p_uuid = NULL; */
|
||||||
|
|
||||||
kfree(mdev->current_epoch);
|
|
||||||
if (mdev->bitmap) /* should no longer be there. */
|
if (mdev->bitmap) /* should no longer be there. */
|
||||||
drbd_bm_cleanup(mdev);
|
drbd_bm_cleanup(mdev);
|
||||||
__free_page(mdev->md_io_page);
|
__free_page(mdev->md_io_page);
|
||||||
|
@ -2624,6 +2619,12 @@ struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
|
||||||
if (!tl_init(tconn))
|
if (!tl_init(tconn))
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
|
tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
|
||||||
|
if (!tconn->current_epoch)
|
||||||
|
goto fail;
|
||||||
|
INIT_LIST_HEAD(&tconn->current_epoch->list);
|
||||||
|
tconn->epochs = 1;
|
||||||
|
spin_lock_init(&tconn->epoch_lock);
|
||||||
tconn->write_ordering = WO_bdev_flush;
|
tconn->write_ordering = WO_bdev_flush;
|
||||||
|
|
||||||
tconn->cstate = C_STANDALONE;
|
tconn->cstate = C_STANDALONE;
|
||||||
|
@ -2649,6 +2650,7 @@ struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
|
||||||
return tconn;
|
return tconn;
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
|
kfree(tconn->current_epoch);
|
||||||
tl_cleanup(tconn);
|
tl_cleanup(tconn);
|
||||||
free_cpumask_var(tconn->cpu_mask);
|
free_cpumask_var(tconn->cpu_mask);
|
||||||
drbd_free_socket(&tconn->meta);
|
drbd_free_socket(&tconn->meta);
|
||||||
|
@ -2663,6 +2665,10 @@ void conn_destroy(struct kref *kref)
|
||||||
{
|
{
|
||||||
struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
|
struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
|
||||||
|
|
||||||
|
if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
|
||||||
|
conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
|
||||||
|
kfree(tconn->current_epoch);
|
||||||
|
|
||||||
idr_destroy(&tconn->volumes);
|
idr_destroy(&tconn->volumes);
|
||||||
|
|
||||||
free_cpumask_var(tconn->cpu_mask);
|
free_cpumask_var(tconn->cpu_mask);
|
||||||
|
@ -2744,13 +2750,6 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
|
||||||
mdev->read_requests = RB_ROOT;
|
mdev->read_requests = RB_ROOT;
|
||||||
mdev->write_requests = RB_ROOT;
|
mdev->write_requests = RB_ROOT;
|
||||||
|
|
||||||
mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
|
|
||||||
if (!mdev->current_epoch)
|
|
||||||
goto out_no_epoch;
|
|
||||||
|
|
||||||
INIT_LIST_HEAD(&mdev->current_epoch->list);
|
|
||||||
mdev->epochs = 1;
|
|
||||||
|
|
||||||
if (!idr_pre_get(&minors, GFP_KERNEL))
|
if (!idr_pre_get(&minors, GFP_KERNEL))
|
||||||
goto out_no_minor_idr;
|
goto out_no_minor_idr;
|
||||||
if (idr_get_new_above(&minors, mdev, minor, &minor_got))
|
if (idr_get_new_above(&minors, mdev, minor, &minor_got))
|
||||||
|
@ -2786,8 +2785,6 @@ out_idr_remove_minor:
|
||||||
idr_remove(&minors, minor_got);
|
idr_remove(&minors, minor_got);
|
||||||
synchronize_rcu();
|
synchronize_rcu();
|
||||||
out_no_minor_idr:
|
out_no_minor_idr:
|
||||||
kfree(mdev->current_epoch);
|
|
||||||
out_no_epoch:
|
|
||||||
drbd_bm_cleanup(mdev);
|
drbd_bm_cleanup(mdev);
|
||||||
out_no_bitmap:
|
out_no_bitmap:
|
||||||
__free_page(mdev->md_io_page);
|
__free_page(mdev->md_io_page);
|
||||||
|
|
|
@ -271,7 +271,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
|
||||||
atomic_read(&mdev->rs_pending_cnt),
|
atomic_read(&mdev->rs_pending_cnt),
|
||||||
atomic_read(&mdev->unacked_cnt),
|
atomic_read(&mdev->unacked_cnt),
|
||||||
atomic_read(&mdev->ap_bio_cnt),
|
atomic_read(&mdev->ap_bio_cnt),
|
||||||
mdev->epochs,
|
mdev->tconn->epochs,
|
||||||
write_ordering_chars[mdev->tconn->write_ordering]
|
write_ordering_chars[mdev->tconn->write_ordering]
|
||||||
);
|
);
|
||||||
seq_printf(seq, " oos:%llu\n",
|
seq_printf(seq, " oos:%llu\n",
|
||||||
|
|
|
@ -1128,8 +1128,9 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
|
||||||
int epoch_size;
|
int epoch_size;
|
||||||
struct drbd_epoch *next_epoch;
|
struct drbd_epoch *next_epoch;
|
||||||
enum finish_epoch rv = FE_STILL_LIVE;
|
enum finish_epoch rv = FE_STILL_LIVE;
|
||||||
|
struct drbd_tconn *tconn = mdev->tconn;
|
||||||
|
|
||||||
spin_lock(&mdev->epoch_lock);
|
spin_lock(&tconn->epoch_lock);
|
||||||
do {
|
do {
|
||||||
next_epoch = NULL;
|
next_epoch = NULL;
|
||||||
|
|
||||||
|
@ -1151,18 +1152,18 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
|
||||||
atomic_read(&epoch->active) == 0 &&
|
atomic_read(&epoch->active) == 0 &&
|
||||||
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
|
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
|
||||||
if (!(ev & EV_CLEANUP)) {
|
if (!(ev & EV_CLEANUP)) {
|
||||||
spin_unlock(&mdev->epoch_lock);
|
spin_unlock(&tconn->epoch_lock);
|
||||||
drbd_send_b_ack(epoch->mdev, epoch->barrier_nr, epoch_size);
|
drbd_send_b_ack(epoch->mdev, epoch->barrier_nr, epoch_size);
|
||||||
spin_lock(&mdev->epoch_lock);
|
spin_lock(&tconn->epoch_lock);
|
||||||
}
|
}
|
||||||
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
|
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
|
||||||
dec_unacked(epoch->mdev);
|
dec_unacked(epoch->mdev);
|
||||||
|
|
||||||
if (mdev->current_epoch != epoch) {
|
if (tconn->current_epoch != epoch) {
|
||||||
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
|
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
|
||||||
list_del(&epoch->list);
|
list_del(&epoch->list);
|
||||||
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
|
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
|
||||||
mdev->epochs--;
|
tconn->epochs--;
|
||||||
kfree(epoch);
|
kfree(epoch);
|
||||||
|
|
||||||
if (rv == FE_STILL_LIVE)
|
if (rv == FE_STILL_LIVE)
|
||||||
|
@ -1183,7 +1184,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
|
||||||
epoch = next_epoch;
|
epoch = next_epoch;
|
||||||
} while (1);
|
} while (1);
|
||||||
|
|
||||||
spin_unlock(&mdev->epoch_lock);
|
spin_unlock(&tconn->epoch_lock);
|
||||||
|
|
||||||
return rv;
|
return rv;
|
||||||
}
|
}
|
||||||
|
@ -1348,9 +1349,9 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
|
||||||
|
|
||||||
inc_unacked(mdev);
|
inc_unacked(mdev);
|
||||||
|
|
||||||
mdev->current_epoch->barrier_nr = p->barrier;
|
tconn->current_epoch->barrier_nr = p->barrier;
|
||||||
mdev->current_epoch->mdev = mdev;
|
tconn->current_epoch->mdev = mdev;
|
||||||
rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
|
rv = drbd_may_finish_epoch(mdev, tconn->current_epoch, EV_GOT_BARRIER_NR);
|
||||||
|
|
||||||
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
|
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
|
||||||
* the activity log, which means it would not be resynced in case the
|
* the activity log, which means it would not be resynced in case the
|
||||||
|
@ -1376,13 +1377,13 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
|
||||||
drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
|
drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
|
||||||
drbd_flush(tconn);
|
drbd_flush(tconn);
|
||||||
|
|
||||||
if (atomic_read(&mdev->current_epoch->epoch_size)) {
|
if (atomic_read(&tconn->current_epoch->epoch_size)) {
|
||||||
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
|
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
|
||||||
if (epoch)
|
if (epoch)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
epoch = mdev->current_epoch;
|
epoch = tconn->current_epoch;
|
||||||
wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
|
wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
|
||||||
|
|
||||||
D_ASSERT(atomic_read(&epoch->active) == 0);
|
D_ASSERT(atomic_read(&epoch->active) == 0);
|
||||||
|
@ -1398,16 +1399,16 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
|
||||||
atomic_set(&epoch->epoch_size, 0);
|
atomic_set(&epoch->epoch_size, 0);
|
||||||
atomic_set(&epoch->active, 0);
|
atomic_set(&epoch->active, 0);
|
||||||
|
|
||||||
spin_lock(&mdev->epoch_lock);
|
spin_lock(&tconn->epoch_lock);
|
||||||
if (atomic_read(&mdev->current_epoch->epoch_size)) {
|
if (atomic_read(&tconn->current_epoch->epoch_size)) {
|
||||||
list_add(&epoch->list, &mdev->current_epoch->list);
|
list_add(&epoch->list, &tconn->current_epoch->list);
|
||||||
mdev->current_epoch = epoch;
|
tconn->current_epoch = epoch;
|
||||||
mdev->epochs++;
|
tconn->epochs++;
|
||||||
} else {
|
} else {
|
||||||
/* The current_epoch got recycled while we allocated this one... */
|
/* The current_epoch got recycled while we allocated this one... */
|
||||||
kfree(epoch);
|
kfree(epoch);
|
||||||
}
|
}
|
||||||
spin_unlock(&mdev->epoch_lock);
|
spin_unlock(&tconn->epoch_lock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -2103,7 +2104,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
|
||||||
|
|
||||||
err = wait_for_and_update_peer_seq(mdev, peer_seq);
|
err = wait_for_and_update_peer_seq(mdev, peer_seq);
|
||||||
drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
|
drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
|
||||||
atomic_inc(&mdev->current_epoch->epoch_size);
|
atomic_inc(&tconn->current_epoch->epoch_size);
|
||||||
err2 = drbd_drain_block(mdev, pi->size);
|
err2 = drbd_drain_block(mdev, pi->size);
|
||||||
if (!err)
|
if (!err)
|
||||||
err = err2;
|
err = err2;
|
||||||
|
@ -2131,11 +2132,11 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
|
||||||
if (dp_flags & DP_MAY_SET_IN_SYNC)
|
if (dp_flags & DP_MAY_SET_IN_SYNC)
|
||||||
peer_req->flags |= EE_MAY_SET_IN_SYNC;
|
peer_req->flags |= EE_MAY_SET_IN_SYNC;
|
||||||
|
|
||||||
spin_lock(&mdev->epoch_lock);
|
spin_lock(&tconn->epoch_lock);
|
||||||
peer_req->epoch = mdev->current_epoch;
|
peer_req->epoch = tconn->current_epoch;
|
||||||
atomic_inc(&peer_req->epoch->epoch_size);
|
atomic_inc(&peer_req->epoch->epoch_size);
|
||||||
atomic_inc(&peer_req->epoch->active);
|
atomic_inc(&peer_req->epoch->active);
|
||||||
spin_unlock(&mdev->epoch_lock);
|
spin_unlock(&tconn->epoch_lock);
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
|
tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
|
||||||
|
@ -4359,6 +4360,11 @@ static void conn_disconnect(struct drbd_tconn *tconn)
|
||||||
}
|
}
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
|
|
||||||
|
if (!list_empty(&tconn->current_epoch->list))
|
||||||
|
conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
|
||||||
|
/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
|
||||||
|
atomic_set(&tconn->current_epoch->epoch_size, 0);
|
||||||
|
|
||||||
conn_info(tconn, "Connection closed\n");
|
conn_info(tconn, "Connection closed\n");
|
||||||
|
|
||||||
if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
|
if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
|
||||||
|
@ -4446,10 +4452,6 @@ static int drbd_disconnected(struct drbd_conf *mdev)
|
||||||
D_ASSERT(list_empty(&mdev->sync_ee));
|
D_ASSERT(list_empty(&mdev->sync_ee));
|
||||||
D_ASSERT(list_empty(&mdev->done_ee));
|
D_ASSERT(list_empty(&mdev->done_ee));
|
||||||
|
|
||||||
/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
|
|
||||||
atomic_set(&mdev->current_epoch->epoch_size, 0);
|
|
||||||
D_ASSERT(list_empty(&mdev->current_epoch->list));
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue