[DRBD-cvs] svn commit by lars - r2467 - trunk/drbd - fix stopping of syncer in drbd_disconnect, fix cleanup on exit of worker thread
drbd-cvs at lists.linbit.com
Wed Sep 27 12:29:13 CEST 2006
Author: lars
Date: 2006-09-27 12:29:12 +0200 (Wed, 27 Sep 2006)
New Revision: 2467
Modified:
trunk/drbd/drbd_main.c
trunk/drbd/drbd_receiver.c
trunk/drbd/drbd_worker.c
Log:
fix stopping of syncer in drbd_disconnect
fix cleanup on exit of worker thread
fix corruption after down/up/access
fix possible memory leak on module unload
Modified: trunk/drbd/drbd_main.c
===================================================================
--- trunk/drbd/drbd_main.c 2006-09-26 16:25:42 UTC (rev 2466)
+++ trunk/drbd/drbd_main.c 2006-09-27 10:29:12 UTC (rev 2467)
@@ -2227,10 +2227,16 @@
if(mdev->ee_hash) {
kfree(mdev->ee_hash);
mdev->ee_hash_s = 0;
+ mdev->ee_hash = NULL;
}
+ if(mdev->tl_hash) {
+ kfree(mdev->tl_hash);
+ mdev->tl_hash_s = 0;
+ mdev->tl_hash = NULL;
+ }
if(mdev->app_reads_hash) {
kfree(mdev->app_reads_hash);
- mdev->app_reads_hash = 0;
+ mdev->app_reads_hash = NULL;
}
if ( mdev->p_uuid ) {
kfree(mdev->p_uuid);
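
[Editor's note] The drbd_main.c hunk above does two things: it frees the previously leaked tl_hash table, and it resets each pointer to NULL after kfree(), so that a repeated teardown (the down/up/access corruption case from the log) can neither double-free nor dereference stale memory. Below is a minimal userspace sketch of the same idiom; the struct and field names echo the patch, but the code is illustrative only, not DRBD's:

/* Illustrative userspace analog (not DRBD code): a teardown path that
 * may run more than once must reset pointers to NULL after freeing. */
#include <stdlib.h>

struct dev_state {
        unsigned int *ee_hash;
        size_t        ee_hash_s;
};

static void cleanup(struct dev_state *s)
{
        if (s->ee_hash) {
                free(s->ee_hash);
                s->ee_hash_s = 0;
                s->ee_hash = NULL;  /* without this, a second cleanup()
                                     * would double-free, and any later
                                     * access would hit freed memory */
        }
}

int main(void)
{
        struct dev_state s = {
                .ee_hash   = malloc(64 * sizeof(unsigned int)),
                .ee_hash_s = 64,
        };
        cleanup(&s);  /* e.g. "down" */
        cleanup(&s);  /* e.g. a later disconnect; safe only because the
                       * pointer was reset to NULL above */
        return 0;
}

In the kernel the same holds: kfree(NULL) is a no-op, so once the pointer is NULLed, re-entering the cleanup path is harmless.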
Modified: trunk/drbd/drbd_receiver.c
===================================================================
--- trunk/drbd/drbd_receiver.c 2006-09-26 16:25:42 UTC (rev 2466)
+++ trunk/drbd/drbd_receiver.c 2006-09-27 10:29:12 UTC (rev 2467)
@@ -2495,6 +2495,10 @@
atomic_set(&mdev->rs_pending_cnt,0);
wake_up(&mdev->cstate_wait);
+ /* make sure syncer is stopped and w_resume_next_sg queued */
+ del_timer_sync(&mdev->resync_timer);
+ set_bit(STOP_SYNC_TIMER,&mdev->flags);
+ resync_timer_fn((unsigned long)mdev);
/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
* w_make_resync_request etc. which may still be on the worker queue
@@ -2545,11 +2549,13 @@
if(mdev->ee_hash) {
kfree(mdev->ee_hash);
mdev->ee_hash = NULL;
+ mdev->ee_hash_s = 0;
}
if(mdev->tl_hash) {
kfree(mdev->tl_hash);
mdev->tl_hash = NULL;
+ mdev->tl_hash_s = 0;
}
if(mdev->cram_hmac_tfm) {
crypto_free_tfm(mdev->cram_hmac_tfm);
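
[Editor's note] The first drbd_receiver.c hunk moves the syncer shutdown into drbd_disconnect, as the log says: del_timer_sync() cancels any pending resync_timer, the STOP_SYNC_TIMER bit is set, and resync_timer_fn() is then invoked directly so that w_resume_next_sg gets queued exactly once. (The second hunk mirrors the drbd_main.c fix, zeroing the _s size fields alongside the freed tables.) A rough userspace analog of the timer-stop pattern, with invented names and a C11 atomic standing in for the mdev->flags bit:

/* Illustrative userspace analog (not DRBD code) of the disconnect-side
 * pattern: cancel the timer, set a stop flag, then run the handler once
 * synchronously so its follow-up work is queued deterministically. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool timer_armed = false;  /* stands in for the pending resync_timer */
static atomic_bool stop_sync   = false;  /* stands in for STOP_SYNC_TIMER */

static void queue_resume_work(void)      /* stands in for queueing w_resume_next_sg */
{
        printf("resume-next-sync-group work queued\n");
}

static void resync_timer_fn(void)
{
        /* with the stop flag set, the handler queues the resume work
         * instead of starting another resync request round */
        if (atomic_exchange(&stop_sync, false))
                queue_resume_work();
}

static void disconnect(void)
{
        atomic_store(&timer_armed, false);  /* analog of del_timer_sync() */
        atomic_store(&stop_sync, true);
        resync_timer_fn();                  /* run the handler ourselves, once */
}

int main(void)
{
        atomic_store(&timer_armed, true);
        disconnect();
        return 0;
}

Calling the handler directly after del_timer_sync() closes the window where the timer was cancelled before firing: the follow-up work is queued either way.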
Modified: trunk/drbd/drbd_worker.c
===================================================================
--- trunk/drbd/drbd_worker.c 2006-09-26 16:25:42 UTC (rev 2466)
+++ trunk/drbd/drbd_worker.c 2006-09-27 10:29:12 UTC (rev 2467)
@@ -242,6 +242,7 @@
spin_unlock_irqrestore(&mdev->req_lock,flags);
+ /* harmless race: list_empty outside data.work.q_lock */
if(list_empty(&mdev->resync_work.list)) {
drbd_queue_work(&mdev->data.work,&mdev->resync_work);
} else INFO("Avoided requeue of resync_work\n");
@@ -955,47 +956,21 @@
}
}
- /* FIXME this should go into drbd_disconnect */
- del_timer_sync(&mdev->resync_timer);
- /* possible paranoia check: the STOP_SYNC_TIMER bit should be set
- * if and only if del_timer_sync returns true ... */
-
spin_lock_irq(&mdev->data.work.q_lock);
- if (test_and_clear_bit(STOP_SYNC_TIMER,&mdev->flags)) {
- mdev->resync_work.cb = w_resume_next_sg;
- if (list_empty(&mdev->resync_work.list))
- drbd_queue_work(&mdev->data.work,&mdev->resync_work);
- // else: already queued
- } else {
- /* timer already consumed that bit, or it was never set */
- if (list_empty(&mdev->resync_work.list)) {
- /* not queued, should be inactive */
- ERR_IF (mdev->resync_work.cb != w_resync_inactive)
- mdev->resync_work.cb = w_resync_inactive;
- } else {
- /* still queued; should be w_resume_next_sg */
- ERR_IF (mdev->resync_work.cb != w_resume_next_sg)
- mdev->resync_work.cb = w_resume_next_sg;
- }
- }
-
i = 0;
- again:
- list_splice_init(&mdev->data.work.q,&work_list);
- spin_unlock_irq(&mdev->data.work.q_lock);
+ while (!list_empty(&mdev->data.work.q)) {
+ list_splice_init(&mdev->data.work.q,&work_list);
+ spin_unlock_irq(&mdev->data.work.q_lock);
- while(!list_empty(&work_list)) {
- w = list_entry(work_list.next, struct drbd_work,list);
- list_del_init(&w->list);
- w->cb(mdev,w,1);
- i++; /* dead debugging code */
- }
+ while(!list_empty(&work_list)) {
+ w = list_entry(work_list.next, struct drbd_work,list);
+ list_del_init(&w->list);
+ w->cb(mdev,w,1);
+ i++; /* dead debugging code */
+ }
- drbd_thread_stop(&mdev->receiver);
-
- spin_lock_irq(&mdev->data.work.q_lock);
- if(!list_empty(&mdev->data.work.q))
- goto again;
+ spin_lock_irq(&mdev->data.work.q_lock);
+ }
sema_init(&mdev->data.work.s,0);
/* DANGEROUS race: if someone did queue his work within the spinlock,
* but up() ed outside the spinlock, we could get an up() on the
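
[Editor's note] The drbd_worker.c rewrite replaces the goto-based drain with a while loop: the exiting worker repeatedly splices data.work.q onto a private list while holding q_lock, drops the lock to run the callbacks, then retakes the lock and re-checks, since a callback may queue further work in the meantime. A self-contained userspace sketch of that splice-and-drain loop (pthread mutex instead of a spinlock, invented names, a LIFO list for brevity):

/* Illustrative userspace analog (not DRBD code) of the rewritten drain
 * loop in the worker's exit path. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
        struct work *next;
        void (*cb)(struct work *w);
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work *queue;  /* stands in for mdev->data.work.q */

static void drain_queue(void)
{
        pthread_mutex_lock(&q_lock);
        while (queue) {
                struct work *batch = queue;  /* analog of list_splice_init() */
                queue = NULL;
                pthread_mutex_unlock(&q_lock);

                while (batch) {
                        struct work *w = batch;
                        batch = w->next;
                        w->cb(w);            /* may queue new work */
                        free(w);
                }
                pthread_mutex_lock(&q_lock); /* re-check under the lock */
        }
        pthread_mutex_unlock(&q_lock);
}

static void hello(struct work *w) { (void)w; printf("work item ran\n"); }

int main(void)
{
        struct work *w = malloc(sizeof(*w));
        w->next = NULL;
        w->cb = hello;
        queue = w;
        drain_queue();
        return 0;
}

Re-checking emptiness only while holding the lock is what makes the loop terminate cleanly; the "harmless race" comment added in the first hunk concerns the opposite, benign case, where an unlocked list_empty() check at worst skips a redundant requeue.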