[DRBD-cvs] drbd by phil; LGE pointed out that the double wait_eve...

drbd-user@lists.linbit.com drbd-user@lists.linbit.com
Thu, 6 May 2004 11:49:38 +0200 (CEST)


DRBD CVS committal

Author  : phil
Module  : drbd

Dir     : drbd/drbd


Modified Files:
      Tag: rel-0_7-branch
	drbd_receiver.c 


Log Message:
LGE pointed out that the double wait_event construct that was used
in drbd_get_ee() could lead to problems. Fixed this.
Now drbd_get_ee() is interruptible by signals - this is a good thing.

===================================================================
RCS file: /var/lib/cvs/drbd/drbd/drbd/drbd_receiver.c,v
retrieving revision 1.97.2.143
retrieving revision 1.97.2.144
diff -u -3 -r1.97.2.143 -r1.97.2.144
--- drbd_receiver.c	5 May 2004 17:07:02 -0000	1.97.2.143
+++ drbd_receiver.c	6 May 2004 09:49:33 -0000	1.97.2.144
@@ -243,26 +243,15 @@
 
 STATIC int _drbd_process_ee(drbd_dev *mdev,struct list_head *head);
 
-static inline int _get_ee_cond(struct Drbd_Conf* mdev)
-{
-	int av;
-	spin_lock_irq(&mdev->ee_lock);
-	_drbd_process_ee(mdev,&mdev->done_ee);
-	av = !list_empty(&mdev->free_ee);
-	spin_unlock_irq(&mdev->ee_lock);
-	if(!av) {
-		if((mdev->ee_vacant+mdev->ee_in_use) < mdev->conf.max_buffers){
-			if(drbd_alloc_ee(mdev,GFP_TRY)) av = 1;
-		}
-	}
-	if(!av) drbd_kick_lo(mdev);
-	return av;
-}
-
+/**
+ * drbd_get_ee: Returns a Tl_epoch_entry; might sleep. Fails only if
+ * a signal comes in (returns NULL in that case).
+ */
 struct Tl_epoch_entry* drbd_get_ee(drbd_dev *mdev)
 {
 	struct list_head *le;
 	struct Tl_epoch_entry* e;
+	DEFINE_WAIT(wait);
 
 	MUST_HOLD(&mdev->ee_lock);
 
@@ -272,10 +261,28 @@
 		spin_lock_irq(&mdev->ee_lock);
 	}
 
-	while(list_empty(&mdev->free_ee)) {
-		spin_unlock_irq(&mdev->ee_lock);
-		wait_event(mdev->ee_wait,_get_ee_cond(mdev));
-		spin_lock_irq(&mdev->ee_lock);
+	if(list_empty(&mdev->free_ee)) _drbd_process_ee(mdev,&mdev->done_ee);
+
+	if(list_empty(&mdev->free_ee)) {
+		for (;;) {
+			prepare_to_wait(&mdev->ee_wait, &wait, 
+					TASK_INTERRUPTIBLE);
+			if(!list_empty(&mdev->free_ee)) break;
+			if( ( mdev->ee_vacant+mdev->ee_in_use) < 
+			      mdev->conf.max_buffers ) {
+				if(drbd_alloc_ee(mdev,GFP_TRY)) break;
+			}
+			drbd_kick_lo(mdev);
+			spin_unlock_irq(&mdev->ee_lock);
+			schedule();
+			spin_lock_irq(&mdev->ee_lock);
+			// finish_wait must name the SAME queue used in
+			// prepare_to_wait (ee_wait, not al_wait), and must run
+			// before any return — otherwise the on-stack wait
+			// entry stays linked into the queue after this frame
+			// is gone. It is done inside the loop so that we are
+			// TASK_RUNNING in _drbd_process_ee (which might sleep
+			// by itself.)
+			finish_wait(&mdev->ee_wait, &wait);
+			if (signal_pending(current)) return 0;
+			_drbd_process_ee(mdev,&mdev->done_ee);
+		}
+		finish_wait(&mdev->ee_wait, &wait);
 	}
 
 	le=mdev->free_ee.next;
@@ -741,6 +748,7 @@
 	spin_lock_irq(&mdev->ee_lock);
 	e=drbd_get_ee(mdev);
 	spin_unlock_irq(&mdev->ee_lock);
+	if(!e) return 0;
 
 	bio = &e->private_bio;
 
@@ -1021,7 +1029,10 @@
 
 	spin_lock_irq(&mdev->ee_lock);
 	e=drbd_get_ee(mdev);
-	// can we move it outside the lock?
+	if(!e) {
+		spin_unlock_irq(&mdev->ee_lock);
+		return FALSE;
+	}
 	e->block_id = p->block_id; // no meaning on this side, pr* on partner
 	list_add(&e->w.list,&mdev->read_ee);
 	spin_unlock_irq(&mdev->ee_lock);