[DRBD-cvs] svn commit by lars - r3002 - in branches/drbd-8.1/drbd: . linux - c075badaf241dc52e2f085f95727f24434758a26 DRBD: split "if (cond) stmt;" into two lines everywhere.

drbd-cvs at lists.linbit.com
Fri Jul 27 13:35:57 CEST 2007


Author: lars
Date: 2007-07-27 13:35:53 +0200 (Fri, 27 Jul 2007)
New Revision: 3002

Modified:
   branches/drbd-8.1/drbd/drbd_actlog.c
   branches/drbd-8.1/drbd/drbd_bitmap.c
   branches/drbd-8.1/drbd/drbd_int.h
   branches/drbd-8.1/drbd/drbd_main.c
   branches/drbd-8.1/drbd/drbd_nl.c
   branches/drbd-8.1/drbd/drbd_proc.c
   branches/drbd-8.1/drbd/drbd_receiver.c
   branches/drbd-8.1/drbd/drbd_req.c
   branches/drbd-8.1/drbd/drbd_req.h
   branches/drbd-8.1/drbd/drbd_worker.c
   branches/drbd-8.1/drbd/linux/drbd_config.h
   branches/drbd-8.1/drbd/lru_cache.c
Log:
c075badaf241dc52e2f085f95727f24434758a26 DRBD: split "if (cond) stmt;" into two lines everywhere.
ee62de7319cf503c465d76cf53ed682eb5385ed6 DRBD: got rid of useless if in "if (p) kfree(p);"
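
For readers skimming the patch, here is a minimal, self-contained user-space sketch of the two cleanups the log describes. It is not part of the patch itself; the names dup_string and drop_string are invented for illustration, and free() stands in for kfree(). The reasoning is the same either way: both free(NULL) and kfree(NULL) are defined to do nothing, so the "if (p)" guard adds nothing, and the body of a conditional moves onto its own line.

#include <stdlib.h>
#include <string.h>

/* allocate a copy of src, or return NULL on allocation failure */
static char *dup_string(const char *src)
{
	char *copy = malloc(strlen(src) + 1);

	/* was: "if (!copy) return NULL;" on a single line */
	if (!copy)
		return NULL;
	strcpy(copy, src);
	return copy;
}

/* release a copy; no NULL check needed, free(NULL) is a no-op */
static void drop_string(char *copy)
{
	/* was: "if (copy) free(copy);" -- the guard is redundant */
	free(copy);
}

int main(void)
{
	char *s = dup_string("drbd");

	drop_string(s);
	return 0;
}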


Modified: branches/drbd-8.1/drbd/drbd_actlog.c
===================================================================
--- branches/drbd-8.1/drbd/drbd_actlog.c	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_actlog.c	2007-07-27 11:35:53 UTC (rev 3002)
@@ -48,7 +48,8 @@
 	bio->bi_bdev = bdev->md_bdev;
 	bio->bi_sector = sector;
 	ok = (bio_add_page(bio, page, size, 0) == size);
-	if (!ok) goto out;
+	if (!ok)
+		goto out;
 	init_completion(&event);
 	bio->bi_private = &event;
 	bio->bi_end_io = drbd_md_io_complete;
@@ -89,13 +90,15 @@
 	}
 
 	hardsect = drbd_get_hardsect(bdev->md_bdev);
-	if (hardsect == 0) hardsect = MD_HARDSECT;
+	if (hardsect == 0)
+		hardsect = MD_HARDSECT;
 
 	/* in case hardsect != 512 [ s390 only? ] */
 	if (hardsect != MD_HARDSECT) {
 		if (!mdev->md_io_tmpp) {
 			struct page *page = alloc_page(GFP_NOIO);
-			if (!page) return 0;
+			if (!page)
+				return 0;
 
 			WARN("Meta data's bdev hardsect = %d != %d\n",
 			     hardsect, MD_HARDSECT);
@@ -428,7 +431,8 @@
 	/* Find the valid transaction in the log */
 	for (i = 0; i <= mx; i++) {
 		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
-		if (rv == 0) continue;
+		if (rv == 0)
+			continue;
 		if (rv == -1) {
 			up(&mdev->md_io_mutex);
 			return 0;
@@ -436,7 +440,8 @@
 		cnr = be32_to_cpu(buffer->tr_number);
 		/* INFO("index %d valid tnr=%d\n",i,cnr); */
 
-		if (cnr == -1) overflow = 1;
+		if (cnr == -1)
+			overflow = 1;
 
 		if (cnr < from_tnr && !overflow) {
 			from = i;
@@ -482,7 +487,8 @@
 			pos = be32_to_cpu(buffer->updates[j].pos);
 			extent_nr = be32_to_cpu(buffer->updates[j].extent);
 
-			if (extent_nr == LC_FREE) continue;
+			if (extent_nr == LC_FREE)
+				continue;
 
 			lc_set(mdev->act_log, extent_nr, pos);
 			active_extents++;
@@ -492,9 +498,11 @@
 		transactions++;
 
 cancel:
-		if (i == to) break;
+		if (i == to)
+			break;
 		i++;
-		if (i > mx) i = 0;
+		if (i > mx)
+			i = 0;
 	}
 
 	mdev->al_tr_number = to_tnr+1;
@@ -526,7 +534,8 @@
 	struct page *page;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 
-	if (bio->bi_size) return 1;
+	if (bio->bi_size)
+		return 1;
 
 	/* strange behaviour of some lower level drivers...
 	 * fail the request by clearing the uptodate flag,
@@ -536,13 +545,15 @@
 		error = -EIO;
 
 	drbd_chk_io_error(mdev, error, TRUE);
-	if (error && wc->error == 0) wc->error = error;
+	if (error && wc->error == 0)
+		wc->error = error;
 
 	if (atomic_dec_and_test(&wc->count))
 		complete(&wc->io_done);
 
 	page = bio->bi_io_vec[0].bv_page;
-	if (page) put_page(page);
+	if (page)
+		put_page(page);
 	bio_put(bio);
 	mdev->bm_writ_cnt++;
 	dec_local(mdev);
@@ -570,12 +581,14 @@
 
 	/* check if that enr is already covered by an already created bio. */
 	while ( (bio = bios[i]) ) {
-		if (bio->bi_sector == on_disk_sector) return 0;
+		if (bio->bi_sector == on_disk_sector)
+			return 0;
 		i++;
 	}
 
 	bio = bio_alloc(GFP_KERNEL, 1);
-	if (bio == NULL) return -ENOMEM;
+	if (bio == NULL)
+		return -ENOMEM;
 
 	bio->bi_bdev = mdev->bc->md_bdev;
 	bio->bi_sector = on_disk_sector;
@@ -585,7 +598,8 @@
 	if (*page_offset == PAGE_SIZE) {
 		np = alloc_page(__GFP_HIGHMEM);
 		/* no memory leak, bio gets cleaned up by caller */
-		if (np == NULL) return -ENOMEM;
+		if (np == NULL)
+			return -ENOMEM;
 		*page = np;
 		*page_offset = 0;
 		allocated_page = 1;
@@ -601,7 +615,8 @@
 	if (bio_add_page(bio, *page, MD_HARDSECT, *page_offset) != MD_HARDSECT)
 		return -EINVAL;
 
-	if (!allocated_page) get_page(*page);
+	if (!allocated_page)
+		get_page(*page);
 
 	*page_offset += MD_HARDSECT;
 
@@ -641,7 +656,8 @@
 	nr_elements = mdev->act_log->nr_elements;
 
 	bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
-	if (!bios) goto submit_one_by_one;
+	if (!bios)
+		goto submit_one_by_one;
 
 	atomic_set(&wc.count, 0);
 	init_completion(&wc.io_done);
@@ -650,7 +666,8 @@
 
 	for (i = 0; i < nr_elements; i++) {
 		enr = lc_entry(mdev->act_log, i)->lc_number;
-		if (enr == LC_FREE) continue;
+		if (enr == LC_FREE)
+			continue;
 		/* next statement also does atomic_inc wc.count */
 		if (atodb_prepare_unless_covered(mdev, bios, &page,
 						&page_offset,
@@ -665,7 +682,8 @@
 
 	/* all prepared, submit them */
 	for (i = 0; i < nr_elements; i++) {
-		if (bios[i] == NULL) break;
+		if (bios[i] == NULL)
+			break;
 		if (FAULT_ACTIVE( mdev, DRBD_FAULT_MD_WR )) {
 			bios[i]->bi_rw = WRITE;
 			bio_endio(bios[i], bios[i]->bi_size, -EIO);
@@ -687,14 +705,16 @@
 
 	dec_local(mdev);
 
-	if (wc.error) drbd_io_error(mdev, TRUE);
+	if (wc.error)
+		drbd_io_error(mdev, TRUE);
 	kfree(bios);
 	return;
 
  free_bios_submit_one_by_one:
 	/* free everything by calling the endio callback directly. */
 	for (i = 0; i < nr_elements; i++) {
-		if (bios[i] == NULL) break;
+		if (bios[i] == NULL)
+			break;
 		bios[i]->bi_size = 0;
 		atodb_endio(bios[i], MD_HARDSECT, 0);
 	}
@@ -705,7 +725,8 @@
 
 	for (i = 0; i < mdev->act_log->nr_elements; i++) {
 		enr = lc_entry(mdev->act_log, i)->lc_number;
-		if (enr == LC_FREE) continue;
+		if (enr == LC_FREE)
+			continue;
 		/* Really slow: if we have al-extents 16..19 active,
 		 * sector 4 will be written four times! Synchronous! */
 		drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT );
@@ -731,7 +752,8 @@
 
 	for (i = 0; i < mdev->act_log->nr_elements; i++) {
 		enr = lc_entry(mdev->act_log, i)->lc_number;
-		if (enr == LC_FREE) continue;
+		if (enr == LC_FREE)
+			continue;
 		add += drbd_bm_ALe_set_all(mdev, enr);
 	}
 
@@ -770,7 +792,8 @@
 
 	for (i = 0; i < mdev->act_log->nr_elements; i++) {
 		al_ext = lc_entry(mdev->act_log, i);
-		if (al_ext->lc_number == LC_FREE) continue;
+		if (al_ext->lc_number == LC_FREE)
+			continue;
 		wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
 	}
 
@@ -943,7 +966,8 @@
 		    (unsigned long long)sector, size, sbnr, ebnr);
 	    );
 
-	if (sbnr > ebnr) return;
+	if (sbnr > ebnr)
+		return;
 
 	/*
 	 * ok, (capacity & 7) != 0 sometimes, but who cares...
@@ -974,7 +998,8 @@
 		wake_up = 1;
 	}
 	spin_unlock_irqrestore(&mdev->al_lock, flags);
-	if (wake_up) wake_up(&mdev->al_wait);
+	if (wake_up)
+		wake_up(&mdev->al_wait);
 }
 
 /*
@@ -1059,12 +1084,14 @@
 			lc_changed(mdev->resync, (struct lc_element *)bm_ext);
 			wakeup = 1;
 		}
-		if (bm_ext->lce.refcnt == 1) mdev->resync_locked++;
+		if (bm_ext->lce.refcnt == 1)
+			mdev->resync_locked++;
 		set_bit(BME_NO_WRITES, &bm_ext->flags);
 	}
 	rs_flags = mdev->resync->flags;
 	spin_unlock_irq(&mdev->al_lock);
-	if (wakeup) wake_up(&mdev->al_wait);
+	if (wakeup)
+		wake_up(&mdev->al_wait);
 
 	if (!bm_ext) {
 		if (rs_flags & LC_STARVING)
@@ -1086,7 +1113,8 @@
 	else {
 		al_ext = lc_find(mdev->act_log, enr);
 		if (al_ext) {
-			if (al_ext->refcnt) rv = 1;
+			if (al_ext->refcnt)
+				rv = 1;
 		}
 	}
 	spin_unlock_irq(&mdev->al_lock);
@@ -1122,7 +1150,8 @@
 
 	sig = wait_event_interruptible( mdev->al_wait,
 			(bm_ext = _bme_get(mdev, enr)) );
-	if (sig) return 0;
+	if (sig)
+		return 0;
 
 	if (test_bit(BME_LOCKED, &bm_ext->flags)) return 1;
 
@@ -1262,7 +1291,8 @@
 	MTRACE(TraceTypeResync, TraceLvlAll,
 		INFO("need to try again for %u\n", enr);
 	);
-	if (bm_ext) mdev->resync_wenr = enr;
+	if (bm_ext)
+		mdev->resync_wenr = enr;
 	spin_unlock_irq(&mdev->al_lock);
 	return -EAGAIN;
 }
@@ -1323,7 +1353,8 @@
 		/* ok, ->resync is there. */
 		for (i = 0; i < mdev->resync->nr_elements; i++) {
 			bm_ext = (struct bm_extent *) lc_entry(mdev->resync, i);
-			if (bm_ext->lce.lc_number == LC_FREE) continue;
+			if (bm_ext->lce.lc_number == LC_FREE)
+				continue;
 			bm_ext->lce.refcnt = 0; /* Rude but ok. */
 			bm_ext->rs_left = 0;
 			clear_bit(BME_LOCKED, &bm_ext->flags);
@@ -1360,7 +1391,8 @@
 		/* ok, ->resync is there. */
 		for (i = 0; i < mdev->resync->nr_elements; i++) {
 			bm_ext = (struct bm_extent *) lc_entry(mdev->resync, i);
-			if (bm_ext->lce.lc_number == LC_FREE) continue;
+			if (bm_ext->lce.lc_number == LC_FREE)
+				continue;
 			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
 				INFO("dropping %u in drbd_rs_del_all, aparently"
 				     " got 'synced' by application io\n",
@@ -1433,7 +1465,8 @@
 		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
 	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
 
-	if (sbnr > ebnr) return;
+	if (sbnr > ebnr)
+		return;
 
 	/*
 	 * ok, (capacity & 7) != 0 sometimes, but who cares...
@@ -1456,5 +1489,6 @@
 		wake_up = 1;
 	}
 	spin_unlock_irq(&mdev->al_lock);
-	if (wake_up) wake_up(&mdev->al_wait);
+	if (wake_up)
+		wake_up(&mdev->al_wait);
 }

Modified: branches/drbd-8.1/drbd/drbd_bitmap.c
===================================================================
--- branches/drbd-8.1/drbd/drbd_bitmap.c	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_bitmap.c	2007-07-27 11:35:53 UTC (rev 3002)
@@ -444,7 +444,8 @@
 	unsigned long word, bits;
 	size_t n = number;
 
-	if (number == 0) return;
+	if (number == 0)
+		return;
 	ERR_IF(!b) return;
 	ERR_IF(!b->bm) return;
 	WARN_ON(offset        >= b->bm_words);
@@ -482,7 +483,8 @@
 	unsigned long word, bits;
 	size_t n = number;
 
-	if (number == 0) return;
+	if (number == 0)
+		return;
 	ERR_IF(!b) return;
 	ERR_IF(!b->bm) return;
 	WARN_ON(offset        >= b->bm_words);
@@ -518,7 +520,8 @@
 	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long *bm;
 
-	if (number == 0) return;
+	if (number == 0)
+		return;
 	ERR_IF(!b) return;
 	ERR_IF(!b->bm) return;
 	if ( (offset        >= b->bm_words) ||
@@ -712,7 +715,8 @@
 	 */
 	mdev->bitmap = NULL;
 
-	if (rw == WRITE)	bm_cpu_to_lel(b);
+	if (rw == WRITE)
+		bm_cpu_to_lel(b);
 
 	now = jiffies;
 	atomic_set(&b->bm_async_io, num_pages);

Modified: branches/drbd-8.1/drbd/drbd_int.h
===================================================================
--- branches/drbd-8.1/drbd/drbd_int.h	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_int.h	2007-07-27 11:35:53 UTC (rev 3002)
@@ -183,11 +183,12 @@
 # define D_ASSERT(exp)	if (!(exp)) \
 	 ERR("ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
 #endif
-#define ERR_IF(exp) if (({ \
-	int _b = (exp) != 0; \
-	if (_b) ERR("%s: (" #exp ") in %s:%d\n", \
-		__func__, __FILE__, __LINE__); \
-	 _b; \
+#define ERR_IF(exp) if (({				\
+	int _b = (exp) != 0;				\
+	if (_b)						\
+		ERR("%s: (" #exp ") in %s:%d\n",	\
+		__func__, __FILE__, __LINE__);		\
+	 _b;						\
 	}))
 
 /* Defines to control fault insertion */
@@ -1770,7 +1771,8 @@
 
 	atomic_inc(&mdev->net_cnt);
 	have_net_conf = mdev->state.conn >= Unconnected;
-	if (!have_net_conf) dec_net(mdev);
+	if (!have_net_conf)
+		dec_net(mdev);
 	return have_net_conf;
 }
 
@@ -1822,9 +1824,12 @@
 static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
 {
 	int mxb = drbd_get_max_buffers(mdev);
-	if (mdev->state.susp) return 0;
-	if (mdev->state.conn == WFBitMapS) return 0;
-	if (mdev->state.conn == WFBitMapT) return 0;
+	if (mdev->state.susp)
+		return 0;
+	if (mdev->state.conn == WFBitMapS)
+		return 0;
+	if (mdev->state.conn == WFBitMapT)
+		return 0;
 	/* since some older kernels don't have atomic_add_unless,
 	 * and we are within the spinlock anyways, we have this workaround.  */
 	if (atomic_read(&mdev->ap_bio_cnt) > mxb) return 0;
@@ -1866,7 +1871,8 @@
 	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
 
 	D_ASSERT(ap_bio >= 0);
-	if (ap_bio < mxb) wake_up(&mdev->misc_wait);
+	if (ap_bio < mxb)
+		wake_up(&mdev->misc_wait);
 }
 
 static inline int seq_cmp(u32 a, u32 b)
@@ -1892,7 +1898,8 @@
 	m = seq_max(mdev->peer_seq, new_seq);
 	mdev->peer_seq = m;
 	spin_unlock(&mdev->peer_seq_lock);
-	if (m == new_seq) wake_up(&mdev->seq_wait);
+	if (m == new_seq)
+		wake_up(&mdev->seq_wait);
 }
 
 static inline int drbd_queue_order_type(struct drbd_conf *mdev)

Modified: branches/drbd-8.1/drbd/drbd_main.c
===================================================================
--- branches/drbd-8.1/drbd/drbd_main.c	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_main.c	2007-07-27 11:35:53 UTC (rev 3002)
@@ -160,7 +160,8 @@
 	struct drbd_barrier *b;
 
 	b = kmalloc(sizeof(struct drbd_barrier), GFP_KERNEL);
-	if (!b) return 0;
+	if (!b)
+		return 0;
 	INIT_LIST_HEAD(&b->requests);
 	INIT_LIST_HEAD(&b->w.list);
 	b->next = 0;
@@ -335,7 +336,8 @@
 				ChgStateHard|ScheduleAfter);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
-	if (!send) return ok;
+	if (!send)
+		return ok;
 
 	ok = drbd_send_state(mdev);
 	if (ok)
@@ -461,7 +463,8 @@
 		spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 		if (rv < SS_Success) {
-			if (f & ChgStateVerbose) print_st_err(mdev, os, ns, rv);
+			if (f & ChgStateVerbose)
+				print_st_err(mdev, os, ns, rv);
 			return rv;
 		}
 
@@ -469,7 +472,8 @@
 		if ( !drbd_send_state_req(mdev, mask, val) ) {
 			drbd_state_unlock(mdev);
 			rv = SS_CW_FailedByPeer;
-			if (f & ChgStateVerbose) print_st_err(mdev, os, ns, rv);
+			if (f & ChgStateVerbose)
+				print_st_err(mdev, os, ns, rv);
 			return rv;
 		}
 
@@ -479,7 +483,8 @@
 		if (rv < SS_Success) {
 			/* nearly dead code. */
 			drbd_state_unlock(mdev);
-			if (f & ChgStateVerbose) print_st_err(mdev, os, ns, rv);
+			if (f & ChgStateVerbose)
+				print_st_err(mdev, os, ns, rv);
 			return rv;
 		}
 		spin_lock_irqsave(&mdev->req_lock, flags);
@@ -559,7 +564,8 @@
 		dec_net(mdev);
 	}
 
-	if (rv <= 0) /* already found a reason to abort */;
+	if (rv <= 0)
+		/* already found a reason to abort */;
 	else if (ns.role == Secondary && mdev->open_cnt)
 		rv = SS_DeviceInUse;
 
@@ -716,14 +722,19 @@
 			ns.susp = 1;
 
 	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
-		if (ns.conn == SyncSource) ns.conn = PausedSyncS;
-		if (ns.conn == SyncTarget) ns.conn = PausedSyncT;
+		if (ns.conn == SyncSource)
+			ns.conn = PausedSyncS;
+		if (ns.conn == SyncTarget)
+			ns.conn = PausedSyncT;
 	} else {
-		if (ns.conn == PausedSyncS) ns.conn = SyncSource;
-		if (ns.conn == PausedSyncT) ns.conn = SyncTarget;
+		if (ns.conn == PausedSyncS)
+			ns.conn = SyncSource;
+		if (ns.conn == PausedSyncT)
+			ns.conn = SyncTarget;
 	}
 
-	if (ns.i == os.i) return SS_NothingToDo;
+	if (ns.i == os.i)
+		return SS_NothingToDo;
 
 	if ( !(flags & ChgStateHard) ) {
 		/*  pre-state-change checks ; only look at ns  */
@@ -747,7 +758,8 @@
 	}
 
 	if (rv < SS_Success) {
-		if (flags & ChgStateVerbose) print_st_err(mdev, os, ns, rv);
+		if (flags & ChgStateVerbose)
+			print_st_err(mdev, os, ns, rv);
 		return rv;
 	}
 
@@ -859,13 +871,19 @@
 
 		if (test_bit(CRASHED_PRIMARY, &mdev->flags) ||
 		    mdev->state.role == Primary ||
-		    ( mdev->state.pdsk < Inconsistent &&
-		      mdev->state.peer == Primary ) )  mdf |= MDF_PrimaryInd;
-		if (mdev->state.conn > WFReportParams) mdf |= MDF_ConnectedInd;
-		if (mdev->state.disk > Inconsistent)   mdf |= MDF_Consistent;
-		if (mdev->state.disk > Outdated)       mdf |= MDF_WasUpToDate;
+		    (mdev->state.pdsk < Inconsistent &&
+		      mdev->state.peer == Primary))
+			mdf |= MDF_PrimaryInd;
+		if (mdev->state.conn > WFReportParams)
+			mdf |= MDF_ConnectedInd;
+		if (mdev->state.disk > Inconsistent)
+			mdf |= MDF_Consistent;
+		if (mdev->state.disk > Outdated)
+			mdf |= MDF_WasUpToDate;
 		if (mdev->state.pdsk <= Outdated &&
-		    mdev->state.pdsk >= Inconsistent)  mdf |= MDF_PeerOutDated;
+		    mdev->state.pdsk >= Inconsistent)
+			mdf |= MDF_PeerOutDated;
+
 		if (mdf != mdev->bc->md.flags) {
 			mdev->bc->md.flags = mdf;
 			drbd_md_mark_dirty(mdev);
@@ -1154,7 +1172,8 @@
 
 	if (thi->t_state == None) {
 		spin_unlock(&thi->t_lock);
-		if (restart) drbd_thread_start(thi);
+		if (restart)
+			drbd_thread_start(thi);
 		return;
 	}
 
@@ -1167,7 +1186,8 @@
 		thi->t_state = ns;
 		smp_mb();
 		if (thi->task != current) {
-			if (wait) init_completion(&thi->startstop);
+			if (wait)
+				init_completion(&thi->startstop);
 			force_sig(DRBD_SIGKILL, thi->task);
 		} else
 			D_ASSERT(!wait);
@@ -1491,7 +1511,8 @@
 	p.blksize  = blksize;
 	p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
 
-	if (!mdev->meta.socket || mdev->state.conn < Connected) return FALSE;
+	if (!mdev->meta.socket || mdev->state.conn < Connected)
+		return FALSE;
 	ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
 				(struct Drbd_Header *)&p, sizeof(p));
 	return ok;
@@ -1793,7 +1814,8 @@
 	struct msghdr msg;
 	int rv, sent = 0;
 
-	if (!sock) return -1000;
+	if (!sock)
+		return -1000;
 
 	/* THINK  if (signal_pending) return ... ? */
 
@@ -1854,7 +1876,8 @@
 			flush_signals(current);
 			rv = 0;
 		}
-		if (rv < 0) break;
+		if (rv < 0)
+			break;
 		sent += rv;
 		iov.iov_base += rv;
 		iov.iov_len  -= rv;
@@ -1884,7 +1907,8 @@
 	int rv = 0;
 
 	mdev = minor_to_mdev(MINOR(inode->i_rdev));
-	if (!mdev) return -ENODEV;
+	if (!mdev)
+		return -ENODEV;
 
 	spin_lock_irqsave(&mdev->req_lock, flags);
 	/* to have a stable mdev->state.role
@@ -1897,7 +1921,8 @@
 			rv = -EMEDIUMTYPE;
 	}
 
-	if (!rv) mdev->open_cnt++;
+	if (!rv)
+		mdev->open_cnt++;
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 	return rv;
@@ -1909,7 +1934,8 @@
 	struct drbd_conf *mdev;
 
 	mdev = minor_to_mdev(MINOR(inode->i_rdev));
-	if (!mdev) return -ENODEV;
+	if (!mdev)
+		return -ENODEV;
 
 	/*
 	printk(KERN_ERR DEVICE_NAME ": close(inode=%p,file=%p)"
@@ -1952,7 +1978,8 @@
 	}
 	spin_unlock_irq(&mdev->req_lock);
 
-	if (mdev->state.disk >= Inconsistent) drbd_kick_lo(mdev);
+	if (mdev->state.disk >= Inconsistent)
+		drbd_kick_lo(mdev);
 }
 
 void drbd_set_defaults(struct drbd_conf *mdev)
@@ -2130,10 +2157,14 @@
 
 	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
 
-	if (drbd_ee_mempool) mempool_destroy(drbd_ee_mempool);
-	if (drbd_request_mempool) mempool_destroy(drbd_request_mempool);
-	if (drbd_ee_cache) kmem_cache_destroy(drbd_ee_cache);
-	if (drbd_request_cache) kmem_cache_destroy(drbd_request_cache);
+	if (drbd_ee_mempool)
+		mempool_destroy(drbd_ee_mempool);
+	if (drbd_request_mempool)
+		mempool_destroy(drbd_request_mempool);
+	if (drbd_ee_cache)
+		kmem_cache_destroy(drbd_ee_cache);
+	if (drbd_request_cache)
+		kmem_cache_destroy(drbd_request_cache);
 
 	drbd_ee_mempool      = NULL;
 	drbd_request_mempool = NULL;
@@ -2184,7 +2215,8 @@
 
 	for (i = 0; i < number; i++) {
 		page = alloc_page(GFP_HIGHUSER);
-		if (!page) goto Enomem;
+		if (!page)
+			goto Enomem;
 		set_page_private(page, (unsigned long)drbd_pp_pool);
 		drbd_pp_pool = page;
 	}
@@ -2229,7 +2261,8 @@
 			struct gendisk  **disk = &mdev->vdisk;
 			request_queue_t **q    = &mdev->rq_queue;
 
-			if (!mdev) continue;
+			if (!mdev)
+				continue;
 			drbd_free_resources(mdev);
 
 			if (*disk) {
@@ -2237,30 +2270,39 @@
 				put_disk(*disk);
 				*disk = NULL;
 			}
-			if (*q) blk_put_queue(*q);
+			if (*q)
+				blk_put_queue(*q);
 			*q = NULL;
 
 			D_ASSERT(mdev->open_cnt == 0);
-			if (mdev->this_bdev) bdput(mdev->this_bdev);
+			if (mdev->this_bdev)
+				bdput(mdev->this_bdev);
 
 			tl_cleanup(mdev);
-			if (mdev->bitmap) drbd_bm_cleanup(mdev);
-			if (mdev->resync) lc_free(mdev->resync);
+			if (mdev->bitmap)
+				drbd_bm_cleanup(mdev);
+			if (mdev->resync)
+				lc_free(mdev->resync);
 
 			rr = drbd_release_ee(mdev, &mdev->active_ee);
-			if (rr) ERR("%d EEs in active list found!\n", rr);
+			if (rr)
+				ERR("%d EEs in active list found!\n", rr);
 
 			rr = drbd_release_ee(mdev, &mdev->sync_ee);
-			if (rr) ERR("%d EEs in sync list found!\n", rr);
+			if (rr)
+				ERR("%d EEs in sync list found!\n", rr);
 
 			rr = drbd_release_ee(mdev, &mdev->read_ee);
-			if (rr) ERR("%d EEs in read list found!\n", rr);
+			if (rr)
+				ERR("%d EEs in read list found!\n", rr);
 
 			rr = drbd_release_ee(mdev, &mdev->done_ee);
-			if (rr) ERR("%d EEs in done list found!\n", rr);
+			if (rr)
+				ERR("%d EEs in done list found!\n", rr);
 
 			rr = drbd_release_ee(mdev, &mdev->net_ee);
-			if (rr) ERR("%d EEs in net list found!\n", rr);
+			if (rr)
+				ERR("%d EEs in net list found!\n", rr);
 
 			ERR_IF (!list_empty(&mdev->data.work.q)) {
 				struct list_head *lp;
@@ -2310,20 +2352,23 @@
 	request_queue_t *q;
 
 	mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
-	if (!mdev) goto Enomem;
+	if (!mdev)
+		goto Enomem;
 
 	mdev->minor = minor;
 
 	drbd_init_set_defaults(mdev);
 
 	q = blk_alloc_queue(GFP_KERNEL);
-	if (!q) goto Enomem;
+	if (!q)
+		goto Enomem;
 	mdev->rq_queue = q;
 	q->queuedata   = mdev;
 	q->max_segment_size = DRBD_MAX_SEGMENT_SIZE;
 
 	disk = alloc_disk(1);
-	if (!disk) goto Enomem;
+	if (!disk)
+		goto Enomem;
 	mdev->vdisk = disk;
 
 	set_disk_ro( disk, TRUE );
@@ -2347,21 +2392,24 @@
 	q->unplug_fn = drbd_unplug_fn;
 
 	mdev->md_io_page = alloc_page(GFP_KERNEL);
-	if (!mdev->md_io_page) goto Enomem;
+	if (!mdev->md_io_page)
+		goto Enomem;
 
 	if (drbd_bm_init(mdev)) goto Enomem;
 	/* no need to lock access, we are still initializing the module. */
 	if (!tl_init(mdev)) goto Enomem;
 
 	mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
-	if (!mdev->app_reads_hash) goto Enomem;
+	if (!mdev->app_reads_hash)
+		goto Enomem;
 
 	return mdev;
 
  Enomem:
 	if (mdev) {
-		if (mdev->app_reads_hash) kfree(mdev->app_reads_hash);
-		if (mdev->md_io_page) __free_page(mdev->md_io_page);
+		kfree(mdev->app_reads_hash);
+		if (mdev->md_io_page)
+			__free_page(mdev->md_io_page);
 		kfree(mdev);
 	}
 	return NULL;
@@ -2395,7 +2443,8 @@
 	}
 
 	err = drbd_nl_init();
-	if (err) return err;
+	if (err)
+		return err;
 
 	err = register_blkdev(DRBD_MAJOR, DEVICE_NAME);
 	if (err) {
@@ -2417,7 +2466,8 @@
 	drbd_proc = NULL; /* play safe for drbd_cleanup */
 	minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
 				GFP_KERNEL);
-	if (!minor_table) goto Enomem;
+	if (!minor_table)
+		goto Enomem;
 
 	err = drbd_create_mempools();
 	if (err)
@@ -2452,7 +2502,8 @@
 
 Enomem:
 	drbd_cleanup();
-	if (err == -ENOMEM) /* currently always the case */
+	if (err == -ENOMEM)
+		/* currently always the case */
 		printk(KERN_ERR DEVICE_NAME ": ran out of memory\n");
 	else
 		printk(KERN_ERR DEVICE_NAME ": initialization failure\n");
@@ -2461,7 +2512,8 @@
 
 void drbd_free_bc(struct drbd_backing_dev *bc)
 {
-	if (bc == NULL) return;
+	if (bc == NULL)
+		return;
 
 	bd_release(bc->backing_bdev);
 	bd_release(bc->md_bdev);
@@ -2731,7 +2783,8 @@
 
 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val)
 {
-	if (mdev->bc->md.uuid[Bitmap] == 0 && val == 0) return;
+	if (mdev->bc->md.uuid[Bitmap] == 0 && val == 0)
+		return;
 
 	if (val == 0) {
 		drbd_uuid_move_history(mdev);
@@ -2743,7 +2796,8 @@
 		       drbd_print_uuid(mdev, Bitmap);
 			);
 	} else {
-		if (mdev->bc->md.uuid[Bitmap]) WARN("bm UUID already set");
+		if (mdev->bc->md.uuid[Bitmap])
+			WARN("bm UUID already set");
 
 		mdev->bc->md.uuid[Bitmap] = val;
 		mdev->bc->md.uuid[Bitmap] &= ~((u64)1);
@@ -3025,15 +3079,16 @@
 	}
 }
 
-#define PSM(A) \
-do { \
-	if (mask.A) { \
-		int i = snprintf(p, len, " " #A "( %s )", \
-				A##s_to_name(val.A)); \
-		if (i >= len) return op; \
-		p += i; \
-		len -= i; \
-	} \
+#define PSM(A)							\
+do {								\
+	if (mask.A) {						\
+		int i = snprintf(p, len, " " #A "( %s )",	\
+				A##s_to_name(val.A));		\
+		if (i >= len)					\
+			return op;				\
+		p += i;						\
+		len -= i;					\
+	}							\
 } while (0)
 
 char *dump_st(char *p, int len, union drbd_state_t mask, union drbd_state_t val)

Modified: branches/drbd-8.1/drbd/drbd_nl.c
===================================================================
--- branches/drbd-8.1/drbd/drbd_nl.c	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_nl.c	2007-07-27 11:35:53 UTC (rev 3002)
@@ -199,7 +199,8 @@
 
 	D_ASSERT( fp > DontCare );
 
-	if (fp == Stonith) drbd_request_state(mdev, NS(susp, 1));
+	if (fp == Stonith)
+		drbd_request_state(mdev, NS(susp, 1));
 
 	r = drbd_khelper(mdev, "outdate-peer");
 
@@ -276,7 +277,8 @@
 			continue;
 		}
 
-		if (r == SS_NothingToDo) goto fail;
+		if (r == SS_NothingToDo)
+			goto fail;
 		if (r == SS_PrimaryNOP) {
 			nps = drbd_try_outdate_peer(mdev);
 
@@ -300,12 +302,14 @@
 		}
 		if (r < SS_Success) {
 			r = drbd_request_state(mdev, mask, val);
-			if (r < SS_Success) goto fail;
+			if (r < SS_Success)
+				goto fail;
 		}
 		break;
 	}
 
-	if (forced) WARN("Forced to conisder local data as UpToDate!\n");
+	if (forced)
+		WARN("Forced to conisder local data as UpToDate!\n");
 
 	fsync_bdev(mdev->this_bdev);
 
@@ -345,14 +349,16 @@
 		}
 	}
 
-	if ((new_role == Secondary) && inc_local(mdev) ) {
+	if ((new_role == Secondary) && inc_local(mdev) )
+	{
 		drbd_al_to_on_disk_bm(mdev);
 		dec_local(mdev);
 	}
 
 	if (mdev->state.conn >= WFReportParams) {
 		/* if this was forced, we should consider sync */
-		if (forced) drbd_send_uuids(mdev);
+		if (forced)
+			drbd_send_uuids(mdev);
 		drbd_send_state(mdev);
 	}
 
@@ -497,7 +503,8 @@
 		INFO("size = %s (%llu KB)\n", ppsize(ppb, size>>1),
 		     (unsigned long long)size>>1);
 	}
-	if (rv < 0) goto out;
+	if (rv < 0)
+		goto out;
 
 	la_size_changed = (la_size != mdev->bc->md.la_size_sect);
 
@@ -542,11 +549,15 @@
 	} else {
 		if (la_size) {
 			size = la_size;
-			if (m_size && m_size < size) size = m_size;
-			if (p_size && p_size < size) size = p_size;
+			if (m_size && m_size < size)
+				size = m_size;
+			if (p_size && p_size < size)
+				size = p_size;
 		} else {
-			if (m_size) size = m_size;
-			if (p_size) size = p_size;
+			if (m_size)
+				size = m_size;
+			if (p_size)
+				size = p_size;
 		}
 	}
 
@@ -612,7 +623,8 @@
 		lc_free(n);
 		return -EBUSY;
 	} else {
-		if (t) lc_free(t);
+		if (t)
+			lc_free(t);
 	}
 	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */
 	return 0;
@@ -651,7 +663,8 @@
 	 * should be
 	 * t->max_segment_size = min_not_zero(...,...)
 	 * workaround here: */
-	if (q->max_segment_size == 0) q->max_segment_size = max_seg_s;
+	if (q->max_segment_size == 0)
+		q->max_segment_size = max_seg_s;
 
 	MTRACE(TraceTypeRq, TraceLvlSummary,
 	       DUMPI(q->max_sectors);
@@ -958,7 +971,8 @@
 	rv = _drbd_set_state(mdev, ns, ChgStateVerbose);
 	ns = mdev->state;
 	spin_unlock_irq(&mdev->req_lock);
-	if (rv == SS_Success) after_state_ch(mdev, os, ns, ChgStateVerbose);
+	if (rv == SS_Success)
+		after_state_ch(mdev, os, ns, ChgStateVerbose);
 
 	if (rv < SS_Success)
 		goto unlock_bm;
@@ -984,16 +998,21 @@
 	drbd_force_state(mdev, NS(disk, Diskless));
 	drbd_md_sync(mdev);
  release_bdev2_fail:
-	if (nbc) bd_release(nbc->md_bdev);
+	if (nbc)
+		bd_release(nbc->md_bdev);
  release_bdev_fail:
-	if (nbc) bd_release(nbc->backing_bdev);
+	if (nbc)
+		bd_release(nbc->backing_bdev);
  fail:
 	if (nbc) {
-		if (nbc->lo_file) fput(nbc->lo_file);
-		if (nbc->md_file) fput(nbc->md_file);
+		if (nbc->lo_file)
+			fput(nbc->lo_file);
+		if (nbc->md_file)
+			fput(nbc->md_file);
 		kfree(nbc);
 	}
-	if (resync_lru) lc_free(resync_lru);
+	if (resync_lru)
+		lc_free(resync_lru);
 
 	reply->ret_code = retcode;
 	return 0;
@@ -1079,7 +1098,8 @@
 	retcode = NoError;
 	for (i = 0; i < minor_count; i++) {
 		odev = minor_to_mdev(i);
-		if (!odev || odev == mdev) continue;
+		if (!odev || odev == mdev)
+			continue;
 		if ( inc_net(odev)) {
 			if ( M_ADDR(new_conf) == M_ADDR(odev->net_conf) &&
 			    M_PORT(new_conf) == M_PORT(odev->net_conf) )
@@ -1090,7 +1110,8 @@
 				retcode = OAAlreadyInUse;
 
 			dec_net(odev);
-			if (retcode != NoError) goto fail;
+			if (retcode != NoError)
+				goto fail;
 		}
 	}
 #undef M_ADDR
@@ -1167,13 +1188,13 @@
 	mdev->recv_cnt = 0;
 
 	if (new_tl_hash) {
-		if (mdev->tl_hash) kfree(mdev->tl_hash);
+		kfree(mdev->tl_hash);
 		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
 		mdev->tl_hash = new_tl_hash;
 	}
 
 	if (new_ee_hash) {
-		if (mdev->ee_hash) kfree(mdev->ee_hash);
+		kfree(mdev->ee_hash);
 		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
 		mdev->ee_hash = new_ee_hash;
 	}
@@ -1188,10 +1209,10 @@
 	return 0;
 
 fail:
-	if (tfm) crypto_free_hash(tfm);
-	if (new_tl_hash) kfree(new_tl_hash);
-	if (new_ee_hash) kfree(new_ee_hash);
-	if (new_conf) kfree(new_conf);
+	crypto_free_hash(tfm);
+	kfree(new_tl_hash);
+	kfree(new_ee_hash);
+	kfree(new_conf);
 
 	reply->ret_code = retcode;
 	return 0;
@@ -1205,8 +1226,10 @@
 	/* silently. */
 	retcode = _drbd_request_state(mdev, NS(conn, Disconnecting), 0);
 
-	if (retcode == SS_NothingToDo) goto done;
-	else if (retcode == SS_AlreadyStandAlone) goto done;
+	if (retcode == SS_NothingToDo)
+		goto done;
+	else if (retcode == SS_AlreadyStandAlone)
+		goto done;
 	else if (retcode == SS_PrimaryNOP) {
 		/* Our statche checking code wants to see the peer outdated. */
 		retcode = drbd_request_state(mdev, NS2(conn, Disconnecting,
@@ -1224,7 +1247,8 @@
 						NS(conn, StandAlone));
 	}
 
-	if (retcode < SS_Success) goto fail;
+	if (retcode < SS_Success)
+		goto fail;
 
 	if ( wait_event_interruptible( mdev->misc_wait,
 				      mdev->state.conn == StandAlone) ) {
@@ -1316,7 +1340,8 @@
 				retcode = SyncAfterCycle;
 				goto fail;
 			}
-			if (odev->sync_conf.after == -1) break; /* no cycles. */
+			if (odev->sync_conf.after == -1)
+				break; /* no cycles. */
 			odev = minor_to_mdev(odev->sync_conf.after);
 		}
 	}
@@ -1536,8 +1561,9 @@
 		spin_unlock_irq(&drbd_pp_lock);
 
 		if (mdev) {
-			if (mdev->app_reads_hash) kfree(mdev->app_reads_hash);
-			if (mdev->md_io_page) __free_page(mdev->md_io_page);
+			kfree(mdev->app_reads_hash);
+			if (mdev->md_io_page)
+				__free_page(mdev->md_io_page);
 			kfree(mdev);
 			mdev = NULL;
 		}

Modified: branches/drbd-8.1/drbd/drbd_proc.c
===================================================================
--- branches/drbd-8.1/drbd/drbd_proc.c	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_proc.c	2007-07-27 11:35:53 UTC (rev 3002)
@@ -127,7 +127,8 @@
 		return;
 	}
 
-	if (!dt) dt++;
+	if (!dt)
+		dt++;
 	db = mdev->rs_mark_left - rs_left;
 	rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */
 
@@ -145,7 +146,8 @@
 	/* mean speed since syncer started
 	 * we do account for PausedSync periods */
 	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
-	if (dt <= 0) dt = 1;
+	if (dt <= 0)
+		dt = 1;
 	db = mdev->rs_total - rs_left;
 	dbdt = Bit2KB(db/dt);
 	if (dbdt > 1000)

Modified: branches/drbd-8.1/drbd/drbd_receiver.c
===================================================================
--- branches/drbd-8.1/drbd/drbd_receiver.c	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_receiver.c	2007-07-27 11:35:53 UTC (rev 3002)
@@ -109,7 +109,8 @@
 		}
 		spin_unlock_irqrestore(&drbd_pp_lock, flags);
 
-		if (page) break;
+		if (page)
+			break;
 
 		/* hm. pool was empty. try to allocate from kernel.
 		 * don't wait, if none is available, though.
@@ -160,7 +161,8 @@
 
 	atomic_dec(&mdev->pp_in_use);
 
-	if (free_it) __free_page(page);
+	if (free_it)
+		__free_page(page);
 
 	/*
 	 * FIXME
@@ -512,7 +514,8 @@
 
 	for (;;) {
 		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
-		if (rv == size) break;
+		if (rv == size)
+			break;
 
 		/* Note:
 		 * ECONNRESET	other side closed the connection
@@ -539,7 +542,8 @@
 
 	set_fs(oldfs);
 
-	if (rv != size) drbd_force_state(mdev, NS(conn, BrokenPipe));
+	if (rv != size)
+		drbd_force_state(mdev, NS(conn, BrokenPipe));
 
 	return rv;
 }
@@ -680,7 +684,8 @@
 		for (try = 0;;) {
 			/* 3 tries, this should take less than a second! */
 			s = drbd_try_connect(mdev);
-			if (s || ++try >= 3) break;
+			if (s || ++try >= 3)
+				break;
 			/* give the other side time to call bind() & listen() */
 			set_current_state(TASK_INTERRUPTIBLE);
 			schedule_timeout(HZ / 10);
@@ -707,17 +712,20 @@
 			}
 		}
 
-		if (sock && msock) break;
+		if (sock && msock)
+			break;
 
 		s = drbd_wait_for_connect(mdev);
 		if (s) {
 			switch (drbd_recv_fp(mdev, s)) {
 			case HandShakeS:
-				if (sock) sock_release(sock);
+				if (sock)
+					sock_release(sock);
 				sock = s;
 				break;
 			case HandShakeM:
-				if (msock) sock_release(msock);
+				if (msock)
+					sock_release(msock);
 				msock = s;
 				set_bit(DISCARD_CONCURRENT, &mdev->flags);
 				break;
@@ -727,13 +735,16 @@
 			}
 		}
 
-		if (mdev->state.conn <= Disconnecting) return -1;
+		if (mdev->state.conn <= Disconnecting)
+			return -1;
 		if (signal_pending(current)) {
 			flush_signals(current);
 			smp_rmb();
 			if (get_t_state(&mdev->receiver) == Exiting) {
-				if (sock) sock_release(sock);
-				if (msock) sock_release(msock);
+				if (sock)
+					sock_release(sock);
+				if (msock)
+					sock_release(msock);
 				return -1;
 			}
 		}
@@ -772,7 +783,8 @@
 	D_ASSERT(mdev->asender.task == NULL);
 
 	h = drbd_do_handshake(mdev);
-	if (h <= 0) return h;
+	if (h <= 0)
+		return h;
 
 	if (mdev->cram_hmac_tfm) {
 		if (!drbd_do_auth(mdev)) {
@@ -874,7 +886,8 @@
 	int ds, i, rr;
 
 	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_KERNEL);
-	if (!e) return 0;
+	if (!e)
+		return 0;
 	bio = e->private_bio;
 	ds = data_size;
 	bio_for_each_segment(bvec, bio, i) {
@@ -994,7 +1007,8 @@
 	struct Tl_epoch_entry *e;
 
 	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
-	if (!e) return FALSE;
+	if (!e)
+		return FALSE;
 
 	dec_rs_pending(mdev);
 
@@ -1057,7 +1071,8 @@
 	 * still no race with drbd_fail_pending_reads */
 	ok = recv_dless_read(mdev, req, sector, data_size);
 
-	if (ok) req_mod(req, data_received, 0);
+	if (ok)
+		req_mod(req, data_received, 0);
 	/* else: nothing. handled from drbd_disconnect...
 	 * I don't think we may complete this just yet
 	 * in case we are "on-disconnect: freeze" */
@@ -1098,7 +1113,8 @@
 		 * verify there are no pending write request to that area.
 		 */
 		ok = recv_resync_read(mdev, sector, data_size);
-		if (!ok) dec_local(mdev);
+		if (!ok)
+			dec_local(mdev);
 	} else {
 		if (DRBD_ratelimit(5*HZ, 5))
 			ERR("Can not write resync data to local disk.\n");
@@ -1373,7 +1389,8 @@
 				}
 			}
 #undef OVERLAPS
-			if (!have_conflict) break;
+			if (!have_conflict)
+				break;
 
 			/* Discard Ack only for the _first_ iteration */
 			if (first && discard && have_unacked) {
@@ -1659,15 +1676,17 @@
 			if (ch_peer == 0) { rv =  1; break; }
 			if (ch_self == 0) { rv = -1; break; }
 		}
-		if (mdev->net_conf->after_sb_0p == DiscardZeroChg) break;
+		if (mdev->net_conf->after_sb_0p == DiscardZeroChg)
+			break;
 	case DiscardLeastChg:
-		if	( ch_self < ch_peer ) rv = -1;
-		else if (ch_self > ch_peer) rv =  1;
-		else /* ( ch_self == ch_peer ) */ {
-			/* Well, then use something else. */
+		if	( ch_self < ch_peer )
+			rv = -1;
+		else if (ch_self > ch_peer)
+			rv =  1;
+		else /* ( ch_self == ch_peer ) */
+		     /* Well, then use something else. */
 			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
 				? -1 : 1;
-		}
 		break;
 	case DiscardLocal:
 		rv = -1;
@@ -1698,8 +1717,10 @@
 		break;
 	case Consensus:
 		hg = drbd_asb_recover_0p(mdev);
-		if (hg == -1 && mdev->state.role == Secondary) rv = hg;
-		if (hg == 1  && mdev->state.role == Primary)   rv = hg;
+		if (hg == -1 && mdev->state.role == Secondary)
+			rv = hg;
+		if (hg == 1  && mdev->state.role == Primary)
+			rv = hg;
 		break;
 	case Violently:
 		rv = drbd_asb_recover_0p(mdev);
@@ -1826,23 +1847,27 @@
 
 	*rule_nr = 5;
 	peer = mdev->p_uuid[Bitmap] & ~((u64)1);
-	if (self == peer) return -1;
+	if (self == peer)
+		return -1;
 
 	*rule_nr = 6;
 	for ( i = History_start ; i <= History_end ; i++ ) {
 		peer = mdev->p_uuid[i] & ~((u64)1);
-		if (self == peer) return -2;
+		if (self == peer)
+			return -2;
 	}
 
 	*rule_nr = 7;
 	self = mdev->bc->md.uuid[Bitmap] & ~((u64)1);
 	peer = mdev->p_uuid[Current] & ~((u64)1);
-	if (self == peer) return 1;
+	if (self == peer)
+		return 1;
 
 	*rule_nr = 8;
 	for ( i = History_start ; i <= History_end ; i++ ) {
 		self = mdev->bc->md.uuid[i] & ~((u64)1);
-		if (self == peer) return 2;
+		if (self == peer)
+			return 2;
 	}
 
 	*rule_nr = 9;
@@ -1855,7 +1880,8 @@
 		self = mdev->p_uuid[i] & ~((u64)1);
 		for ( j = History_start ; j <= History_end ; j++ ) {
 			peer = mdev->p_uuid[j] & ~((u64)1);
-			if (self == peer) return -100;
+			if (self == peer)
+				return -100;
 		}
 	}
 
@@ -1873,7 +1899,8 @@
 	enum drbd_disk_state mydisk;
 
 	mydisk = mdev->state.disk;
-	if (mydisk == Negotiating) mydisk = mdev->new_state_tmp.disk;
+	if (mydisk == Negotiating)
+		mydisk = mdev->new_state_tmp.disk;
 
 	hg = drbd_uuid_compare(mdev, &rule_nr);
 
@@ -1894,7 +1921,8 @@
 	    (peer_disk == Inconsistent && mydisk > Inconsistent) )  {
 		int f = (hg == -100) || abs(hg) == 2;
 		hg = mydisk > Inconsistent ? 1 : -1;
-		if (f) hg = hg*2;
+		if (f)
+			hg = hg*2;
 		INFO("Becoming sync %s due to disk states.\n",
 		     hg > 0 ? "source" : "target");
 	}
@@ -2013,7 +2041,8 @@
 	    self == DiscardRemote || self == DiscardLocal ) return 1;
 
 	/* everything else is valid if they are equal on both sides. */
-	if (peer == self) return 0;
+	if (peer == self)
+		return 0;
 
 	/* everything es is invalid. */
 	return 1;
@@ -2100,7 +2129,8 @@
 	const char *s, sector_t a, sector_t b)
 {
 	sector_t d;
-	if (a == 0 || b == 0) return;
+	if (a == 0 || b == 0)
+		return;
 	d = (a > b) ? (a - b) : (b - a);
 	if ( d > (a>>3) || d > (b>>3))
 		WARN("Considerable difference in %s: %llus vs. %llus\n", s,
@@ -2184,7 +2214,8 @@
 				mdev->state.peer, mdev->state.pdsk);
 		dec_local(mdev);
 
-		if (nconn == conn_mask) return FALSE;
+		if (nconn == conn_mask)
+			return FALSE;
 
 		if (drbd_request_state(mdev, NS(conn, nconn)) < SS_Success) {
 			drbd_force_state(mdev, NS(conn, Disconnecting));
@@ -2228,7 +2259,7 @@
 	for (i = Current; i < EXT_UUID_SIZE; i++)
 		p_uuid[i] = be64_to_cpu(p->uuid[i]);
 
-	if (mdev->p_uuid) kfree(mdev->p_uuid);
+	kfree(mdev->p_uuid);
 	mdev->p_uuid = p_uuid;
 
 	return TRUE;
@@ -2310,7 +2341,8 @@
 	oconn = nconn = mdev->state.conn;
 	spin_unlock_irq(&mdev->req_lock);
 
-	if (nconn == WFReportParams) nconn = Connected;
+	if (nconn == WFReportParams)
+		nconn = Connected;
 
 	if (mdev->p_uuid && oconn <= Connected &&
 	    peer_state.disk >= Negotiating &&
@@ -2319,11 +2351,13 @@
 				peer_state.role, peer_state.disk);
 		dec_local(mdev);
 
-		if (nconn == conn_mask) return FALSE;
+		if (nconn == conn_mask)
+			return FALSE;
 	}
 
 	spin_lock_irq(&mdev->req_lock);
-	if (mdev->state.conn != oconn) goto retry;
+	if (mdev->state.conn != oconn)
+		goto retry;
 	os = mdev->state;
 	ns.i = mdev->state.i;
 	ns.conn = nconn;
@@ -2331,9 +2365,11 @@
 	ns.pdsk = peer_state.disk;
 	ns.peer_isp = ( peer_state.aftr_isp | peer_state.user_isp );
 	if ((nconn == Connected || nconn == WFBitMapS) &&
-	   ns.disk == Negotiating ) ns.disk = UpToDate;
+	   ns.disk == Negotiating )
+		ns.disk = UpToDate;
 	if ((nconn == Connected || nconn == WFBitMapT) &&
-	   ns.pdsk == Negotiating ) ns.pdsk = UpToDate;
+	   ns.pdsk == Negotiating )
+		ns.pdsk = UpToDate;
 	rv = _drbd_set_state(mdev, ns, ChgStateVerbose | ChgStateHard);
 	spin_unlock_irq(&mdev->req_lock);
 
@@ -2413,7 +2449,8 @@
 		num_words = min_t(size_t, BM_PACKET_WORDS, bm_words-bm_i );
 		want = num_words * sizeof(long);
 		ERR_IF(want != h->length) goto out;
-		if (want == 0) break;
+		if (want == 0)
+			break;
 		if (drbd_recv(mdev, buffer, want) != want)
 			goto out;
 
@@ -2429,7 +2466,8 @@
 		drbd_start_resync(mdev, SyncSource);
 	} else if (mdev->state.conn == WFBitMapT) {
 		ok = drbd_send_bitmap(mdev);
-		if (!ok) goto out;
+		if (!ok)
+			goto out;
 		ok = drbd_request_state(mdev, NS(conn, WFSyncUUID));
 		D_ASSERT( ok == SS_Success );
 	} else {
@@ -2465,7 +2503,8 @@
 
 int receive_UnplugRemote(struct drbd_conf *mdev, struct Drbd_Header *h)
 {
-	if (mdev->state.disk >= Inconsistent) drbd_kick_lo(mdev);
+	if (mdev->state.disk >= Inconsistent)
+		drbd_kick_lo(mdev);
 	return TRUE; /* cannot fail. */
 }
 
@@ -2749,10 +2788,12 @@
 	int rv;
 
 	rv = drbd_send_handshake(mdev);
-	if (!rv) goto break_c_loop;
+	if (!rv)
+		goto break_c_loop;
 
 	rv = drbd_recv_header(mdev, &p->head);
-	if (!rv) goto break_c_loop;
+	if (!rv)
+		goto break_c_loop;
 
 	if (p->head.command != HandShake) {
 		ERR( "expected HandShake packet, received: %s (0x%04x)\n",
@@ -2855,10 +2896,12 @@
 	get_random_bytes(my_challenge, CHALLENGE_LEN);
 
 	rv = drbd_send_cmd2(mdev, AuthChallenge, my_challenge, CHALLENGE_LEN);
-	if (!rv) goto fail;
+	if (!rv)
+		goto fail;
 
 	rv = drbd_recv_header(mdev, &p);
-	if (!rv) goto fail;
+	if (!rv)
+		goto fail;
 
 	if (p.command != AuthChallenge) {
 		ERR( "expected AuthChallenge packet, received: %s (0x%04x)\n",
@@ -2908,10 +2951,12 @@
 	}
 
 	rv = drbd_send_cmd2(mdev, AuthResponse, response, resp_size);
-	if (!rv) goto fail;
+	if (!rv)
+		goto fail;
 
 	rv = drbd_recv_header(mdev, &p);
-	if (!rv) goto fail;
+	if (!rv)
+		goto fail;
 
 	if (p.command != AuthResponse) {
 		ERR( "expected AuthResponse packet, received: %s (0x%04x)\n",
@@ -2959,9 +3004,9 @@
 		     resp_size, mdev->net_conf->cram_hmac_alg);
 
  fail:
-	if (peers_ch) kfree(peers_ch);
-	if (response) kfree(response);
-	if (right_response) kfree(right_response);
+	kfree(peers_ch);
+	kfree(response);
+	kfree(right_response);
 
 	return rv;
 }
@@ -3245,7 +3290,8 @@
 			spin_lock_irq(&mdev->req_lock);
 			empty = list_empty(&mdev->done_ee);
 			spin_unlock_irq(&mdev->req_lock);
-			if (empty) break;
+			if (empty)
+				break;
 			clear_bit(SIGNAL_ASENDER, &mdev->flags);
 			flush_signals(current);
 		}

Modified: branches/drbd-8.1/drbd/drbd_req.c
===================================================================
--- branches/drbd-8.1/drbd/drbd_req.c	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_req.c	2007-07-27 11:35:53 UTC (rev 3002)
@@ -200,7 +200,8 @@
 	/* we need to do the conflict detection stuff,
 	 * if we have the ee_hash (two_primaries) and
 	 * this has been on the network */
-	if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
+	if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL)
+	{
 		const sector_t sector = req->sector;
 		const int size = req->size;
 
@@ -270,9 +271,12 @@
 	 *	the receiver,
 	 *	the bio_endio completion callbacks.
 	 */
-	if (s & RQ_NET_QUEUED) return;
-	if (s & RQ_NET_PENDING) return;
-	if (s & RQ_LOCAL_PENDING) return;
+	if (s & RQ_NET_QUEUED)
+		return;
+	if (s & RQ_NET_PENDING)
+		return;
+	if (s & RQ_LOCAL_PENDING)
+		return;
 
 	if (req->master_bio) {
 		/* this is data_received (remote read)
@@ -655,7 +659,8 @@
 	case connection_lost_while_pending:
 		/* transfer log cleanup after connection loss */
 		/* assert something? */
-		if (req->rq_state & RQ_NET_PENDING) dec_ap_pending(mdev);
+		if (req->rq_state & RQ_NET_PENDING)
+			dec_ap_pending(mdev);
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_DONE;
 		/* if it is still queued, we may not complete it here.
@@ -687,7 +692,8 @@
 
 	case neg_acked:
 		/* assert something? */
-		if (req->rq_state & RQ_NET_PENDING) dec_ap_pending(mdev);
+		if (req->rq_state & RQ_NET_PENDING)
+			dec_ap_pending(mdev);
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 		/* FIXME THINK! is it DONE now, or is it not? */
 		req->rq_state |= RQ_NET_DONE;
@@ -733,9 +739,12 @@
 	unsigned long sbnr, ebnr, bnr;
 	sector_t esector, nr_sectors;
 
-	if (mdev->state.disk == UpToDate) return 1;
-	if (mdev->state.disk >= Outdated) return 0;
-	if (mdev->state.disk <  Inconsistent) return 0;
+	if (mdev->state.disk == UpToDate)
+		return 1;
+	if (mdev->state.disk >= Outdated)
+		return 0;
+	if (mdev->state.disk <  Inconsistent)
+		return 0;
 	/* state.disk == Inconsistent   We will have a look at the BitMap */
 	nr_sectors = drbd_get_capacity(mdev->this_bdev);
 	esector = sector + (size>>9) -1;
@@ -956,8 +965,10 @@
 
 	/* mark them early for readability.
 	 * this just sets some state flags. */
-	if (remote) _req_mod(req, to_be_send, 0);
-	if (local)  _req_mod(req, to_be_submitted, 0);
+	if (remote)
+		_req_mod(req, to_be_send, 0);
+	if (local)
+		_req_mod(req, to_be_submitted, 0);
 
 	/* check this request on the colison detection hash tables.
 	 * if we have a conflict, just complete it here.
@@ -976,7 +987,8 @@
 			dec_local(mdev);
 			local = 0;
 		}
-		if (remote) dec_ap_pending(mdev);
+		if (remote)
+			dec_ap_pending(mdev);
 		dump_bio(mdev, req->master_bio, 1);
 		/* THINK: do we want to fail it (-EIO), or pretend success? */
 		bio_endio(req->master_bio, req->master_bio->bi_size, 0);
@@ -999,7 +1011,7 @@
 			_req_mod(req, queue_for_net_read, 0);
 	}
 	spin_unlock_irq(&mdev->req_lock);
-	if (b) kfree(b); /* if someone else has beaten us to it... */
+	kfree(b); /* if someone else has beaten us to it... */
 
 	if (local) {
 		/* FIXME what ref count do we have to ensure the backing_bdev
@@ -1022,7 +1034,7 @@
 	return 0;
 
 fail_and_free_req:
-	if (b) kfree(b);
+	kfree(b);
 	bio_endio(bio, bio->bi_size, err);
 	drbd_req_free(req);
 	return 0;
@@ -1107,7 +1119,8 @@
 		/* rather error out here than BUG in bio_split */
 		ERR("bio would need to, but cannot, be split: "
 		    "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
-		    bio->bi_vcnt, bio->bi_idx, bio->bi_size, bio->bi_sector);
+		    bio->bi_vcnt, bio->bi_idx, bio->bi_size,
+		    (unsigned long long)bio->bi_sector);
 		bio_endio(bio, bio->bi_size, -EINVAL);
 		return 0;
 	} else {
@@ -1161,9 +1174,11 @@
 
 	limit = DRBD_MAX_SEGMENT_SIZE
 	      - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size);
-	if (limit < 0) limit = 0;
+	if (limit < 0)
+		limit = 0;
 	if (bio_size == 0) {
-		if (limit <= bvec->bv_len) limit = bvec->bv_len;
+		if (limit <= bvec->bv_len)
+			limit = bvec->bv_len;
 	} else if (limit && inc_local(mdev)) {
 		request_queue_t * const b =
 			mdev->bc->backing_bdev->bd_disk->queue;

Modified: branches/drbd-8.1/drbd/drbd_req.h
===================================================================
--- branches/drbd-8.1/drbd/drbd_req.h	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_req.h	2007-07-27 11:35:53 UTC (rev 3002)
@@ -235,7 +235,8 @@
 	struct drbd_request *req;
 
 	hlist_for_each_entry(req, n, slot, colision) {
-		if ((unsigned long)req == (unsigned long)id) {
+		if ((unsigned long)req == (unsigned long)id)
+		{
 			if (req->sector != sector) {
 				ERR("_ack_id_to_req: found req %p but it has "
 				    "wrong sector (%llus versus %llus)\n", req,
@@ -268,7 +269,8 @@
 	struct drbd_request *req;
 
 	hlist_for_each_entry(req, n, slot, colision) {
-		if ((unsigned long)req == (unsigned long)id) {
+		if ((unsigned long)req == (unsigned long)id)
+		{
 			D_ASSERT(req->sector == sector);
 			return req;
 		}

Modified: branches/drbd-8.1/drbd/drbd_worker.c
===================================================================
--- branches/drbd-8.1/drbd/drbd_worker.c	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/drbd_worker.c	2007-07-27 11:35:53 UTC (rev 3002)
@@ -64,7 +64,8 @@
  */
 int drbd_md_io_complete(struct bio *bio, unsigned int bytes_done, int error)
 {
-	if (bio->bi_size) return 1;
+	if (bio->bi_size)
+		return 1;
 	/* error parameter ignored:
 	 * drbd_md_sync_page_io explicitly tests bio_uptodate(bio); */
 
@@ -88,7 +89,8 @@
 	/* We are called each time a part of the bio is finished, but
 	 * we are only interested when the whole bio is finished, therefore
 	 * return as long as bio->bio_size is positive.  */
-	if (bio->bi_size) return 1;
+	if (bio->bi_size)
+		return 1;
 	if (!error && !uptodate) {
 		/* strange behaviour of some lower level drivers...
 		 * fail the request by clearing the uptodate flag,
@@ -134,7 +136,8 @@
 	mdev = e->mdev;
 
 	/* see above */
-	if (bio->bi_size) return 1;
+	if (bio->bi_size)
+		return 1;
 	if (!error && !uptodate) {
 		/* strange behaviour of some lower level drivers...
 		 * fail the request by clearing the uptodate flag,
@@ -169,20 +172,25 @@
 	 * done from "drbd_process_done_ee" within the appropriate w.cb
 	 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
 
-	if (!is_syncer_req) mdev->epoch_size++;
+	if (!is_syncer_req)
+		mdev->epoch_size++;
 
 	do_wake = is_syncer_req
 		? list_empty(&mdev->sync_ee)
 		: list_empty(&mdev->active_ee);
 
-	if (error) __drbd_chk_io_error(mdev, FALSE);
+	if (error)
+		__drbd_chk_io_error(mdev, FALSE);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
-	if (is_syncer_req) drbd_rs_complete_io(mdev, e_sector);
+	if (is_syncer_req)
+		drbd_rs_complete_io(mdev, e_sector);
 
-	if (do_wake) wake_up(&mdev->ee_wait);
+	if (do_wake)
+		wake_up(&mdev->ee_wait);
 
-	if (do_al_complete_io) drbd_al_complete_io(mdev, e_sector);
+	if (do_al_complete_io)
+		drbd_al_complete_io(mdev, e_sector);
 
 	wake_asender(mdev);
 	dec_local(mdev);
@@ -201,7 +209,8 @@
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 
 	/* see above */
-	if (bio->bi_size) return 1;
+	if (bio->bi_size)
+		return 1;
 	if (!error && !uptodate) {
 		/* strange behaviour of some lower level drivers...
 		 * fail the request by clearing the uptodate flag,
@@ -482,7 +491,8 @@
 	}
 
 	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
-	if (dt <= 0) dt = 1;
+	if (dt <= 0)
+		dt = 1;
 	db = mdev->rs_total;
 	dbdt = Bit2KB(db/dt);
 	mdev->rs_paused /= HZ;
@@ -690,7 +700,8 @@
 
 int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	if (cancel) return 1;
+	if (cancel)
+		return 1;
 	return drbd_send_short_cmd(mdev, UnplugRemote);
 }
 
@@ -777,7 +788,8 @@
 	struct drbd_conf *odev = mdev;
 
 	while (1) {
-		if (odev->sync_conf.after == -1) return 1;
+		if (odev->sync_conf.after == -1)
+			return 1;
 		odev = minor_to_mdev(odev->sync_conf.after);
 		ERR_IF(!odev) return 1;
 		if ( (odev->state.conn >= SyncSource &&
@@ -800,7 +812,8 @@
 
 	for (i = 0; i < minor_count; i++) {
 		odev = minor_to_mdev(i);
-		if (!odev) continue;
+		if (!odev)
+			continue;
 		if (!_drbd_may_sync_now(odev))
 			rv |= ( _drbd_set_state(_NS(odev, aftr_isp, 1),
 						ChgStateHard|ScheduleAfter)
@@ -960,13 +973,15 @@
 
 		if (down_trylock(&mdev->data.work.s)) {
 			down(&mdev->data.mutex);
-			if (mdev->data.socket)drbd_tcp_flush(mdev->data.socket);
+			if (mdev->data.socket)
+				drbd_tcp_flush(mdev->data.socket);
 			up(&mdev->data.mutex);
 
 			intr = down_interruptible(&mdev->data.work.s);
 
 			down(&mdev->data.mutex);
-			if (mdev->data.socket) drbd_tcp_cork(mdev->data.socket);
+			if (mdev->data.socket)
+				drbd_tcp_cork(mdev->data.socket);
 			up(&mdev->data.mutex);
 		}
 

Modified: branches/drbd-8.1/drbd/linux/drbd_config.h
===================================================================
--- branches/drbd-8.1/drbd/linux/drbd_config.h	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/linux/drbd_config.h	2007-07-27 11:35:53 UTC (rev 3002)
@@ -22,7 +22,7 @@
 
 extern const char *drbd_buildtag(void);
 
-#define REL_VERSION "8.0.4"
+#define REL_VERSION "8.1.0-nqty" /* not quite there yet */
 #define API_VERSION 86
 #define PRO_VERSION 86
 

Modified: branches/drbd-8.1/drbd/lru_cache.c
===================================================================
--- branches/drbd-8.1/drbd/lru_cache.c	2007-07-25 12:54:21 UTC (rev 3001)
+++ branches/drbd-8.1/drbd/lru_cache.c	2007-07-27 11:35:53 UTC (rev 3002)
@@ -121,7 +121,8 @@
 	BUG_ON(!lc);
 	BUG_ON(!lc->nr_elements);
 	hlist_for_each_entry(e, n, lc->slot + lc_hash_fn(lc, enr), colision) {
-		if (e->lc_number == enr) return e;
+		if (e->lc_number == enr)
+			return e;
 	}
 	return NULL;
 }
@@ -224,7 +225,8 @@
 	e = lc_find(lc, enr);
 	if (e) {
 		++lc->hits;
-		if (e->refcnt++ == 0) lc->used++;
+		if (e->refcnt++ == 0)
+			lc->used++;
 		list_move(&e->list, &lc->in_use); /* Not evictable... */
 		RETURN(e);
 	}
@@ -281,7 +283,8 @@
 	e = lc_find(lc, enr);
 	if (e) {
 		++lc->hits;
-		if (e->refcnt++ == 0) lc->used++;
+		if (e->refcnt++ == 0)
+			lc->used++;
 		list_move(&e->list, &lc->in_use); /* Not evictable... */
 	}
 	RETURN(e);
@@ -335,7 +338,8 @@
 {
 	struct lc_element *e;
 
-	if (index < 0 || index >= lc->nr_elements) return;
+	if (index < 0 || index >= lc->nr_elements)
+		return;
 
 	e = lc_entry(lc, index);
 	e->lc_number = enr;


