[DRBD-cvs] svn commit by lars - r2999 - in branches/drbd-8.0-for-linus/drbd: . linux - 3aa0336d147a3c7ead5c2f71509ebdb5fb774020 got rid of warnings

drbd-cvs at lists.linbit.com drbd-cvs at lists.linbit.com
Tue Jul 24 19:02:32 CEST 2007


Author: lars
Date: 2007-07-24 19:02:29 +0200 (Tue, 24 Jul 2007)
New Revision: 2999

Modified:
   branches/drbd-8.0-for-linus/drbd/drbd_actlog.c
   branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c
   branches/drbd-8.0-for-linus/drbd/drbd_int.h
   branches/drbd-8.0-for-linus/drbd/drbd_main.c
   branches/drbd-8.0-for-linus/drbd/drbd_nl.c
   branches/drbd-8.0-for-linus/drbd/drbd_proc.c
   branches/drbd-8.0-for-linus/drbd/drbd_receiver.c
   branches/drbd-8.0-for-linus/drbd/drbd_req.c
   branches/drbd-8.0-for-linus/drbd/drbd_req.h
   branches/drbd-8.0-for-linus/drbd/drbd_worker.c
   branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h
   branches/drbd-8.0-for-linus/drbd/linux/drbd.h
   branches/drbd-8.0-for-linus/drbd/lru_cache.c
   branches/drbd-8.0-for-linus/drbd/lru_cache.h
Log:
3aa0336d147a3c7ead5c2f71509ebdb5fb774020 got rid of warnings for some '#if 0'ed code
476323d033c1fc06cff0968faf65bf0b560d598d wrapped many long lines; a few are left, unfortunately
dad50ae97f11ab4ee3aa037545065021c82ca637 cleanup some of "declaring multiple variables together should be avoided"
cd9230bb862e29dd5568a4649a8717ae55a8d0cd unindent labels


Modified: branches/drbd-8.0-for-linus/drbd/drbd_actlog.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_actlog.c	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_actlog.c	2007-07-24 17:02:29 UTC (rev 2999)
@@ -41,6 +41,8 @@
 {
 	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 	struct completion event;
+	const int do_fail = FAULT_ACTIVE(mdev,
+		(rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD);
 	int ok;
 
 	bio->bi_bdev = bdev->md_bdev;
@@ -51,7 +53,7 @@
 	bio->bi_private = &event;
 	bio->bi_end_io = drbd_md_io_complete;
 
-	if (FAULT_ACTIVE(mdev, (rw & WRITE)? DRBD_FAULT_MD_WR:DRBD_FAULT_MD_RD)) {
+	if (do_fail) {
 		bio->bi_rw |= rw;
 		bio_endio(bio, bio->bi_size, -EIO);
 	} else {
@@ -72,7 +74,8 @@
 int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
 			 sector_t sector, int rw)
 {
-	int hardsect, mask, ok, offset = 0;
+	int hardsect, mask, ok;
+	int offset = 0;
 	struct page *iop = mdev->md_io_page;
 
 	D_ASSERT(semaphore_is_locked(&mdev->md_io_mutex));
@@ -116,7 +119,8 @@
 						   sector, READ, hardsect);
 
 			if (unlikely(!ok)) {
-				ERR("drbd_md_sync_page_io(,%llus,READ [hardsect!=512]) failed!\n",
+				ERR("drbd_md_sync_page_io(,%llus,"
+				    "READ [hardsect!=512]) failed!\n",
 				    (unsigned long long)sector);
 				return 0;
 			}
@@ -190,7 +194,8 @@
 	unsigned long     al_flags = 0;
 
 	spin_lock_irq(&mdev->al_lock);
-	bm_ext = (struct bm_extent *) lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+	bm_ext = (struct bm_extent *)
+		lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
 	if (unlikely(bm_ext != NULL)) {
 		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
 			spin_unlock_irq(&mdev->al_lock);
@@ -317,10 +322,6 @@
 	buffer->updates[0].pos = cpu_to_be32(n);
 	buffer->updates[0].extent = cpu_to_be32(new_enr);
 
-#if 0	/* Use this printf with the test_al.pl program */
-	ERR("T%03d S%03d=E%06d\n", mdev->al_tr_number, n, new_enr);
-#endif
-
 	xor_sum ^= new_enr;
 
 	mx = min_t(int, AL_EXTENTS_PT,
@@ -338,18 +339,21 @@
 		xor_sum ^= LC_FREE;
 	}
 	mdev->al_tr_cycle += AL_EXTENTS_PT;
-	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements) mdev->al_tr_cycle = 0;
+	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
+		mdev->al_tr_cycle = 0;
 
 	buffer->xor_sum = cpu_to_be32(xor_sum);
 
-	sector = mdev->bc->md.md_offset + mdev->bc->md.al_offset + mdev->al_tr_pos;
+	sector =  mdev->bc->md.md_offset
+		+ mdev->bc->md.al_offset + mdev->al_tr_pos;
 
 	if (!drbd_md_sync_page_io(mdev, mdev->bc, sector, WRITE)) {
 		drbd_chk_io_error(mdev, 1, TRUE);
 		drbd_io_error(mdev, TRUE);
 	}
 
-	if (++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
+	if (++mdev->al_tr_pos >
+	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
 		mdev->al_tr_pos = 0;
 
 	D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
@@ -401,11 +405,17 @@
 int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 {
 	struct al_transaction *buffer;
-	int from = -1, to = -1, i, cnr, overflow = 0, rv;
-	u32 from_tnr = -1, to_tnr = 0;
+	int i;
+	int rv;
+	int mx;
+	int cnr;
 	int active_extents = 0;
 	int transactions = 0;
-	int mx;
+	int overflow = 0;
+	int from = -1;
+	int to = -1;
+	u32 from_tnr = -1;
+	u32 to_tnr = 0;
 
 	mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);
 
@@ -481,7 +491,7 @@
 
 		transactions++;
 
-	cancel:
+cancel:
 		if (i == to) break;
 		i++;
 		if (i > mx) i = 0;
@@ -489,7 +499,8 @@
 
 	mdev->al_tr_number = to_tnr+1;
 	mdev->al_tr_pos = to;
-	if (++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
+	if (++mdev->al_tr_pos >
+	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
 		mdev->al_tr_pos = 0;
 
 	/* ok, we are done with it */
@@ -549,11 +560,13 @@
 			     unsigned int enr,
 			     struct drbd_atodb_wait *wc)
 {
-	int i = 0, allocated_page = 0;
 	struct bio *bio;
 	struct page *np;
-	sector_t on_disk_sector = enr + mdev->bc->md.md_offset + mdev->bc->md.bm_offset;
+	sector_t on_disk_sector = enr + mdev->bc->md.md_offset
+				      + mdev->bc->md.bm_offset;
 	int offset;
+	int i = 0;
+	int allocated_page = 0;
 
 	/* check if that enr is already covered by an already created bio. */
 	while ( (bio = bios[i]) ) {
@@ -770,7 +783,8 @@
 
 	if ( !inc_local_if_state(mdev, Attaching) ) {
 		if (DRBD_ratelimit(5*HZ, 5))
-			WARN("Can not update on disk bitmap, local IO disabled.\n");
+			WARN("Can not update on disk bitmap, "
+			     "local IO disabled.\n");
 		return 1;
 	}
 
@@ -779,12 +793,15 @@
 
 	kfree(udw);
 
-	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed &&
-	   ( mdev->state.conn == SyncSource || mdev->state.conn == SyncTarget ||
-	     mdev->state.conn == PausedSyncS || mdev->state.conn == PausedSyncT ) ) {
-		drbd_bm_lock(mdev);
-		drbd_resync_finished(mdev);
-		drbd_bm_unlock(mdev);
+	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
+		switch (mdev->state.conn) {
+		case SyncSource:  case SyncTarget:
+		case PausedSyncS: case PausedSyncT:
+			drbd_bm_lock(mdev);
+			drbd_resync_finished(mdev);
+			drbd_bm_unlock(mdev);
+		default: /* nothing to do */;
+		}
 	}
 	drbd_bcast_sync_progress(mdev);
 
@@ -821,9 +838,11 @@
 			else
 				ext->rs_failed += count;
 			if (ext->rs_left < ext->rs_failed) {
-				ERR("BAD! sector=%llus enr=%u rs_left=%d rs_failed=%d count=%d\n",
+				ERR("BAD! sector=%llus enr=%u rs_left=%d "
+				    "rs_failed=%d count=%d\n",
 				     (unsigned long long)sector,
-				     ext->lce.lc_number, ext->rs_left, ext->rs_failed, count);
+				     ext->lce.lc_number, ext->rs_left,
+				     ext->rs_failed, count);
 				dump_stack();
 				/* FIXME brrrgs. should never happen! */
 				drbd_force_state(mdev, NS(conn, Disconnecting));
@@ -885,7 +904,8 @@
  * called by worker on SyncTarget and receiver on SyncSource.
  *
  */
-void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, const char *file, const unsigned int line)
+void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
+		       const char *file, const unsigned int line)
 {
 	/* Is called from worker and receiver context _only_ */
 	unsigned long sbnr, ebnr, lbnr, bnr;
@@ -936,7 +956,8 @@
 	if (count) {
 		/* we need the lock for drbd_try_clear_on_disk_bm */
 		if (jiffies - mdev->rs_mark_time > HZ*10) {
-			/* should be roling marks, but we estimate only anyways. */
+			/* should be roling marks,
+			 * but we estimate only anyways. */
 			if ( mdev->rs_mark_left != drbd_bm_total_weight(mdev) &&
 			    mdev->state.conn != PausedSyncT &&
 			    mdev->state.conn != PausedSyncS ) {
@@ -948,8 +969,8 @@
 			drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
 			dec_local(mdev);
 		}
-		/* just wake_up unconditional now,
-		 * various lc_chaged(), lc_put() in drbd_try_clear_on_disk_bm(). */
+		/* just wake_up unconditional now, various lc_chaged(),
+		 * lc_put() in drbd_try_clear_on_disk_bm(). */
 		wake_up = 1;
 	}
 	spin_unlock_irqrestore(&mdev->al_lock, flags);
@@ -958,12 +979,14 @@
 
 /*
  * this is intended to set one request worth of data out of sync.
- * affects at least 1 bit, and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
+ * affects at least 1 bit,
+ * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
  *
  * called by tl_clear and drbd_send_dblock (==drbd_make_request).
  * so this can be _any_ process.
  */
-void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, const char *file, const unsigned int line)
+void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
+			    const char *file, const unsigned int line)
 {
 	unsigned long sbnr, ebnr, lbnr;
 	sector_t esector, nr_sectors;
@@ -987,7 +1010,8 @@
 	*/
 
 	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
-		ERR("sector: %llus, size: %d\n", (unsigned long long)sector, size);
+		ERR("sector: %llus, size: %d\n",
+			(unsigned long long)sector, size);
 		return;
 	}
 
@@ -1005,7 +1029,8 @@
 	ebnr = BM_SECT_TO_BIT(esector);
 
 	MTRACE(TraceTypeResync, TraceLvlMetrics,
-	       INFO("drbd_set_out_of_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
+	       INFO("drbd_set_out_of_sync: sector=%llus size=%u "
+		    "sbnr=%lu ebnr=%lu\n",
 		    (unsigned long long)sector, size, sbnr, ebnr);
 	    );
 
@@ -1162,7 +1187,8 @@
 			INFO("dropping %u, aparently got 'synced' "
 			     "by application io\n", mdev->resync_wenr);
 		);
-		bm_ext = (struct bm_extent *)lc_find(mdev->resync, mdev->resync_wenr);
+		bm_ext = (struct bm_extent *)
+			lc_find(mdev->resync, mdev->resync_wenr);
 		if (bm_ext) {
 			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
 			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
@@ -1216,7 +1242,7 @@
 		mdev->resync_locked++;
 		goto check_al;
 	}
-  check_al:
+check_al:
 	MTRACE(TraceTypeResync, TraceLvlAll,
 		INFO("checking al for %u\n", enr);
 	);
@@ -1227,12 +1253,12 @@
 			goto try_again;
 	}
 	set_bit(BME_LOCKED, &bm_ext->flags);
-  proceed:
+proceed:
 	mdev->resync_wenr = LC_FREE;
 	spin_unlock_irq(&mdev->al_lock);
 	return 0;
 
-  try_again:
+try_again:
 	MTRACE(TraceTypeResync, TraceLvlAll,
 		INFO("need to try again for %u\n", enr);
 	);
@@ -1262,7 +1288,8 @@
 
 	if (bm_ext->lce.refcnt == 0) {
 		spin_unlock_irqrestore(&mdev->al_lock, flags);
-		ERR("drbd_rs_complete_io(,%llu [=%u]) called, but refcnt is 0!?\n",
+		ERR("drbd_rs_complete_io(,%llu [=%u]) called, "
+		    "but refcnt is 0!?\n",
 		    (unsigned long long)sector, enr);
 		return;
 	}
@@ -1335,8 +1362,8 @@
 			bm_ext = (struct bm_extent *) lc_entry(mdev->resync, i);
 			if (bm_ext->lce.lc_number == LC_FREE) continue;
 			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
-				INFO("dropping %u in drbd_rs_del_all, "
-				     "aparently got 'synced' by application io\n",
+				INFO("dropping %u in drbd_rs_del_all, aparently"
+				     " got 'synced' by application io\n",
 				     mdev->resync_wenr);
 				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
 				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
@@ -1424,8 +1451,8 @@
 			dec_local(mdev);
 		}
 
-		/* just wake_up unconditional now,
-		 * various lc_chaged(), lc_put() in drbd_try_clear_on_disk_bm(). */
+		/* just wake_up unconditional now, various lc_chaged(),
+		 * lc_put() in drbd_try_clear_on_disk_bm(). */
 		wake_up = 1;
 	}
 	spin_unlock_irq(&mdev->al_lock);

Modified: branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c	2007-07-24 17:02:29 UTC (rev 2999)
@@ -302,8 +302,10 @@
 int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
-	unsigned long bits, bytes, words, *nbm, *obm = 0;
-	int err = 0, growing;
+	unsigned long *nbm, *obm = NULL;
+	unsigned long bits, bytes, words;
+	int err = 0;
+	int growing;
 
 	ERR_IF(!b) return -ENOMEM;
 
@@ -338,7 +340,8 @@
 		*/
 		words = ALIGN(bits, 64) >> LN2_BPL;
 
-		D_ASSERT((u64)bits <= (((u64)mdev->bc->md.md_size_sect-MD_BM_OFFSET) << 12));
+		D_ASSERT((u64)bits <=
+			(((u64)mdev->bc->md.md_size_sect-MD_BM_OFFSET) << 12));
 
 		if (words == b->bm_words) {
 			/* optimize: capacity has changed,
@@ -357,7 +360,8 @@
 			bytes = (words+1)*sizeof(long);
 			nbm = vmalloc(bytes);
 			if (!nbm) {
-				ERR("bitmap: failed to vmalloc %lu bytes\n", bytes);
+				ERR("bitmap: failed to vmalloc %lu bytes\n",
+					bytes);
 				err = -ENOMEM;
 				goto out;
 			}
@@ -369,7 +373,8 @@
 		if (obm) {
 			bm_set_surplus(b);
 			D_ASSERT(b->bm[b->bm_words] == DRBD_MAGIC);
-			memcpy(nbm, obm, min_t(size_t, b->bm_words, words)*sizeof(long));
+			memcpy(nbm, obm, min_t(size_t, b->bm_words, words)
+								*sizeof(long));
 		}
 		growing = words > b->bm_words;
 		if (growing) {
@@ -548,7 +553,8 @@
 	spin_unlock_irq(&b->bm_lock);
 }
 
-int drbd_bm_async_io_complete(struct bio *bio, unsigned int bytes_done, int error)
+int drbd_bm_async_io_complete(struct bio *bio,
+	unsigned int bytes_done, int error)
 {
 	struct drbd_bitmap *b = bio->bi_private;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -578,17 +584,21 @@
 	return 0;
 }
 
-void drbd_bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw)
+void drbd_bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b,
+			   int page_nr, int rw)
 {
 	/* we are process context. we always get a bio */
 	/* THINK: do we need GFP_NOIO here? */
 	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
-	struct page *page = vmalloc_to_page((char *)(b->bm) + (PAGE_SIZE*page_nr));
+	struct page *page = vmalloc_to_page((char *)(b->bm)
+						+ (PAGE_SIZE*page_nr));
 	unsigned int len;
-	sector_t on_disk_sector = mdev->bc->md.md_offset + mdev->bc->md.bm_offset;
+	sector_t on_disk_sector =
+		mdev->bc->md.md_offset + mdev->bc->md.bm_offset;
 	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
 
-	/* this might happen with very small flexible external meta data device */
+	/* this might happen with very small
+	 * flexible external meta data device */
 	len = min_t(unsigned int, PAGE_SIZE,
 		(drbd_md_last_sector(mdev->bc) - on_disk_sector + 1)<<9);
 
@@ -615,8 +625,10 @@
  */
 int drbd_bm_read_sect(struct drbd_conf *mdev, unsigned long enr)
 {
-	sector_t on_disk_sector = mdev->bc->md.md_offset + mdev->bc->md.bm_offset + enr;
-	int bm_words, num_words, offset, err  = 0;
+	sector_t on_disk_sector = mdev->bc->md.md_offset
+				+ mdev->bc->md.bm_offset + enr;
+	int bm_words, num_words, offset;
+	int err = 0;
 
 	down(&mdev->md_io_mutex);
 	if (drbd_md_sync_page_io(mdev, mdev->bc, on_disk_sector, READ)) {
@@ -770,8 +782,10 @@
  */
 int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr)
 {
-	sector_t on_disk_sector = enr + mdev->bc->md.md_offset + mdev->bc->md.bm_offset;
-	int bm_words, num_words, offset, err  = 0;
+	sector_t on_disk_sector = enr + mdev->bc->md.md_offset
+				      + mdev->bc->md.bm_offset;
+	int bm_words, num_words, offset;
+	int err = 0;
 
 	down(&mdev->md_io_mutex);
 	bm_words  = drbd_bm_words(mdev);
@@ -925,7 +939,8 @@
 
 /* returns number of bits actually changed (0->1)
  * wants bitnr, not sector */
-int drbd_bm_set_bits_in_irq(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+int drbd_bm_set_bits_in_irq(struct drbd_conf *mdev,
+	const unsigned long s, const unsigned long e)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long bitnr;

Modified: branches/drbd-8.0-for-linus/drbd/drbd_int.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_int.h	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_int.h	2007-07-24 17:02:29 UTC (rev 2999)
@@ -185,7 +185,8 @@
 #endif
 #define ERR_IF(exp) if (({ \
 	int _b = (exp) != 0; \
-	if (_b) ERR("%s: (" #exp ") in %s:%d\n", __func__, __FILE__, __LINE__); \
+	if (_b) ERR("%s: (" #exp ") in %s:%d\n", \
+		__func__, __FILE__, __LINE__); \
 	 _b; \
 	}))
 
@@ -203,7 +204,8 @@
 };
 
 #ifdef DRBD_ENABLE_FAULTS
-extern unsigned int _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
+extern unsigned int
+_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
 static inline int
 drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
     return (fault_rate &&
@@ -682,10 +684,10 @@
 	WORK_PENDING,		/* completion flag for drbd_disconnect */
 	STOP_SYNC_TIMER,	/* tell timer to cancel itself */
 	UNPLUG_QUEUED,		/* only relevant with kernel 2.4 */
-	UNPLUG_REMOTE,		/* whether sending a "UnplugRemote" makes sense */
-	MD_DIRTY,		/* current gen counts and flags not yet on disk */
+	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
+	MD_DIRTY,		/* current uuids and flags not yet on disk */
 	DISCARD_CONCURRENT,	/* Set on one node, cleared on the peer! */
-	USE_DEGR_WFC_T,		/* Use degr-wfc-timeout instead of wfc-timeout. */
+	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
 	CLUSTER_ST_CHANGE,	/* Cluster wide state change going on... */
 	CL_ST_CHG_SUCCESS,
 	CL_ST_CHG_FAIL,
@@ -751,7 +753,8 @@
 #define INTEGER(pn, pr, member) int member;
 #define INT64(pn, pr, member) __u64 member;
 #define BIT(pn, pr, member) unsigned member : 1;
-#define STRING(pn, pr, member, len) unsigned char member[len]; int member ## _len;
+#define STRING(pn, pr, member, len) \
+	unsigned char member[len]; int member ## _len;
 #include "linux/drbd_nl.h"
 
 struct drbd_backing_dev {
@@ -790,7 +793,9 @@
 	struct timer_list resync_timer;
 	struct timer_list md_sync_timer;
 
-	union drbd_state_t new_state_tmp; /* Used after attach while negotiating new disk state. */
+	/* Used after attach while negotiating new disk state. */
+	union drbd_state_t new_state_tmp;
+
 	union drbd_state_t state;
 	wait_queue_head_t misc_wait;
 	wait_queue_head_t state_wait;  /* upon each state change. */
@@ -804,7 +809,7 @@
 	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
 	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
 	atomic_t unacked_cnt;	 /* Need to send replys for */
-	atomic_t local_cnt;	 /* Waiting for local disk to signal completion */
+	atomic_t local_cnt;	 /* Waiting for local completion */
 	atomic_t net_cnt;	 /* Users of net_conf */
 	spinlock_t req_lock;
 	struct drbd_barrier *unused_spare_barrier; /* for pre-allocation */
@@ -931,14 +936,17 @@
 
 extern void drbd_init_set_defaults(struct drbd_conf *mdev);
 extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
-			     union drbd_state_t mask, union drbd_state_t val);
-extern void drbd_force_state(struct drbd_conf *, union drbd_state_t, union drbd_state_t);
-extern int _drbd_request_state(struct drbd_conf *, union drbd_state_t, union drbd_state_t,
-			       enum chg_state_flags);
-extern int _drbd_set_state(struct drbd_conf *, union drbd_state_t, enum chg_state_flags );
-extern void print_st_err(struct drbd_conf *, union drbd_state_t, union drbd_state_t, int );
-extern void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os, union drbd_state_t ns,
-			   enum chg_state_flags);
+			union drbd_state_t mask, union drbd_state_t val);
+extern void drbd_force_state(struct drbd_conf *, union drbd_state_t,
+			union drbd_state_t);
+extern int _drbd_request_state(struct drbd_conf *, union drbd_state_t,
+			union drbd_state_t, enum chg_state_flags);
+extern int _drbd_set_state(struct drbd_conf *, union drbd_state_t,
+			enum chg_state_flags );
+extern void print_st_err(struct drbd_conf *, union drbd_state_t,
+			union drbd_state_t, int );
+extern void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
+			union drbd_state_t ns, enum chg_state_flags);
 extern int  drbd_thread_start(struct Drbd_thread *thi);
 extern void _drbd_thread_stop(struct Drbd_thread *thi, int restart, int wait);
 extern void drbd_thread_signal(struct Drbd_thread *thi);
@@ -946,39 +954,42 @@
 extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 		       unsigned int set_size);
 extern void tl_clear(struct drbd_conf *mdev);
-extern struct drbd_barrier *_tl_add_barrier(struct drbd_conf *, struct drbd_barrier *);
+extern struct drbd_barrier *_tl_add_barrier(struct drbd_conf *,
+			struct drbd_barrier *);
 extern void drbd_free_sock(struct drbd_conf *mdev);
 extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
-		     void *buf, size_t size, unsigned msg_flags);
+			void *buf, size_t size, unsigned msg_flags);
 extern int drbd_send_protocol(struct drbd_conf *mdev);
 extern int drbd_send_uuids(struct drbd_conf *mdev);
 extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
 extern int drbd_send_sizes(struct drbd_conf *mdev);
 extern int drbd_send_state(struct drbd_conf *mdev);
 extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
-			  enum Drbd_Packet_Cmd cmd, struct Drbd_Header *h,
-			  size_t size, unsigned msg_flags);
+			enum Drbd_Packet_Cmd cmd, struct Drbd_Header *h,
+			size_t size, unsigned msg_flags);
 #define USE_DATA_SOCKET 1
 #define USE_META_SOCKET 0
 extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
-			  enum Drbd_Packet_Cmd cmd, struct Drbd_Header *h, size_t size);
+			enum Drbd_Packet_Cmd cmd, struct Drbd_Header *h,
+			size_t size);
 extern int drbd_send_cmd2(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
-			  char *data, size_t size);
+			char *data, size_t size);
 extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
 extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
-			   u32 set_size);
+			u32 set_size);
 extern int drbd_send_ack(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
-			 struct Tl_epoch_entry *e);
+			struct Tl_epoch_entry *e);
 extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
-			    struct Drbd_BlockRequest_Packet *rp);
+			struct Drbd_BlockRequest_Packet *rp);
 extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
-			    struct Drbd_Data_Packet *dp);
+			struct Drbd_Data_Packet *dp);
 extern int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
-			   int offset, size_t size);
+			int offset, size_t size);
 extern int drbd_send_block(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
 			   struct Tl_epoch_entry *e);
 extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
-extern int _drbd_send_barrier(struct drbd_conf *mdev, struct drbd_barrier *barrier);
+extern int _drbd_send_barrier(struct drbd_conf *mdev,
+			struct drbd_barrier *barrier);
 extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
 			      sector_t sector, int size, u64 block_id);
 extern int drbd_send_bitmap(struct drbd_conf *mdev);
@@ -1010,14 +1021,16 @@
 /* The following numbers are sectors */
 #define MD_AL_OFFSET 8	    /* 8 Sectors after start of meta area */
 #define MD_AL_MAX_SIZE 64   /* = 32 kb LOG  ~ 3776 extents ~ 14 GB Storage */
-#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE) /* Allows up to about 3.8TB */
+/* Allows up to about 3.8TB */
+#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
 
-#define MD_HARDSECT_B	 9     /* Since the smalles IO unit is usually 512 byte */
+/* Since the smalles IO unit is usually 512 byte */
+#define MD_HARDSECT_B	 9
 #define MD_HARDSECT	 (1<<MD_HARDSECT_B)
 
 /* activity log */
-#define AL_EXTENTS_PT	 ((MD_HARDSECT-12)/8-1) /* 61 ; Extents per 512B sector */
-#define AL_EXTENT_SIZE_B 22	 /* One extent represents 4M Storage */
+#define AL_EXTENTS_PT ((MD_HARDSECT-12)/8-1) /* 61 ; Extents per 512B sector */
+#define AL_EXTENT_SIZE_B 22		 /* One extent represents 4M Storage */
 #define AL_EXTENT_SIZE (1<<AL_EXTENT_SIZE_B)
 
 #if BITS_PER_LONG == 32
@@ -1147,7 +1160,8 @@
 extern int  drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr);
 extern int  drbd_bm_read(struct drbd_conf *mdev);
 extern int  drbd_bm_write(struct drbd_conf *mdev);
-extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr);
+extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
+		unsigned long al_enr);
 extern size_t	     drbd_bm_words(struct drbd_conf *mdev);
 extern sector_t      drbd_bm_capacity(struct drbd_conf *mdev);
 extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev);
@@ -1155,23 +1169,19 @@
 extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
 extern int drbd_bm_rs_done(struct drbd_conf *mdev);
 /* for receive_bitmap */
-extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
-				unsigned long *buffer);
+extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
+		size_t number, unsigned long *buffer);
 /* for _drbd_send_bitmap and drbd_bm_write_sect */
-extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
-				unsigned long *buffer);
-/*
- * only used by drbd_bm_read_sect
-extern void drbd_bm_set_lel(struct drbd_conf *mdev, size_t offset, size_t number,
-				unsigned long* buffer);
-*/
+extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
+		size_t number, unsigned long *buffer);
 
 extern void __drbd_bm_lock(struct drbd_conf *mdev, char *file, int line);
 extern void drbd_bm_unlock(struct drbd_conf *mdev);
 #define drbd_bm_lock(mdev)    __drbd_bm_lock(mdev, __FILE__, __LINE__ )
 
 extern void _drbd_bm_recount_bits(struct drbd_conf *mdev, char *file, int line);
-#define drbd_bm_recount_bits(mdev) _drbd_bm_recount_bits(mdev,	__FILE__, __LINE__ )
+#define drbd_bm_recount_bits(mdev) \
+	_drbd_bm_recount_bits(mdev, __FILE__, __LINE__ )
 /* drbd_main.c */
 
 /* needs to be included here,
@@ -1260,7 +1270,8 @@
 /* Bio printing support */
 extern void _dump_bio(struct drbd_conf *mdev, struct bio *bio, int complete);
 
-static inline void dump_bio(struct drbd_conf *mdev, struct bio *bio, int complete)
+static inline void dump_bio(struct drbd_conf *mdev,
+		struct bio *bio, int complete)
 {
 	MTRACE(TraceTypeRq, TraceLvlSummary,
 	       _dump_bio(mdev, bio, complete);
@@ -1269,7 +1280,8 @@
 
 /* Packet dumping support */
 extern void _dump_packet(struct drbd_conf *mdev, struct socket *sock,
-			 int recv, union Drbd_Polymorph_Packet *p, char *file, int line);
+			 int recv, union Drbd_Polymorph_Packet *p,
+			 char *file, int line);
 
 static inline void
 dump_packet(struct drbd_conf *mdev, struct socket *sock,
@@ -1298,10 +1310,12 @@
 
 /* drbd_nl.c */
 extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *);
+extern sector_t drbd_new_dev_size(struct drbd_conf *,
+		struct drbd_backing_dev *);
 extern int drbd_determin_dev_size(struct drbd_conf *);
 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int);
-extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force);
+extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
+		int force);
 extern int drbd_ioctl(struct inode *inode, struct file *file,
 		      unsigned int cmd, unsigned long arg);
 enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
@@ -1316,8 +1330,8 @@
 extern void suspend_other_sg(struct drbd_conf *mdev);
 extern int drbd_resync_finished(struct drbd_conf *mdev);
 /* maybe rather drbd_main.c ? */
-extern int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
-				sector_t sector, int rw);
+extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
+		struct drbd_backing_dev *bdev, sector_t sector, int rw);
 /* worker callbacks */
 extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
 extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
@@ -1343,8 +1357,10 @@
 					    unsigned int data_size,
 					    unsigned int gfp_mask);
 extern void drbd_free_ee(struct drbd_conf *mdev, struct Tl_epoch_entry *e);
-extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head);
-extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head);
+extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+		struct list_head *head);
+extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+		struct list_head *head);
 extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
 extern void _drbd_clear_done_ee(struct drbd_conf *mdev);
 
@@ -1391,12 +1407,15 @@
 extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
 extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
 extern int drbd_rs_del_all(struct drbd_conf *mdev);
-extern void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size);
+extern void drbd_rs_failed_io(struct drbd_conf *mdev,
+		sector_t sector, int size);
 extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
-extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, const char *file, const unsigned int line);
+extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
+		int size, const char *file, const unsigned int line);
 #define drbd_set_in_sync(mdev, sector, size) \
 	__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__ )
-extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, const char *file, const unsigned int line);
+extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
+		int size, const char *file, const unsigned int line);
 #define drbd_set_out_of_sync(mdev, sector, size) \
 	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__ )
 extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
@@ -1421,26 +1440,28 @@
 #define user_isp_mask 1
 #define aftr_isp_mask 1
 
-#define NS(T, S) ({ union drbd_state_t mask; mask.i = 0; mask.T = T##_mask; mask; }), \
-		({ union drbd_state_t val; val.i = 0; val.T = (S); val; })
+#define NS(T, S) \
+	({ union drbd_state_t mask; mask.i = 0; mask.T = T##_mask; mask; }), \
+	({ union drbd_state_t val; val.i = 0; val.T = (S); val; })
 #define NS2(T1, S1, T2, S2) \
-		({ union drbd_state_t mask; mask.i = 0; mask.T1 = T1##_mask; \
-		  mask.T2 = T2##_mask; mask; }), \
-		({ union drbd_state_t val; val.i = 0; val.T1 = (S1); \
-		  val.T2 = (S2); val; })
+	({ union drbd_state_t mask; mask.i = 0; mask.T1 = T1##_mask; \
+	  mask.T2 = T2##_mask; mask; }), \
+	({ union drbd_state_t val; val.i = 0; val.T1 = (S1); \
+	  val.T2 = (S2); val; })
 #define NS3(T1, S1, T2, S2, T3, S3) \
-		({ union drbd_state_t mask; mask.i = 0; mask.T1 = T1##_mask; \
-		  mask.T2 = T2##_mask; mask.T3 = T3##_mask; mask; }), \
-		({ union drbd_state_t val; val.i = 0; val.T1 = (S1); \
-		  val.T2 = (S2); val.T3 = (S3); val; })
+	({ union drbd_state_t mask; mask.i = 0; mask.T1 = T1##_mask; \
+	  mask.T2 = T2##_mask; mask.T3 = T3##_mask; mask; }), \
+	({ union drbd_state_t val; val.i = 0; val.T1 = (S1); \
+	  val.T2 = (S2); val.T3 = (S3); val; })
 
-#define _NS(D, T, S) D, ({ union drbd_state_t ns; ns.i = D->state.i; ns.T = (S); ns; })
+#define _NS(D, T, S) \
+	D, ({ union drbd_state_t ns; ns.i = D->state.i; ns.T = (S); ns; })
 #define _NS2(D, T1, S1, T2, S2) \
-		D, ({ union drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
-		ns.T2 = (S2); ns; })
+	D, ({ union drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
+	ns.T2 = (S2); ns; })
 #define _NS3(D, T1, S1, T2, S2, T3, S3) \
-		D, ({ union drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
-		ns.T2 = (S2); ns.T3 = (S3); ns; })
+	D, ({ union drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
+	ns.T2 = (S2); ns.T3 = (S3); ns; })
 
 static inline void drbd_state_lock(struct drbd_conf *mdev)
 {
@@ -1486,7 +1507,8 @@
 	}
 }
 
-static inline void drbd_chk_io_error(struct drbd_conf *mdev, int error, int forcedetach)
+static inline void drbd_chk_io_error(struct drbd_conf *mdev,
+	int error, int forcedetach)
 {
 	if (error) {
 		unsigned long flags;
@@ -1617,7 +1639,8 @@
 	wake_asender(mdev);
 }
 
-static inline int drbd_send_short_cmd(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd)
+static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
+	enum Drbd_Packet_Cmd cmd)
 {
 	struct Drbd_Header h;
 	return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
@@ -1767,7 +1790,8 @@
  * inc_local: Returns TRUE when local IO is possible. If it returns
  * TRUE you should call dec_local() after IO is completed.
  */
-static inline int inc_local_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+static inline int inc_local_if_state(struct drbd_conf *mdev,
+	enum drbd_disk_state mins)
 {
 	int io_allowed;
 
@@ -1819,8 +1843,8 @@
 
 	/* we wait here
 	 *    as long as the device is suspended
-	 *    until the bitmap is no longer on the fly during connection handshake
-	 *    as long as we would exeed the max_buffer limit.
+	 *    until the bitmap is no longer on the fly during connection
+	 *    handshake as long as we would exceed the max_buffer limit.
 	 *
 	 * to avoid races with the reconnect code,
 	 * we need to atomic_inc within the spinlock. */
@@ -1905,7 +1929,9 @@
 {
 	if (!mdev->bc->backing_bdev) {
 		if (DRBD_ratelimit(5*HZ, 5)) {
-			ERR("backing_bdev==NULL in drbd_kick_lo! The following call trace is for debuggin purposes only. Don't worry.\n");
+			ERR("backing_bdev==NULL in drbd_kick_lo! "
+			    "The following call trace is for "
+			    "debuggin purposes only. Don't worry.\n");
 			dump_stack();
 		}
 	} else {

Modified: branches/drbd-8.0-for-linus/drbd/drbd_main.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_main.c	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_main.c	2007-07-24 17:02:29 UTC (rev 2999)
@@ -74,7 +74,8 @@
 int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
 void md_sync_timer_fn(unsigned long data);
 
-MODULE_AUTHOR("Philipp Reisner <phil at linbit.com>, Lars Ellenberg <lars at linbit.com>");
+MODULE_AUTHOR("Philipp Reisner <phil at linbit.com>, "
+	      "Lars Ellenberg <lars at linbit.com>");
 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
@@ -93,10 +94,14 @@
 int fault_rate;
 int fault_count;
 int fault_devs;
-module_param(enable_faults, int, 0664);	/* bitmap of enabled faults */
-module_param(fault_rate, int, 0664);	/* fault rate % value - applies to all enabled faults */
-module_param(fault_count, int, 0664);	/* count of faults inserted */
-module_param(fault_devs, int, 0644);	/* bitmap of devices to insert faults on */
+/* bitmap of enabled faults */
+module_param(enable_faults, int, 0664);
+/* fault rate % value - applies to all enabled faults */
+module_param(fault_rate, int, 0664);
+/* count of faults inserted */
+module_param(fault_count, int, 0664);
+/* bitmap of devices to insert faults on */
+module_param(fault_devs, int, 0644);
 #endif
 
 /* module parameter, defined */
@@ -117,7 +122,8 @@
  * to run. Default is /sbin/drbdadm */
 char usermode_helper[80] = "/sbin/drbdadm";
 
-module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
+module_param_string(usermode_helper, usermode_helper,
+	sizeof(usermode_helper), 0644);
 
 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
  * as member "struct gendisk *vdisk;"
@@ -183,7 +189,8 @@
  * It returns the previously newest barrier
  * (not the just created barrier) to the caller.
  */
-struct drbd_barrier *_tl_add_barrier(struct drbd_conf *mdev, struct drbd_barrier *new)
+struct drbd_barrier *_tl_add_barrier(struct drbd_conf *mdev,
+	struct drbd_barrier *new)
 {
 	struct drbd_barrier *newest_before;
 
@@ -309,7 +316,8 @@
 {
 	enum io_error_handler eh;
 	unsigned long flags;
-	int send, ok = 1;
+	int send;
+	int ok = 1;
 
 	eh = PassOn;
 	if (inc_local_if_state(mdev, Failed)) {
@@ -355,7 +363,8 @@
  * Returns TRUE if this state change should be preformed as a cluster wide
  * transaction. Of course it returns 0 as soon as the connection is lost.
  */
-int cl_wide_st_chg(struct drbd_conf *mdev, union drbd_state_t os, union drbd_state_t ns)
+int cl_wide_st_chg(struct drbd_conf *mdev,
+	union drbd_state_t os, union drbd_state_t ns)
 {
 	return ( os.conn >= Connected && ns.conn >= Connected &&
 		 ( ( os.role != Primary && ns.role == Primary ) ||
@@ -378,21 +387,26 @@
 	rv = _drbd_set_state(mdev, ns, f);
 	ns = mdev->state;
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
-	if (rv == SS_Success && !(f&ScheduleAfter)) after_state_ch(mdev, os, ns, f);
+	if (rv == SS_Success && !(f&ScheduleAfter))
+		after_state_ch(mdev, os, ns, f);
 
 	return rv;
 }
 
-void drbd_force_state(struct drbd_conf *mdev, union drbd_state_t mask, union drbd_state_t val)
+void drbd_force_state(struct drbd_conf *mdev,
+	union drbd_state_t mask, union drbd_state_t val)
 {
 	drbd_change_state(mdev, ChgStateHard, mask, val);
 }
 
 int is_valid_state(struct drbd_conf *mdev, union drbd_state_t ns);
-int is_valid_state_transition(struct drbd_conf *, union drbd_state_t, union drbd_state_t);
-int drbd_send_state_req(struct drbd_conf *, union drbd_state_t, union drbd_state_t);
+int is_valid_state_transition(struct drbd_conf *,
+	union drbd_state_t, union drbd_state_t);
+int drbd_send_state_req(struct drbd_conf *,
+	union drbd_state_t, union drbd_state_t);
 
-enum set_st_err _req_st_cond(struct drbd_conf *mdev, union drbd_state_t mask, union drbd_state_t val)
+enum set_st_err _req_st_cond(struct drbd_conf *mdev,
+	union drbd_state_t mask, union drbd_state_t val)
 {
 	union drbd_state_t os, ns;
 	unsigned long flags;
@@ -413,7 +427,8 @@
 		rv = is_valid_state(mdev, ns);
 		if (rv == SS_Success) {
 			rv = is_valid_state_transition(mdev, ns, os);
-			if (rv == SS_Success) rv = 0; /* cont waiting, otherwise fail. */
+			if (rv == SS_Success)
+				rv = 0; /* cont waiting, otherwise fail. */
 		}
 	}
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
@@ -427,7 +442,8 @@
  * transition this function even does a cluster wide transaction.
  * It has a cousin named drbd_request_state(), which is always verbose.
  */
-int _drbd_request_state(struct drbd_conf *mdev, union drbd_state_t mask, union drbd_state_t val,
+int _drbd_request_state(struct drbd_conf *mdev,
+	union drbd_state_t mask, union drbd_state_t val,
 		       enum chg_state_flags f)
 {
 	unsigned long flags;
@@ -457,7 +473,8 @@
 			return rv;
 		}
 
-		wait_event(mdev->state_wait, (rv = _req_st_cond(mdev, mask, val)));
+		wait_event(mdev->state_wait,
+			(rv = _req_st_cond(mdev, mask, val)));
 
 		if (rv < SS_Success) {
 			/* nearly dead code. */
@@ -475,7 +492,8 @@
 	ns = mdev->state;
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
-	if (rv == SS_Success && !(f&ScheduleAfter)) after_state_ch(mdev, os, ns, f);
+	if (rv == SS_Success && !(f&ScheduleAfter))
+		after_state_ch(mdev, os, ns, f);
 
 	return rv;
 }
@@ -497,7 +515,8 @@
 	    );
 }
 
-void print_st_err(struct drbd_conf *mdev, union drbd_state_t os, union drbd_state_t ns, int err)
+void print_st_err(struct drbd_conf *mdev,
+	union drbd_state_t os, union drbd_state_t ns, int err)
 {
 	ERR("State change failed: %s\n", set_st_err_name(err));
 	print_st(mdev, " state", os);
@@ -571,7 +590,8 @@
 	return rv;
 }
 
-int is_valid_state_transition(struct drbd_conf *mdev, union drbd_state_t ns, union drbd_state_t os)
+int is_valid_state_transition(struct drbd_conf *mdev,
+	union drbd_state_t ns, union drbd_state_t os)
 {
 	int rv = SS_Success;
 
@@ -587,10 +607,12 @@
 	return rv;
 }
 
-int _drbd_set_state(struct drbd_conf *mdev, union drbd_state_t ns, enum chg_state_flags flags)
+int _drbd_set_state(struct drbd_conf *mdev,
+	union drbd_state_t ns, enum chg_state_flags flags)
 {
 	union drbd_state_t os;
-	int rv = SS_Success, warn_sync_abort = 0;
+	int rv = SS_Success;
+	int warn_sync_abort = 0;
 	enum fencing_policy fp;
 
 	MUST_HOLD(&mdev->req_lock);
@@ -815,8 +837,8 @@
 	return 1;
 }
 
-void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os, union drbd_state_t ns,
-		    enum chg_state_flags flags)
+void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
+	union drbd_state_t ns, enum chg_state_flags flags)
 {
 	enum fencing_policy fp;
 	u32 mdf;
@@ -884,22 +906,27 @@
 	}
 
 	/* Lost contact to peer's copy of the data */
-	if ( (os.pdsk >= Inconsistent && os.pdsk != DUnknown && os.pdsk != Outdated) &&
-	     (ns.pdsk < Inconsistent || ns.pdsk == DUnknown || ns.pdsk == Outdated) ) {
+	if ( (os.pdsk >= Inconsistent &&
+	      os.pdsk != DUnknown &&
+	      os.pdsk != Outdated)
+	&&   (ns.pdsk < Inconsistent ||
+	      ns.pdsk == DUnknown ||
+	      ns.pdsk == Outdated) ) {
 		kfree(mdev->p_uuid);
 		mdev->p_uuid = NULL;
 		if (inc_local(mdev)) {
-			if (ns.role == Primary && mdev->bc->md.uuid[Bitmap] == 0) {
-				/* Only do it if we have not yet done it... */
+			/* generate new uuid, unless we did already */
+			if (ns.role == Primary &&
+			    mdev->bc->md.uuid[Bitmap] == 0)
 				drbd_uuid_new_current(mdev);
-			}
+
+			/* Note: The condition ns.peer == Primary implies
+			 * that we are connected. Otherwise it would
+			 * be ns.peer == Unknown.
+			 * So this means our peer lost its disk.
+			 * No rotation into BitMap-UUID! A FullSync is
+			 * required after a primary detached from its disk! */
 			if (ns.peer == Primary) {
-				/* Note: The condition ns.peer == Primary implies
-				   that we are connected. Otherwise it would
-				   be ns.peer == Unknown. */
-				/* Our peer lost its disk.
-				   Not rotation into BitMap-UUID! A FullSync is
-				   required after a primary detached from it disk! */
 				u64 uuid;
 				INFO("Creating new current UUID [no BitMap]\n");
 				get_random_bytes(&uuid, sizeof(u64));
@@ -1102,7 +1129,8 @@
 			ERR("Couldn't start thread (%d)\n", pid);
 			return FALSE;
 		}
-		wait_for_completion(&thi->startstop); /* waits until thi->task is set */
+		/* waits until thi->task is set */
+		wait_for_completion(&thi->startstop);
 		D_ASSERT(thi->task);
 		D_ASSERT(get_t_state(thi) == Running);
 	} else {
@@ -1239,8 +1267,10 @@
 
 	dump_packet(mdev, mdev->data.socket, 0, (void *)&h, __FILE__, __LINE__);
 
-	ok = ( sizeof(h) == drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0) );
-	ok = ok && ( size == drbd_send(mdev, mdev->data.socket, data, size, 0) );
+	ok = ( sizeof(h) ==
+		drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0) );
+	ok = ok && ( size ==
+		drbd_send(mdev, mdev->data.socket, data, size, 0) );
 
 	drbd_put_data_sock(mdev);
 
@@ -1253,7 +1283,8 @@
 
 	p.rate      = cpu_to_be32(sc->rate);
 
-	return drbd_send_cmd(mdev, USE_DATA_SOCKET, SyncParam, (struct Drbd_Header *)&p, sizeof(p));
+	return drbd_send_cmd(mdev, USE_DATA_SOCKET, SyncParam,
+				(struct Drbd_Header *)&p, sizeof(p));
 }
 
 int drbd_send_protocol(struct drbd_conf *mdev)
@@ -1346,7 +1377,8 @@
 			     (struct Drbd_Header *)&p, sizeof(p));
 }
 
-int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state_t mask, union drbd_state_t val)
+int drbd_send_state_req(struct drbd_conf *mdev,
+	union drbd_state_t mask, union drbd_state_t val)
 {
 	struct Drbd_Req_State_Packet p;
 
@@ -1372,7 +1404,8 @@
 int _drbd_send_bitmap(struct drbd_conf *mdev)
 {
 	int want;
-	int ok = TRUE, bm_i = 0;
+	int ok = TRUE;
+	int bm_i = 0;
 	size_t bm_words, num_words;
 	unsigned long *buffer;
 	struct Drbd_Header *p;
@@ -1386,10 +1419,11 @@
 	if (drbd_md_test_flag(mdev->bc, MDF_FullSync)) {
 		drbd_bm_set_all(mdev);
 		drbd_bm_write(mdev);
-		if (unlikely(mdev->state.disk <= Failed )) {
-			/* write_bm did fail! Leave full sync flag set in Meta Data
-			 * but otherwise process as per normal - need to tell other
-			 * side that a full resync is required! */
+
+		/* if write_bm did fail, leave full sync flag set in Meta Data
+		 * but otherwise process as per normal - need to tell other
+		 * side that a full resync is required! */
+		if (unlikely(mdev->state.disk <= Failed)) {
 			ERR("Failed to write bitmap to disk!\n");
 		} else {
 			drbd_md_clear_flag(mdev, MDF_FullSync);
@@ -1434,7 +1468,8 @@
 	p.barrier  = barrier_nr;
 	p.set_size = cpu_to_be32(set_size);
 
-	ok = drbd_send_cmd(mdev, USE_META_SOCKET, BarrierAck, (struct Drbd_Header *)&p, sizeof(p));
+	ok = drbd_send_cmd(mdev, USE_META_SOCKET, BarrierAck,
+			(struct Drbd_Header *)&p, sizeof(p));
 	return ok;
 }
 
@@ -1457,14 +1492,16 @@
 	p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
 
 	if (!mdev->meta.socket || mdev->state.conn < Connected) return FALSE;
-	ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, (struct Drbd_Header *)&p, sizeof(p));
+	ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
+				(struct Drbd_Header *)&p, sizeof(p));
 	return ok;
 }
 
 int drbd_send_ack_dp(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
 		     struct Drbd_Data_Packet *dp)
 {
-	const int header_size = sizeof(struct Drbd_Data_Packet) - sizeof(struct Drbd_Header);
+	const int header_size = sizeof(struct Drbd_Data_Packet)
+			      - sizeof(struct Drbd_Header);
 	int data_size  = ((struct Drbd_Header *)dp)->length - header_size;
 
 	return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
@@ -1477,7 +1514,8 @@
 	return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
 }
 
-int drbd_send_ack(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd, struct Tl_epoch_entry *e)
+int drbd_send_ack(struct drbd_conf *mdev,
+	enum Drbd_Packet_Cmd cmd, struct Tl_epoch_entry *e)
 {
 	return _drbd_send_ack(mdev, cmd,
 			      cpu_to_be64(e->sector),
@@ -1497,7 +1535,8 @@
 
 	/* FIXME BIO_RW_SYNC ? */
 
-	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, (struct Drbd_Header *)&p, sizeof(p));
+	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
+				(struct Drbd_Header *)&p, sizeof(p));
 	return ok;
 }
 
@@ -1530,25 +1569,26 @@
 }
 
 /* The idea of sendpage seems to be to put some kind of reference
-   to the page into the skb, and to hand it over to the NIC. In
-   this process get_page() gets called.
-
-   As soon as the page was really sent over the network put_page()
-   gets called by some part of the network layer. [ NIC driver? ]
-
-   [ get_page() / put_page() increment/decrement the count. If count
-     reaches 0 the page will be freed. ]
-
-   This works nicely with pages from FSs.
-   But this means that in protocol A we might signal IO completion too early !
-
-   In order not to corrupt data during a resync we must make sure
-   that we do not reuse our own buffer pages (EEs) to early, therefore
-   we have the net_ee list.
-
-   XFS seems to have problems, still, it submits pages with page_count == 0!
-   As a workaround, we disable sendpage on pages with page_count == 0 or PageSlab.
-*/
+ * to the page into the skb, and to hand it over to the NIC. In
+ * this process get_page() gets called.
+ *
+ * As soon as the page was really sent over the network put_page()
+ * gets called by some part of the network layer. [ NIC driver? ]
+ *
+ * [ get_page() / put_page() increment/decrement the count. If count
+ *   reaches 0 the page will be freed. ]
+ *
+ * This works nicely with pages from FSs.
+ * But this means that in protocol A we might signal IO completion too early!
+ *
+ * In order not to corrupt data during a resync we must make sure
+ * that we do not reuse our own buffer pages (EEs) too early, therefore
+ * we have the net_ee list.
+ *
+ * XFS seems to have problems, still, it submits pages with page_count == 0!
+ * As a workaround, we disable sendpage on pages
+ * with page_count == 0 or PageSlab.
+ */
 int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
 		   int offset, size_t size)
 {
@@ -1563,7 +1603,7 @@
 {
 	mm_segment_t oldfs = get_fs();
 	int sent, ok;
-	int len   = size;
+	int len = size;
 
 #ifdef SHOW_SENDPAGE_USAGE
 	unsigned long now = jiffies;
@@ -1623,7 +1663,7 @@
 	} while (len > 0 /* THINK && mdev->cstate >= Connected*/);
 	set_fs(oldfs);
 
-  out:
+out:
 	ok = (len == 0);
 	if (likely(ok))
 		mdev->send_cnt += size>>9;
@@ -1657,7 +1697,8 @@
 
 	p.head.magic   = BE_DRBD_MAGIC;
 	p.head.command = cpu_to_be16(Data);
-	p.head.length  = cpu_to_be16(sizeof(p)-sizeof(struct Drbd_Header)+req->size);
+	p.head.length  = cpu_to_be16(sizeof(p)
+			-sizeof(struct Drbd_Header)+req->size);
 
 	p.sector   = cpu_to_be64(req->sector);
 	p.block_id = (unsigned long)req;
@@ -1675,7 +1716,8 @@
 	p.dp_flags = cpu_to_be32(dp_flags);
 	dump_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
 	set_bit(UNPLUG_REMOTE, &mdev->flags);
-	ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE);
+	ok = (sizeof(p) ==
+		drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
 	if (ok) {
 		if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
 			ok = _drbd_send_bio(mdev, req->master_bio);
@@ -1699,7 +1741,8 @@
 
 	p.head.magic   = BE_DRBD_MAGIC;
 	p.head.command = cpu_to_be16(cmd);
-	p.head.length  = cpu_to_be16( sizeof(p)-sizeof(struct Drbd_Header) + e->size);
+	p.head.length  = cpu_to_be16( sizeof(p)
+			-sizeof(struct Drbd_Header) + e->size);
 
 	p.sector   = cpu_to_be64(e->sector);
 	p.block_id = e->block_id;
@@ -1713,7 +1756,8 @@
 		return 0;
 
 	dump_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
-	ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE);
+	ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
+					sizeof(p), MSG_MORE);
 	if (ok)
 		ok = _drbd_send_zc_bio(mdev, e->private_bio);
 
@@ -1843,7 +1887,8 @@
 	if (!mdev) return -ENODEV;
 
 	spin_lock_irqsave(&mdev->req_lock, flags);
-	/* to have a stable mdev->state.role and no race with updating open_cnt */
+	/* to have a stable mdev->state.role
+	 * and no race with updating open_cnt */
 
 	if (mdev->state.role != Primary) {
 		if (file->f_mode & FMODE_WRITE)
@@ -1901,7 +1946,8 @@
 			 * XXX this might be a good addition to drbd_queue_work
 			 * anyways, to detect "double queuing" ... */
 			if (list_empty(&mdev->unplug_work.list))
-				drbd_queue_work(&mdev->data.work, &mdev->unplug_work);
+				drbd_queue_work(&mdev->data.work,
+						&mdev->unplug_work);
 		}
 	}
 	spin_unlock_irq(&mdev->req_lock);
@@ -1913,7 +1959,7 @@
 {
 	mdev->sync_conf.after      = DRBD_AFTER_DEF;
 	mdev->sync_conf.rate       = DRBD_RATE_DEF;
-	mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF; /* 512 MB active set */
+	mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF;
 	mdev->state = (union drbd_state_t) {
 		{ Secondary, Unknown, StandAlone, Diskless, DUnknown, 0 } };
 }
@@ -2146,7 +2192,7 @@
 
 	return 0;
 
-  Enomem:
+Enomem:
 	drbd_destroy_mempools(); /* in case we allocated some */
 	return -ENOMEM;
 }
@@ -2330,9 +2376,11 @@
 	       THIS_MODULE, THIS_MODULE->module_core);
 #endif
 
+	/* FIXME should be a compile time assert */
 	if (sizeof(struct Drbd_HandShake_Packet) != 80) {
 		printk(KERN_ERR DEVICE_NAME
-		       ": never change the size or layout of the HandShake packet.\n");
+		       ": never change the size or layout "
+		       "of the HandShake packet.\n");
 		return -EINVAL;
 	}
 
@@ -2367,7 +2415,8 @@
 	init_waitqueue_head(&drbd_pp_wait);
 
 	drbd_proc = NULL; /* play safe for drbd_cleanup */
-	minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count, GFP_KERNEL);
+	minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
+				GFP_KERNEL);
 	if (!minor_table) goto Enomem;
 
 	err = drbd_create_mempools();
@@ -2395,12 +2444,13 @@
 	       "Version: " REL_VERSION " (api:%d/proto:%d)\n",
 	       API_VERSION, PRO_VERSION);
 	printk(KERN_INFO DEVICE_NAME ": %s\n", drbd_buildtag());
-	printk(KERN_INFO DEVICE_NAME": registered as block device major %d\n", DRBD_MAJOR);
+	printk(KERN_INFO DEVICE_NAME": registered as block device major %d\n",
+		DRBD_MAJOR);
 	printk(KERN_INFO DEVICE_NAME": minor_table @ 0x%p\n", minor_table);
 
 	return 0; /* Success! */
 
-  Enomem:
+Enomem:
 	drbd_cleanup();
 	if (err == -ENOMEM) /* currently always the case */
 		printk(KERN_ERR DEVICE_NAME ": ran out of memory\n");
@@ -2506,15 +2556,6 @@
 	D_ASSERT(drbd_md_ss__(mdev, mdev->bc) == mdev->bc->md.md_offset);
 	sector = mdev->bc->md.md_offset;
 
-#if 0
-	/* FIXME sooner or later I'd like to use the MD_DIRTY flag everywhere,
-	 * so we can avoid unneccessary md writes.
-	 */
-	ERR_IF (!test_bit(MD_DIRTY, &mdev->flags)) {
-		dump_stack();
-	}
-#endif
-
 	if (drbd_md_sync_page_io(mdev, mdev->bc, sector, WRITE)) {
 		clear_bit(MD_DIRTY, &mdev->flags);
 	} else {
@@ -2802,14 +2843,16 @@
 	static struct fault_random_state rrs = {0, 0};
 
 	unsigned int ret = (
-		(fault_devs == 0 || ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
+		(fault_devs == 0 ||
+			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
 		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
 
 	if (ret) {
 		fault_count++;
 
 		if (printk_ratelimit())
-			WARN("***Simulating %s failure\n", _drbd_fault_str(type));
+			WARN("***Simulating %s failure\n",
+				_drbd_fault_str(type));
 	}
 
 	return ret;
@@ -2835,7 +2878,8 @@
 /* Pretty print a UUID value */
 void
 drbd_print_uuid(struct drbd_conf *mdev, unsigned int idx) {
-	INFO(" uuid[%s] now %016llX\n", _drbd_uuid_str(idx), mdev->bc->md.uuid[idx]);
+	INFO(" uuid[%s] now %016llX\n",
+		_drbd_uuid_str(idx), mdev->bc->md.uuid[idx]);
 }
 
 
@@ -2886,8 +2930,11 @@
 	int count;
 
 	/* verify size parameter */
-	if (size != sizeof(char) && size != sizeof(short) && size != sizeof(int)) {
-		printk(KERN_DEBUG "drbd_print_buffer: ERROR invalid size %d\n", size);
+	if (size != sizeof(char) &&
+	    size != sizeof(short) &&
+	    size != sizeof(int)) {
+		printk(KERN_DEBUG "drbd_print_buffer: "
+			"ERROR invalid size %d\n", size);
 		return;
 	}
 
@@ -2896,7 +2943,8 @@
 
 	/* Adjust start/end to be on appropriate boundary for size */
 	buffer = (const char *)((long)buffer & ~sizemask);
-	pend   = (const unsigned char *)(((long)buffer + length + sizemask) & ~sizemask);
+	pend   = (const unsigned char *)
+		(((long)buffer + length + sizemask) & ~sizemask);
 
 	if (flags & DBGPRINT_BUFFADDR) {
 		/* Move start back to nearest multiple of line size,
@@ -2908,7 +2956,8 @@
 	}
 
 	/* Set value of start VA to print if addresses asked for */
-	pstart_va = (const unsigned char *)buffer_va - ((const unsigned char *)buffer-pstart);
+	pstart_va = (const unsigned char *)buffer_va
+		 - ((const unsigned char *)buffer-pstart);
 
 	/* Calculate end position to nicely align right hand side */
 	pend_str = pstart + (((pend-pstart) + LINE_SIZE-1) & ~(LINE_SIZE-1));
@@ -3036,7 +3085,8 @@
 
 	switch (cmd) {
 	case HandShake:
-		INFOP("%s (protocol %u)\n", cmdname(cmd), be32_to_cpu(p->HandShake.protocol_version));
+		INFOP("%s (protocol %u)\n", cmdname(cmd),
+			be32_to_cpu(p->HandShake.protocol_version));
 		break;
 
 	case ReportBitMap: /* don't report this */
@@ -3065,7 +3115,8 @@
 	case DiscardAck:
 	case NegAck:
 	case NegRSDReply:
-		INFOP("%s (sector %llus, size %u, id %s, seq %u)\n", cmdname(cmd),
+		INFOP("%s (sector %llus, size %u, id %s, seq %u)\n",
+			cmdname(cmd),
 		      (long long)be64_to_cpu(p->BlockAck.sector),
 		      be32_to_cpu(p->BlockAck.blksize),
 		      _dump_block_id(p->BlockAck.block_id, tmp),
@@ -3088,7 +3139,9 @@
 		break;
 
 	case ReportUUIDs:
-		INFOP("%s Curr:%016llX, Bitmap:%016llX, HisSt:%016llX, HisEnd:%016llX\n", cmdname(cmd),
+		INFOP("%s Curr:%016llX, Bitmap:%016llX, "
+		      "HisSt:%016llX, HisEnd:%016llX\n",
+		      cmdname(cmd),
 		      be64_to_cpu(p->GenCnt.uuid[Current]),
 		      be64_to_cpu(p->GenCnt.uuid[Bitmap]),
 		      be64_to_cpu(p->GenCnt.uuid[History_start]),
@@ -3096,7 +3149,9 @@
 		break;
 
 	case ReportSizes:
-		INFOP("%s (d %lluMiB, u %lluMiB, c %lldMiB, max bio %x, q order %x)\n", cmdname(cmd),
+		INFOP("%s (d %lluMiB, u %lluMiB, c %lldMiB, "
+		      "max bio %x, q order %x)\n",
+		      cmdname(cmd),
 		      (long long)(be64_to_cpu(p->Sizes.d_size)>>(20-9)),
 		      (long long)(be64_to_cpu(p->Sizes.u_size)>>(20-9)),
 		      (long long)(be64_to_cpu(p->Sizes.c_size)>>(20-9)),
@@ -3176,7 +3231,8 @@
 				drbd_print_buffer("    ", DBGPRINT_BUFFADDR, 1,
 						  bvec_buf,
 						  faddr,
-						  (bvec->bv_len <= 0x80)? bvec->bv_len : 0x80);
+						  (bvec->bv_len <= 0x80)
+						  ? bvec->bv_len : 0x80);
 
 				bvec_kunmap_irq(bvec_buf, &flags);
 

Modified: branches/drbd-8.0-for-linus/drbd/drbd_nl.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_nl.c	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_nl.c	2007-07-24 17:02:29 UTC (rev 2999)
@@ -47,7 +47,8 @@
 
 /* Generate the tag_list to struct functions */
 #define PACKET(name, number, fields) \
-int name ## _from_tags (struct drbd_conf *mdev, unsigned short *tags, struct name *arg) \
+int name ## _from_tags (struct drbd_conf *mdev, \
+	unsigned short *tags, struct name *arg) \
 { \
 	int tag; \
 	int dlen; \
@@ -88,7 +89,8 @@
 /* Generate the struct to tag_list functions */
 #define PACKET(name, number, fields) \
 unsigned short* \
-name ## _to_tags (struct drbd_conf *mdev, struct name *arg, unsigned short *tags) \
+name ## _to_tags (struct drbd_conf *mdev, \
+	struct name *arg, unsigned short *tags) \
 { \
 	fields \
 	return tags; \
@@ -208,8 +210,8 @@
 	case 4: /* peer is outdated */
 		nps = Outdated;
 		break;
-	case 5: /* peer was down, we will(have) create(d) a new UUID anyways... */
-		/* If we would be more strict, we would return DUnknown here. */
+	case 5: /* peer was down, we will(have) create(d) a new UUID anyways.
+		 * If we would be more strict, we would return DUnknown here. */
 		nps = Outdated;
 		break;
 	case 6: /* Peer is primary, voluntarily outdate myself */
@@ -292,11 +294,12 @@
 			/* Maybe the peer is detected as dead very soon... */
 			set_current_state(TASK_INTERRUPTIBLE);
 			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
-			if (try == 1) try++; /* only a single retry in this case. */
+			if (try == 1)
+				try++; /* only a single retry in this case. */
 			continue;
 		}
 		if (r < SS_Success) {
-			r = drbd_request_state(mdev, mask, val); /* Be verbose. */
+			r = drbd_request_state(mdev, mask, val);
 			if (r < SS_Success) goto fail;
 		}
 		break;
@@ -373,7 +376,8 @@
 		return 0;
 	}
 
-	reply->ret_code = drbd_set_role(mdev, Primary, primary_args.overwrite_peer);
+	reply->ret_code =
+		drbd_set_role(mdev, Primary, primary_args.overwrite_peer);
 
 	return 0;
 }
@@ -475,7 +479,9 @@
 			/* currently there is only one error: ENOMEM! */
 			size = drbd_bm_capacity(mdev)>>1;
 			if (size == 0) {
-				ERR("OUT OF MEMORY! Could not allocate bitmap! Set device size => 0\n");
+				ERR("OUT OF MEMORY! "
+				    "Could not allocate bitmap! "
+				    "Set device size => 0\n");
 			} else {
 				/* FIXME this is problematic,
 				 * if we in fact are smaller now! */
@@ -514,7 +520,7 @@
 			dec_local(mdev);
 		}
 	}
-  out:
+out:
 	lc_unlock(mdev->act_log);
 
 	return rv;
@@ -608,7 +614,7 @@
 	} else {
 		if (t) lc_free(t);
 	}
-	drbd_md_mark_dirty(mdev);	/* we changed mdev->act_log->nr_elemens */
+	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
 	return 0;
 }
 
@@ -861,7 +867,8 @@
 	drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE);
 	/*
 	 * FIXME currently broken.
-	 * drbd_set_recv_tcq(mdev,drbd_queue_order_type(mdev)==QUEUE_ORDERED_TAG);
+	 * drbd_set_recv_tcq(mdev,
+	 *	drbd_queue_order_type(mdev)==QUEUE_ORDERED_TAG);
 	 */
 
 	/* If I am currently not Primary,
@@ -888,7 +895,8 @@
 	drbd_determin_dev_size(mdev);
 
 	if (drbd_md_test_flag(mdev->bc, MDF_FullSync)) {
-		INFO("Assuming that all blocks are out of sync (aka FullSync)\n");
+		INFO("Assuming that all blocks are out of sync "
+		     "(aka FullSync)\n");
 		drbd_bm_set_all(mdev);
 		if (unlikely(drbd_bm_write(mdev) < 0)) {
 			retcode = MDIOError;
@@ -1053,7 +1061,8 @@
 		goto fail;
 	}
 
-	if (new_conf->two_primaries && (new_conf->wire_protocol != DRBD_PROT_C)) {
+	if (new_conf->two_primaries
+	&& (new_conf->wire_protocol != DRBD_PROT_C)) {
 		retcode = ProtocolCRequired;
 		goto fail;
 	};
@@ -1090,7 +1099,8 @@
 #undef O_PORT
 
 	if (new_conf->cram_hmac_alg[0] != 0) {
-		snprintf(hmac_name, HMAC_NAME_L, "hmac(%s)", new_conf->cram_hmac_alg);
+		snprintf(hmac_name, HMAC_NAME_L, "hmac(%s)",
+			new_conf->cram_hmac_alg);
 		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
 		if (IS_ERR(tfm)) {
 			tfm = NULL;
@@ -1098,7 +1108,8 @@
 			goto fail;
 		}
 
-		if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_HASH ) {
+		if (crypto_tfm_alg_type(crypto_hash_tfm(tfm))
+						!= CRYPTO_ALG_TYPE_HASH) {
 			retcode = CRAMAlgNotDigest;
 			goto fail;
 		}
@@ -1176,7 +1187,7 @@
 	reply->ret_code = retcode;
 	return 0;
 
-  fail:
+fail:
 	if (tfm) crypto_free_hash(tfm);
 	if (new_tl_hash) kfree(new_tl_hash);
 	if (new_ee_hash) kfree(new_ee_hash);
@@ -1191,7 +1202,8 @@
 {
 	int retcode;
 
-	retcode = _drbd_request_state(mdev, NS(conn, Disconnecting), 0);	/* silently. */
+	/* silently. */
+	retcode = _drbd_request_state(mdev, NS(conn, Disconnecting), 0);
 
 	if (retcode == SS_NothingToDo) goto done;
 	else if (retcode == SS_AlreadyStandAlone) goto done;
@@ -1208,7 +1220,8 @@
 		 * simply go away, and let the peer try to outdate us with its
 		 * 'outdate-peer' handler later. */
 		if (retcode == SS_IsDiskLess)
-			retcode = drbd_request_state(mdev, NS(conn, StandAlone));
+			retcode = drbd_request_state(mdev,
+						NS(conn, StandAlone));
 	}
 
 	if (retcode < SS_Success) goto fail;
@@ -1408,11 +1421,13 @@
 	if (mdev->state.disk < Outdated)
 		retcode = -999;
 	else
-		retcode = _drbd_set_state(_NS(mdev, disk, Outdated), ChgStateVerbose);
+		retcode = _drbd_set_state(_NS(mdev, disk, Outdated),
+						ChgStateVerbose);
 
 	ns = mdev->state;
 	spin_unlock_irq(&mdev->req_lock);
-	if (retcode == SS_Success) after_state_ch(mdev, os, ns, ChgStateVerbose);
+	if (retcode == SS_Success)
+		after_state_ch(mdev, os, ns, ChgStateVerbose);
 
 	if (retcode == -999) {
 		retcode = DiskLowerThanOutdated;
@@ -1487,8 +1502,8 @@
 }
 
 
-int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-				    struct drbd_nl_cfg_reply *reply)
+int drbd_nl_get_timeout_flag(struct drbd_conf *mdev,
+	struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply)
 {
 	unsigned short *tl;
 
@@ -1564,8 +1579,9 @@
 				    sizeof(struct get_state_tag_len_struct) },
 	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
 				    sizeof(struct get_uuids_tag_len_struct) },
-	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
-				    sizeof(struct get_timeout_flag_tag_len_struct)},
+	[ P_get_timeout_flag ]	=
+		{ &drbd_nl_get_timeout_flag,
+		  sizeof(struct get_timeout_flag_tag_len_struct)},
 
 };
 
@@ -1610,7 +1626,8 @@
 	}
 	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
 
-	reply->packet_type = cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
+	reply->packet_type =
+		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
 	reply->minor = nlp->drbd_minor;
 	reply->ret_code = NoError; /* Might by modified by cm->function. */
 	/* reply->tag_list; might be modified by cm->fucntion. */
@@ -1646,7 +1663,8 @@
 		    sizeof(struct get_state_tag_len_struct)+
 		    sizeof(short int)];
 	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
-	struct drbd_nl_cfg_reply *reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
+	struct drbd_nl_cfg_reply *reply =
+		(struct drbd_nl_cfg_reply *)cn_reply->data;
 	unsigned short *tl = reply->tag_list;
 
 	/* WARN("drbd_bcast_state() got called\n"); */
@@ -1679,7 +1697,8 @@
 		    sizeof(struct call_helper_tag_len_struct)+
 		    sizeof(short int)];
 	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
-	struct drbd_nl_cfg_reply *reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
+	struct drbd_nl_cfg_reply *reply =
+		(struct drbd_nl_cfg_reply *)cn_reply->data;
 	unsigned short *tl = reply->tag_list;
 	int str_len;
 
@@ -1717,7 +1736,8 @@
 		    sizeof(struct sync_progress_tag_len_struct)+
 		    sizeof(short int)];
 	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
-	struct drbd_nl_cfg_reply *reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
+	struct drbd_nl_cfg_reply *reply =
+		(struct drbd_nl_cfg_reply *)cn_reply->data;
 	unsigned short *tl = reply->tag_list;
 	int res;
 	unsigned long rs_left;
@@ -1727,8 +1747,9 @@
 
 		rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
 		if (rs_left > mdev->rs_total) {
-			ERR("logic bug? rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
-					rs_left, mdev->rs_total, mdev->rs_failed);
+			ERR("logic bug? "
+			    "rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
+				rs_left, mdev->rs_total, mdev->rs_failed);
 			res = 1000;
 		} else {
 			res = (rs_left >> 10)*1000/((mdev->rs_total >> 10) + 1);
@@ -1801,7 +1822,8 @@
 {
 	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
 	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
-	struct drbd_nl_cfg_reply *reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
+	struct drbd_nl_cfg_reply *reply =
+		(struct drbd_nl_cfg_reply *)cn_reply->data;
 	int rr;
 
 	cn_reply->id = req->id;

Modified: branches/drbd-8.0-for-linus/drbd/drbd_proc.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_proc.c	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_proc.c	2007-07-24 17:02:29 UTC (rev 2999)
@@ -157,7 +157,7 @@
 	seq_printf(seq, " K/sec\n");
 }
 
-#if 0
+#ifdef DRBD_DUMP_RESYNC_DETAIL
 void resync_dump_detail(struct seq_file *seq, struct lc_element *e)
 {
 	struct bm_extent *bme = (struct bm_extent *)e;
@@ -246,7 +246,7 @@
 		if (mdev->act_log)
 			lc_printf_stats(seq, mdev->act_log);
 
-#if 0
+#ifdef DRBD_DUMP_RESYNC_DETAIL
 		if (mdev->resync) {
 			lc_dump(mdev->resync, seq, "rs_left",
 				resync_dump_detail);

Modified: branches/drbd-8.0-for-linus/drbd/drbd_receiver.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_receiver.c	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_receiver.c	2007-07-24 17:02:29 UTC (rev 2999)
@@ -64,57 +64,6 @@
 }
 #endif
 
-
-#if 0
-#define CHECK_LIST_LIMIT 1000
-void check_list(struct drbd_conf *mdev, struct list_head *list, char *t)
-{
-	struct list_head *le, *la;
-	int forward = 0, backward = 0;
-
-	le = list;
-	do {
-		la = le;
-		le = le->next;
-		if (le->prev != la) {
-			printk(KERN_ERR DEVICE_NAME
-			       "%d: %s list fucked.\n",
-			       mdev_to_minor(mdev), t);
-			break;
-		}
-		if (forward++ > CHECK_LIST_LIMIT) {
-			printk(KERN_ERR DEVICE_NAME
-			       "%d: %s forward > 1000\n",
-			       mdev_to_minor(mdev), t);
-			break;
-		}
-	} while (le != list);
-
-	le = list;
-	do {
-		la = le;
-		le = le->prev;
-		if (le->next != la) {
-			printk(KERN_ERR DEVICE_NAME
-			       "%d: %s list fucked.\n",
-			       mdev_to_minor(mdev), t);
-			break;
-		}
-		if (backward++ > CHECK_LIST_LIMIT) {
-			printk(KERN_ERR DEVICE_NAME
-			       "%d: %s backward > 1000\n",
-			       mdev_to_minor(mdev), t);
-			break;
-		}
-	} while (le != list);
-
-	if (forward != backward) {
-		printk(KERN_ERR DEVICE_NAME "%d: forward=%d, backward=%d\n",
-		       mdev_to_minor(mdev), forward, backward);
-	}
-}
-#endif
-
 #define GFP_TRY	( __GFP_HIGHMEM | __GFP_NOWARN )
 
 /**
@@ -165,7 +114,8 @@
 		/* hm. pool was empty. try to allocate from kernel.
 		 * don't wait, if none is available, though.
 		 */
-		if ( atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers ) {
+		if (atomic_read(&mdev->pp_in_use)
+					< mdev->net_conf->max_buffers) {
 			page = alloc_page(GFP_TRY);
 			if (page)
 				break;
@@ -464,7 +414,8 @@
 	DEFINE_WAIT(wait);
 	MUST_HOLD(&mdev->req_lock);
 
-	/* avoids spin_lock/unlock and calling prepare_to_wait in the fast path */
+	/* avoids spin_lock/unlock
+	 * and calling prepare_to_wait in the fast path */
 	while (!list_empty(head)) {
 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&mdev->req_lock);
@@ -503,9 +454,9 @@
 
 	return newsock;
 
-      out_release:
+out_release:
 	sock_release(newsock);
-      out:
+out:
 	if (err != -EAGAIN && err != -EINTR)
 		ERR("accept failed! %d\n", err);
 	return 0;
@@ -682,7 +633,8 @@
 int drbd_do_handshake(struct drbd_conf *mdev);
 int drbd_do_auth(struct drbd_conf *mdev);
 
-int drbd_send_fp(struct drbd_conf *mdev, struct socket *sock, enum Drbd_Packet_Cmd cmd)
+int drbd_send_fp(struct drbd_conf *mdev,
+	struct socket *sock, enum Drbd_Packet_Cmd cmd)
 {
 	struct Drbd_Header *h = (struct Drbd_Header *) &mdev->data.sbuf.head;
 
@@ -717,14 +669,16 @@
 	D_ASSERT(mdev->state.conn >= Unconnected);
 	D_ASSERT(!mdev->data.socket);
 
-	if (drbd_request_state(mdev, NS(conn, WFConnection)) < SS_Success ) return 0;
+	if (drbd_request_state(mdev, NS(conn, WFConnection)) < SS_Success)
+		return 0;
 	clear_bit(DISCARD_CONCURRENT, &mdev->flags);
 
 	sock  = NULL;
 	msock = NULL;
 
 	do {
-		for (try = 0;;) { /* 3 tries, this should take less than a second! */
+		for (try = 0;;) {
+			/* 3 tries, this should take less than a second! */
 			s = drbd_try_connect(mdev);
 			if (s || ++try >= 3) break;
 			/* give the other side time to call bind() & listen() */
@@ -813,7 +767,8 @@
 	mdev->meta.socket = msock;
 	mdev->last_received = jiffies;
 
-	if (drbd_request_state(mdev, NS(conn, WFReportParams)) < SS_Success) return 0;
+	if (drbd_request_state(mdev, NS(conn, WFReportParams)) < SS_Success)
+		return 0;
 	D_ASSERT(mdev->asender.task == NULL);
 
 	h = drbd_do_handshake(mdev);
@@ -996,7 +951,8 @@
 			     expect);
 		kunmap(bvec->bv_page);
 		if (rr != expect) {
-			WARN("short read receiving data reply: read %d expected %d\n",
+			WARN("short read receiving data reply: "
+			     "read %d expected %d\n",
 			     rr, expect);
 			return 0;
 		}
@@ -1057,7 +1013,8 @@
 	       INFO("submit EE (RS)WRITE sec=%llus size=%u ee=%p\n",
 		    (unsigned long long)e->sector, e->size, e);
 	       );
-	drbd_generic_make_request(mdev, WRITE, DRBD_FAULT_RS_WR, e->private_bio);
+	drbd_generic_make_request(mdev, WRITE, DRBD_FAULT_RS_WR,
+					e->private_bio);
 	/* accounting done in endio */
 
 	maybe_kick_lo(mdev);
@@ -1180,8 +1137,9 @@
 			/* FIXME I think we should send a NegAck regardless of
 			 * which protocol is in effect.
 			 * In which case we would need to make sure that any
-			 * NegAck is sent. basically that means that drbd_process_done_ee
-			 * may not list_del() the ee before this callback did run...
+			 * NegAck is sent. Basically that means that
+			 * drbd_process_done_ee may not list_del() the ee
+			 * before this callback did run...
 			 * maybe even move the list_del(e) in here... */
 			ok  = drbd_send_ack(mdev, NegAck, e);
 			ok &= drbd_io_error(mdev, FALSE);
@@ -1299,7 +1257,8 @@
 		 * corresponding dec_local done either below (on error),
 		 * or in drbd_endio_write_sec. */
 		if (DRBD_ratelimit(5*HZ, 5))
-			ERR("Can not write mirrored data block to local disk.\n");
+			ERR("Can not write mirrored data block "
+			    "to local disk.\n");
 		spin_lock(&mdev->peer_seq_lock);
 		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
 			mdev->peer_seq++;
@@ -1367,7 +1326,8 @@
 		 * if no conflicting request is found:
 		 *    submit.
 		 *
-		 * if any conflicting request is found that has not yet been acked,
+		 * if any conflicting request is found
+		 * that has not yet been acked,
 		 * AND I have the "discard concurrent writes" flag:
 		 *	 queue (via done_ee) the DiscardAck; OUT.
 		 *
@@ -1395,19 +1355,20 @@
 		for (;;) {
 			int have_unacked = 0;
 			int have_conflict = 0;
-			prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
+			prepare_to_wait(&mdev->misc_wait, &wait,
+				TASK_INTERRUPTIBLE);
 			hlist_for_each_entry(i, n, slot, colision) {
 				if (OVERLAPS) {
-					if (first) {
-						/* only ALERT on first iteration,
-						 * we may be woken up early... */
+					/* only ALERT on first iteration,
+					 * we may be woken up early... */
+					if (first)
 						ALERT("%s[%u] Concurrent local write detected!"
 						      "	new: %llus +%u; pending: %llus +%u\n",
 						      current->comm, current->pid,
 						      (unsigned long long)sector, size,
 						      (unsigned long long)i->sector, i->size);
-					}
-					if (i->rq_state & RQ_NET_PENDING) ++have_unacked;
+					if (i->rq_state & RQ_NET_PENDING)
+						++have_unacked;
 					++have_conflict;
 				}
 			}
@@ -1523,7 +1484,8 @@
 		 * maybe rather move it into the e_end_block callback,
 		 * where it would be sent as soon as possible).
 		 */
-		(void)drbd_send_b_ack(mdev, cpu_to_be32(barrier_nr), epoch_size);
+		(void)drbd_send_b_ack(mdev,
+					cpu_to_be32(barrier_nr), epoch_size);
 	}
 
 	switch (mdev->net_conf->wire_protocol) {
@@ -1554,13 +1516,14 @@
 		    (unsigned long long)e->sector, e->size, e);
 	       );
 	/* FIXME drbd_al_begin_io in case we have two primaries... */
-	drbd_generic_make_request(mdev, WRITE, DRBD_FAULT_DT_WR, e->private_bio);
+	drbd_generic_make_request(mdev, WRITE, DRBD_FAULT_DT_WR,
+					e->private_bio);
 	/* accounting done in endio */
 
 	maybe_kick_lo(mdev);
 	return TRUE;
 
-  out_interrupted:
+out_interrupted:
 	/* yes, the epoch_size now is imbalanced.
 	 * but we drop the connection anyways, so we don't have a chance to
 	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
@@ -1576,7 +1539,8 @@
 	struct Tl_epoch_entry *e;
 	int size;
 	unsigned int fault_type;
-	struct Drbd_BlockRequest_Packet *p = (struct Drbd_BlockRequest_Packet *)h;
+	struct Drbd_BlockRequest_Packet *p =
+		(struct Drbd_BlockRequest_Packet *)h;
 
 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
 
@@ -1599,7 +1563,8 @@
 
 	if (!inc_local_if_state(mdev, UpToDate)) {
 		if (DRBD_ratelimit(5*HZ, 5))
-			ERR("Can not satisfy peer's read request, no local data.\n");
+			ERR("Can not satisfy peer's read request, "
+			    "no local data.\n");
 		drbd_send_ack_rp(mdev, h->command == DataRequest ? NegDReply :
 				 NegRSDReply , p);
 		return TRUE;
@@ -1687,7 +1652,8 @@
 		     "Using discard-least-changes instead\n");
 	case DiscardZeroChg:
 		if (ch_peer == 0 && ch_self == 0) {
-			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) ? -1 : 1;
+			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+				? -1 : 1;
 			break;
 		} else {
 			if (ch_peer == 0) { rv =  1; break; }
@@ -1699,7 +1665,8 @@
 		else if (ch_self > ch_peer) rv =  1;
 		else /* ( ch_self == ch_peer ) */ {
 			/* Well, then use something else. */
-			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) ? -1 : 1;
+			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+				? -1 : 1;
 		}
 		break;
 	case DiscardLocal:
@@ -1898,8 +1865,8 @@
 /* drbd_sync_handshake() returns the new conn state on success, or
    conn_mask (-1) on failure.
  */
-enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
-					enum drbd_disk_state peer_disk)
+enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev,
+	enum drbd_role peer_role, enum drbd_disk_state peer_disk)
 {
 	int hg, rule_nr;
 	enum drbd_conns rv = conn_mask;
@@ -1933,7 +1900,8 @@
 	}
 
 	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp) ) {
-		int pcount = (mdev->state.role == Primary) + (peer_role == Primary);
+		int pcount = (mdev->state.role == Primary)
+			   + (peer_role == Primary);
 		int forced = (hg == -100);
 
 		switch (pcount) {
@@ -1948,7 +1916,8 @@
 			break;
 		}
 		if ( abs(hg) < 100 ) {
-			WARN("Split-Brain detected, %d primaries, automatically solved. Sync from %s node\n",
+			WARN("Split-Brain detected, %d primaries, "
+			     "automatically solved. Sync from %s node\n",
 			     pcount, (hg < 0) ? "peer":"this");
 			if (forced) {
 				WARN("Doing a full sync, since"
@@ -1967,7 +1936,8 @@
 			hg = 1;
 
 		if ( abs(hg) < 100 )
-			WARN("Split-Brain detected, manually solved. Sync from %s node\n",
+			WARN("Split-Brain detected, manually solved. "
+			     "Sync from %s node\n",
 			     (hg < 0) ? "peer":"this");
 	}
 
@@ -2126,7 +2096,8 @@
 }
 
 /* warn if the arguments differ by more than 12.5% */
-static void warn_if_differ_considerably(struct drbd_conf *mdev, const char *s, sector_t a, sector_t b)
+static void warn_if_differ_considerably(struct drbd_conf *mdev,
+	const char *s, sector_t a, sector_t b)
 {
 	sector_t d;
 	if (a == 0 || b == 0) return;
@@ -2195,9 +2166,9 @@
 	if (inc_local(mdev)) {
 		drbd_bm_lock(mdev);
 		/*
-		 * you may get a flip-flop connection established/connection loss,
-		 * in case both really have different usize uppon first connect!
-		 * try to solve it thus:
+		 * you may get a flip-flop connection established/connection
+		 * loss, in case both really have different usize upon first
+		 * connect!  try to solve it thus:
 		 ***/
 
 		drbd_determin_dev_size(mdev);
@@ -2209,7 +2180,8 @@
 	}
 
 	if (mdev->p_uuid && mdev->state.conn <= Connected && inc_local(mdev)) {
-		nconn = drbd_sync_handshake(mdev, mdev->state.peer, mdev->state.pdsk);
+		nconn = drbd_sync_handshake(mdev,
+				mdev->state.peer, mdev->state.pdsk);
 		dec_local(mdev);
 
 		if (nconn == conn_mask) return FALSE;
@@ -2343,7 +2315,8 @@
 	if (mdev->p_uuid && oconn <= Connected &&
 	    peer_state.disk >= Negotiating &&
 	    inc_local_if_state(mdev, Negotiating) ) {
-		nconn = drbd_sync_handshake(mdev, peer_state.role, peer_state.disk);
+		nconn = drbd_sync_handshake(mdev,
+				peer_state.role, peer_state.disk);
 		dec_local(mdev);
 
 		if (nconn == conn_mask) return FALSE;
@@ -2371,10 +2344,11 @@
 
 	if (oconn > WFReportParams) {
 		if (nconn > Connected && peer_state.conn <= Connected) {
-			/* we want resync, peer has not yet decided to sync... */
+			/* we want resync, peer has not yet decided to sync */
 			drbd_send_uuids(mdev);
 			drbd_send_state(mdev);
-		} else if (nconn == Connected && peer_state.disk == Negotiating) {
+		} else if (nconn == Connected &&
+					peer_state.disk == Negotiating) {
 			/* peer is waiting for us to respond... */
 			drbd_send_state(mdev);
 		}
@@ -2396,7 +2370,8 @@
 	struct Drbd_SyncUUID_Packet *p = (struct Drbd_SyncUUID_Packet *)h;
 
 	wait_event( mdev->misc_wait,
-		    mdev->state.conn < Connected || mdev->state.conn == WFSyncUUID);
+		    mdev->state.conn < Connected ||
+		    mdev->state.conn == WFSyncUUID);
 
 	/* D_ASSERT( mdev->state.conn == WFSyncUUID ); */
 
@@ -2533,7 +2508,8 @@
 
 		if (header->command < MAX_CMD)
 			handler = drbd_cmd_handler[header->command];
-		else if (MayIgnore < header->command && header->command < MAX_OPT_CMD)
+		else if (MayIgnore < header->command
+		     && header->command < MAX_OPT_CMD)
 			handler = drbd_opt_cmd_handler[header->command-MayIgnore];
 		else if (header->command > MAX_OPT_CMD)
 			handler = receive_skip;
@@ -2553,7 +2529,8 @@
 			break;
 		}
 
-		dump_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf, __FILE__, __LINE__);
+		dump_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf,
+				__FILE__, __LINE__);
 	}
 }
 
@@ -2767,7 +2744,8 @@
 {
 	/* ASSERT current == mdev->receiver ... */
 	struct Drbd_HandShake_Packet *p = &mdev->data.rbuf.HandShake;
-	const int expect = sizeof(struct Drbd_HandShake_Packet)-sizeof(struct Drbd_Header);
+	const int expect = sizeof(struct Drbd_HandShake_Packet)
+			  -sizeof(struct Drbd_Header);
 	int rv;
 
 	rv = drbd_send_handshake(mdev);
@@ -2795,7 +2773,8 @@
 		return 0;
 	}
 
-	dump_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf, __FILE__, __LINE__);
+	dump_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf,
+			__FILE__, __LINE__);
 
 	p->protocol_version = be32_to_cpu(p->protocol_version);
 
@@ -2806,8 +2785,8 @@
 			      "Peer wants protocol version: %u\n",
 			      p->protocol_version );
 		}
-		INFO( "Handshake successful: DRBD Network Protocol version %u\n",
-		      PRO_VERSION );
+		INFO("Handshake successful: "
+		     "DRBD Network Protocol version %u\n", PRO_VERSION);
 	} /* else if ( p->protocol_version == (PRO_VERSION-1) ) {
 		// not yet; but next time :)
 		INFO( "Handshake successful: DRBD Protocol version %u\n",
@@ -3228,17 +3207,21 @@
 	int empty;
 
 	static struct asender_cmd asender_tbl[] = {
-	[Ping]		= { sizeof(struct Drbd_Header),	    got_Ping },
-	[PingAck]	= { sizeof(struct Drbd_Header),	    got_PingAck },
-	[RecvAck]	= { sizeof(struct Drbd_BlockAck_Packet),   got_BlockAck },
-	[WriteAck]	= { sizeof(struct Drbd_BlockAck_Packet),   got_BlockAck },
-	[RSWriteAck]	= { sizeof(struct Drbd_BlockAck_Packet),   got_BlockAck },
-	[DiscardAck]	= { sizeof(struct Drbd_BlockAck_Packet),   got_BlockAck },
-	[NegAck]	= { sizeof(struct Drbd_BlockAck_Packet),   got_NegAck },
-	[NegDReply]	= { sizeof(struct Drbd_BlockAck_Packet),   got_NegDReply },
-	[NegRSDReply]	= { sizeof(struct Drbd_BlockAck_Packet),   got_NegRSDReply},
-	[BarrierAck]	= { sizeof(struct Drbd_BarrierAck_Packet), got_BarrierAck },
-	[StateChgReply] = { sizeof(struct Drbd_RqS_Reply_Packet),  got_RqSReply },
+	[Ping]		= { sizeof(struct Drbd_Header), got_Ping },
+	[PingAck]	= { sizeof(struct Drbd_Header),	got_PingAck },
+	[RecvAck]	= { sizeof(struct Drbd_BlockAck_Packet), got_BlockAck },
+	[WriteAck]	= { sizeof(struct Drbd_BlockAck_Packet), got_BlockAck },
+	[RSWriteAck]	= { sizeof(struct Drbd_BlockAck_Packet), got_BlockAck },
+	[DiscardAck]	= { sizeof(struct Drbd_BlockAck_Packet), got_BlockAck },
+	[NegAck]	= { sizeof(struct Drbd_BlockAck_Packet), got_NegAck },
+	[NegDReply]	=
+		{ sizeof(struct Drbd_BlockAck_Packet), got_NegDReply },
+	[NegRSDReply]	=
+		{ sizeof(struct Drbd_BlockAck_Packet), got_NegRSDReply},
+	[BarrierAck]	=
+		{ sizeof(struct Drbd_BarrierAck_Packet), got_BarrierAck },
+	[StateChgReply] =
+		{ sizeof(struct Drbd_RqS_Reply_Packet), got_RqSReply },
 	};
 
 	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
@@ -3318,13 +3301,15 @@
 			}
 			expect = asender_tbl[cmd].pkt_size;
 			ERR_IF(len != expect-sizeof(struct Drbd_Header)) {
-				dump_packet(mdev, mdev->meta.socket, 1, (void *)h, __FILE__, __LINE__);
+				dump_packet(mdev, mdev->meta.socket, 1,
+					(void *)h, __FILE__, __LINE__);
 				DUMPI(expect);
 			}
 		}
 		if (received == expect) {
 			D_ASSERT(cmd != -1);
-			dump_packet(mdev, mdev->meta.socket, 1, (void *)h, __FILE__, __LINE__);
+			dump_packet(mdev, mdev->meta.socket, 1, (void *)h,
+					__FILE__, __LINE__);
 			if (!asender_tbl[cmd].process(mdev, h)) goto err;
 
 			buf	 = h;
@@ -3335,7 +3320,7 @@
 	}
 
 	if (0) {
-	err:
+err:
 		clear_bit(SIGNAL_ASENDER, &mdev->flags);
 		if (mdev->state.conn >= Connected)
 			drbd_force_state(mdev, NS(conn, NetworkFailure));

Modified: branches/drbd-8.0-for-linus/drbd/drbd_req.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_req.c	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_req.c	2007-07-24 17:02:29 UTC (rev 2999)
@@ -77,7 +77,8 @@
 		[send_canceled] = "send_canceled",
 		[send_failed] = "send_failed",
 		[handed_over_to_network] = "handed_over_to_network",
-		[connection_lost_while_pending] = "connection_lost_while_pending",
+		[connection_lost_while_pending] =
+					"connection_lost_while_pending",
 		[recv_acked_by_peer] = "recv_acked_by_peer",
 		[write_acked_by_peer] = "write_acked_by_peer",
 		[neg_acked] = "neg_acked",
@@ -93,8 +94,10 @@
 }
 
 # ifdef ENABLE_DYNAMIC_TRACE
-#  define print_rq_state(R, T) MTRACE(TraceTypeRq, TraceLvlMetrics, _print_rq_state(R, T);)
-#  define print_req_mod(T, W)  MTRACE(TraceTypeRq, TraceLvlMetrics, _print_req_mod(T, W);)
+#  define print_rq_state(R, T) \
+	MTRACE(TraceTypeRq, TraceLvlMetrics, _print_rq_state(R, T);)
+#  define print_req_mod(T, W)  \
+	MTRACE(TraceTypeRq, TraceLvlMetrics, _print_req_mod(T, W);)
 # else
 #  define print_rq_state(R, T) _print_rq_state(R, T)
 #  define print_req_mod(T, W)  _print_req_mod(T, W)
@@ -105,7 +108,8 @@
 #define print_req_mod(T, W)
 #endif
 
-static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
+static void _req_is_done(struct drbd_conf *mdev,
+	struct drbd_request *req, const int rw)
 {
 	const unsigned long s = req->rq_state;
 	/* if it was a write, we may have to set the corresponding
@@ -179,65 +183,68 @@
 	}
 }
 
-static void _about_to_complete_local_write(struct drbd_conf *mdev, struct drbd_request *req)
+static void _about_to_complete_local_write(struct drbd_conf *mdev,
+	struct drbd_request *req)
 {
 	const unsigned long s = req->rq_state;
-			struct drbd_request *i;
-			struct Tl_epoch_entry *e;
-			struct hlist_node *n;
-			struct hlist_head *slot;
+	struct drbd_request *i;
+	struct Tl_epoch_entry *e;
+	struct hlist_node *n;
+	struct hlist_head *slot;
 
-			/* before we can signal completion to the upper layers,
-			 * we may need to close the current epoch */
-			if (req->epoch == mdev->newest_barrier->br_number)
-				set_bit(ISSUE_BARRIER, &mdev->flags);
+	/* before we can signal completion to the upper layers,
+	 * we may need to close the current epoch */
+	if (req->epoch == mdev->newest_barrier->br_number)
+		set_bit(ISSUE_BARRIER, &mdev->flags);
 
-			/* we need to do the conflict detection stuff,
-			 * if we have the ee_hash (two_primaries) and
-			 * this has been on the network */
-			if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
-				const sector_t sector = req->sector;
-				const int size = req->size;
+	/* we need to do the conflict detection stuff,
+	 * if we have the ee_hash (two_primaries) and
+	 * this has been on the network */
+	if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
+		const sector_t sector = req->sector;
+		const int size = req->size;
 
-				/* ASSERT:
-				 * there must be no conflicting requests, since
-				 * they must have been failed on the spot */
+		/* ASSERT:
+		 * there must be no conflicting requests, since
+		 * they must have been failed on the spot */
 #define OVERLAPS overlaps(sector, size, i->sector, i->size)
-				slot = tl_hash_slot(mdev, sector);
-				hlist_for_each_entry(i, n, slot, colision) {
-					if (OVERLAPS) {
-						ALERT("LOGIC BUG: completed: %p %llus +%u; other: %p %llus +%u\n",
-						      req, (unsigned long long)sector, size,
-						      i,   (unsigned long long)i->sector, i->size);
-					}
-				}
+		slot = tl_hash_slot(mdev, sector);
+		hlist_for_each_entry(i, n, slot, colision) {
+			if (OVERLAPS) {
+				ALERT("LOGIC BUG: completed: %p %llus +%u; "
+				      "other: %p %llus +%u\n",
+				      req, (unsigned long long)sector, size,
+				      i, (unsigned long long)i->sector, i->size);
+			}
+		}
 
-				/* maybe "wake" those conflicting epoch entries
-				 * that wait for this request to finish.
-				 *
-				 * currently, there can be only _one_ such ee
-				 * (well, or some more, which would be pending
-				 * DiscardAck not yet sent by the asender...),
-				 * since we block the receiver thread upon the
-				 * first conflict detection, which will wait on
-				 * misc_wait.  maybe we want to assert that?
-				 *
-				 * anyways, if we found one,
-				 * we just have to do a wake_up.  */
+		/* maybe "wake" those conflicting epoch entries
+		 * that wait for this request to finish.
+		 *
+		 * currently, there can be only _one_ such ee
+		 * (well, or some more, which would be pending
+		 * DiscardAck not yet sent by the asender...),
+		 * since we block the receiver thread upon the
+		 * first conflict detection, which will wait on
+		 * misc_wait.  maybe we want to assert that?
+		 *
+		 * anyways, if we found one,
+		 * we just have to do a wake_up.  */
 #undef OVERLAPS
 #define OVERLAPS overlaps(sector, size, e->sector, e->size)
-				slot = ee_hash_slot(mdev, req->sector);
-				hlist_for_each_entry(e, n, slot, colision) {
-					if (OVERLAPS) {
-						wake_up(&mdev->misc_wait);
-						break;
-					}
-				}
+		slot = ee_hash_slot(mdev, req->sector);
+		hlist_for_each_entry(e, n, slot, colision) {
+			if (OVERLAPS) {
+				wake_up(&mdev->misc_wait);
+				break;
 			}
+		}
+	}
 #undef OVERLAPS
 }
 
-static void _complete_master_bio(struct drbd_conf *mdev, struct drbd_request *req, int error)
+static void _complete_master_bio(struct drbd_conf *mdev,
+	struct drbd_request *req, int error)
 {
 	dump_bio(mdev, req->master_bio, 1);
 	bio_endio(req->master_bio, req->master_bio->bi_size, error);
@@ -377,8 +384,9 @@
 	slot = tl_hash_slot(mdev, sector);
 	hlist_for_each_entry(i, n, slot, colision) {
 		if (OVERLAPS) {
-			ALERT("%s[%u] Concurrent local write detected!"
-			      " [DISCARD L] new: %llus +%u; pending: %llus +%u\n",
+			ALERT("%s[%u] Concurrent local write detected! "
+			      "[DISCARD L] new: %llus +%u; "
+			      "pending: %llus +%u\n",
 			      current->comm, current->pid,
 			      (unsigned long long)sector, size,
 			      (unsigned long long)i->sector, i->size);
@@ -395,7 +403,8 @@
 		hlist_for_each_entry(e, n, slot, colision) {
 			if (OVERLAPS) {
 				ALERT("%s[%u] Concurrent remote write detected!"
-				      " [DISCARD L] new: %llus +%u; pending: %llus +%u\n",
+				      " [DISCARD L] new: %llus +%u; "
+				      "pending: %llus +%u\n",
 				      current->comm, current->pid,
 				      (unsigned long long)sector, size,
 				      (unsigned long long)e->sector, e->size);
@@ -405,13 +414,13 @@
 	}
 #undef OVERLAPS
 
-  out_no_conflict:
+out_no_conflict:
 	/* this is like it should be, and what we expected.
 	 * our users do behave after all... */
 	dec_net(mdev);
 	return 0;
 
-  out_conflict:
+out_conflict:
 	dec_net(mdev);
 	return 1;
 }
@@ -578,11 +587,13 @@
 		 *
 		 * Add req to the (now) current epoch (barrier). */
 
-		/* see drbd_make_request_common just after it grabs the req_lock */
+		/* see drbd_make_request_common,
+		 * just after it grabs the req_lock */
 		D_ASSERT(test_bit(ISSUE_BARRIER, &mdev->flags) == 0);
 
 		req->epoch = mdev->newest_barrier->br_number;
-		list_add_tail(&req->tl_requests, &mdev->newest_barrier->requests);
+		list_add_tail(&req->tl_requests,
+				&mdev->newest_barrier->requests);
 
 		/* mark the current epoch as closed,
 		 * in case it outgrew the limit */
@@ -602,11 +613,11 @@
 	case send_canceled:
 		/* for the request, this is the same thing */
 	case send_failed:
-		/* real cleanup will be done from tl_clear.  just update flags so
-		 * it is no longer marked as on the worker queue */
+		/* real cleanup will be done from tl_clear.  just update flags
+		 * so it is no longer marked as on the worker queue */
 		req->rq_state &= ~RQ_NET_QUEUED;
-		/* if we did it right, tl_clear should be scheduled only after this,
-		 * so this should not be necessary! */
+		/* if we did it right, tl_clear should be scheduled only after
+		 * this, so this should not be necessary! */
 		_req_may_be_done(req, error);
 		break;
 
@@ -692,7 +703,8 @@
 			/* barrier came in before all requests have been acked.
 			 * this is bad, because if the connection is lost now,
 			 * we won't be able to clean them up... */
-			_print_rq_state(req, "FIXME (barrier_acked but pending)");
+			_print_rq_state(req,
+				"FIXME (barrier_acked but pending)");
 		}
 		D_ASSERT(req->rq_state & RQ_NET_SENT);
 		req->rq_state |= RQ_NET_DONE;
@@ -753,9 +765,9 @@
  * application request is on the fly, so once we are positive about a "bad"
  * state, we know it won't get better during the lifetime of this request.
  *
- * In case we think we are ok, but "asynchronously" some interrupt or other thread
- * marks some operation as impossible, we are still ok, since we would just try
- * anyways, and then see that it does not work there and then.
+ * In case we think we are ok, but "asynchronously" some interrupt or other
+ * thread marks some operation as impossible, we are still ok, since we would
+ * just try anyways, and then see that it does not work there and then.
  */
 
 int
@@ -836,11 +848,11 @@
 		goto fail_and_free_req;
 	}
 
-	/* For WRITES going to the local disk, grab a reference on the target extent.
-	 * This waits for any resync activity in the corresponding resync
-	 * extent to finish, and, if necessary, pulls in the target extent into
-	 * the activity log, which involves further disk io because of transactional
-	 * on-disk meta data updates. */
+	/* For WRITES going to the local disk, grab a reference on the target
+	 * extent.  This waits for any resync activity in the corresponding
+	 * resync extent to finish, and, if necessary, pulls in the target
+	 * extent into the activity log, which involves further disk io because
+	 * of transactional on-disk meta data updates. */
 	if (rw == WRITE && local)
 		drbd_al_begin_io(mdev, sector);
 
@@ -997,7 +1009,8 @@
 		if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR :
 				       ( rw == READ ? DRBD_FAULT_DT_RD :
 						   DRBD_FAULT_DT_RA ) ))
-			bio_endio(req->private_bio, req->private_bio->bi_size, -EIO);
+			bio_endio(req->private_bio,
+					req->private_bio->bi_size, -EIO);
 		else
 			generic_make_request(req->private_bio);
 	}
@@ -1008,7 +1021,7 @@
 
 	return 0;
 
-  fail_and_free_req:
+fail_and_free_req:
 	if (b) kfree(b);
 	bio_endio(bio, bio->bi_size, err);
 	drbd_req_free(req);
@@ -1030,8 +1043,11 @@
 	if (mdev->state.role != Primary &&
 		( !allow_oos || is_write) ) {
 		if (DRBD_ratelimit(5*HZ, 5)) {
-			ERR("Process %s[%u] tried to %s; since we are not in Primary state, we cannot allow this\n",
-			    current->comm, current->pid, is_write ? "WRITE" : "READ");
+			ERR("Process %s[%u] tried to %s; "
+			    "since we are not in Primary state, "
+			    "we cannot allow this\n",
+			    current->comm, current->pid,
+			    is_write ? "WRITE" : "READ");
 		}
 		return 1;
 	}
@@ -1138,20 +1154,19 @@
 int drbd_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
 {
 	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
-	unsigned int bio_offset = (unsigned int)bio->bi_sector << 9; /* 32 bit */
+	unsigned int bio_offset =
+		(unsigned int)bio->bi_sector << 9; /* 32 bit */
 	unsigned int bio_size = bio->bi_size;
 	int limit, backing_limit;
 
-#if 1
-	limit = DRBD_MAX_SEGMENT_SIZE - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size);
-#else
-	limit = AL_EXTENT_SIZE - ((bio_offset & (AL_EXTENT_SIZE-1)) + bio_size);
-#endif
+	limit = DRBD_MAX_SEGMENT_SIZE
+	      - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size);
 	if (limit < 0) limit = 0;
 	if (bio_size == 0) {
 		if (limit <= bvec->bv_len) limit = bvec->bv_len;
 	} else if (limit && inc_local(mdev)) {
-		request_queue_t * const b = mdev->bc->backing_bdev->bd_disk->queue;
+		request_queue_t * const b =
+			mdev->bc->backing_bdev->bd_disk->queue;
 		if (b->merge_bvec_fn && mdev->bc->dc.use_bmbv) {
 			backing_limit = b->merge_bvec_fn(b, bio, bvec);
 			limit = min(limit, backing_limit);

Modified: branches/drbd-8.0-for-linus/drbd/drbd_req.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_req.h	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_req.h	2007-07-24 17:02:29 UTC (rev 2999)
@@ -204,17 +204,21 @@
 #define RQ_NET_OK          (1UL << __RQ_NET_OK)
 #define RQ_NET_SIS         (1UL << __RQ_NET_SIS)
 
-#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK) /* 0x1f8 */
+/* 0x1f8 */
+#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)
 
 /* epoch entries */
-static inline struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
+static inline
+struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
 {
 	BUG_ON(mdev->ee_hash_s == 0);
-	return mdev->ee_hash + ((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
+	return mdev->ee_hash +
+		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
 }
 
 /* transfer log (drbd_request objects) */
-static inline struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
+static inline
+struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
 {
 	BUG_ON(mdev->tl_hash_s == 0);
 	return mdev->tl_hash +
@@ -223,7 +227,8 @@
 
 /* when we receive the answer for a read request,
  * verify that we actually know about it */
-static inline struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev, u64 id, sector_t sector)
+static inline struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
+	u64 id, sector_t sector)
 {
 	struct hlist_head *slot = tl_hash_slot(mdev, sector);
 	struct hlist_node *n;
@@ -255,7 +260,8 @@
 
 /* when we receive the answer for a read request,
  * verify that we actually know about it */
-static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev, u64 id, sector_t sector)
+static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
+	u64 id, sector_t sector)
 {
 	struct hlist_head *slot = ar_hash_slot(mdev, sector);
 	struct hlist_node *n;
@@ -270,10 +276,12 @@
 	return NULL;
 }
 
-static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev, struct bio *bio_src)
+static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
+	struct bio *bio_src)
 {
 	struct bio *bio;
-	struct drbd_request *req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
+	struct drbd_request *req =
+		mempool_alloc(drbd_request_mempool, GFP_NOIO);
 	if (likely(req)) {
 		bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */
 
@@ -307,10 +315,12 @@
 /* apparently too large to be inlined...
  * moved to drbd_req.c */
 extern void _req_may_be_done(struct drbd_request *req, int error);
-extern void _req_mod(struct drbd_request *req, enum drbd_req_event what, int error);
+extern void _req_mod(struct drbd_request *req,
+		enum drbd_req_event what, int error);
 
 /* If you need it irqsave, do it your self! */
-static inline void req_mod(struct drbd_request *req, enum drbd_req_event what, int error)
+static inline void req_mod(struct drbd_request *req,
+		enum drbd_req_event what, int error)
 {
 	struct drbd_conf *mdev = req->mdev;
 	spin_lock_irq(&mdev->req_lock);

Modified: branches/drbd-8.0-for-linus/drbd/drbd_worker.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_worker.c	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_worker.c	2007-07-24 17:02:29 UTC (rev 2999)
@@ -266,7 +266,8 @@
 	/* FIXME this is ugly. we should not detach for read io-error,
 	 * but try to WRITE the DataReply to the failed location,
 	 * to give the disk the chance to relocate that block */
-	drbd_io_error(mdev, FALSE); /* tries to schedule a detach and notifies peer */
+	/* try to schedule a detach and notifies peer: */
+	drbd_io_error(mdev, FALSE);
 	return w_send_read_req(mdev, w, 0);
 }
 
@@ -302,7 +303,8 @@
 
 #define SLEEP_TIME (HZ/10)
 
-int w_make_resync_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_make_resync_request(struct drbd_conf *mdev,
+		struct drbd_work *w, int cancel)
 {
 	unsigned long bit;
 	sector_t sector;
@@ -321,7 +323,8 @@
 	}
 
 	if (mdev->state.conn != SyncTarget)
-		ERR("%s in w_make_resync_request\n", conns_to_name(mdev->state.conn));
+		ERR("%s in w_make_resync_request\n",
+			conns_to_name(mdev->state.conn));
 
 	number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
 
@@ -340,8 +343,7 @@
 	}
 
 	for (i = 0; i < number; i++) {
-
-	next_sector:
+next_sector:
 		size = BM_BLOCK_SIZE;
 		/* as of now, we are the only user of drbd_bm_find_next */
 		bit  = drbd_bm_find_next(mdev);
@@ -616,7 +618,8 @@
 			ok = drbd_send_block(mdev, RSDataReply, e);
 		} else {
 			if (DRBD_ratelimit(5*HZ, 5))
-				ERR("Not sending RSDataReply, partner DISKLESS!\n");
+				ERR("Not sending RSDataReply, "
+				    "partner DISKLESS!\n");
 			ok = 1;
 		}
 	} else {
@@ -678,7 +681,8 @@
 	/* inc_ap_pending was done where this was queued.
 	 * dec_ap_pending will be done in got_BarrierAck
 	 * or (on connection loss) in w_clear_epoch.  */
-	ok = _drbd_send_cmd(mdev, mdev->data.socket, Barrier, (struct Drbd_Header *)p, sizeof(*p), 0);
+	ok = _drbd_send_cmd(mdev, mdev->data.socket, Barrier,
+				(struct Drbd_Header *)p, sizeof(*p), 0);
 	drbd_put_data_sock(mdev);
 
 	return ok;
@@ -823,9 +827,9 @@
 			continue;
 		if (odev->state.aftr_isp) {
 			if (_drbd_may_sync_now(odev))
-				rv |= ( _drbd_set_state(_NS(odev, aftr_isp, 0),
-							ChgStateHard|ScheduleAfter)
-					!= SS_NothingToDo ) ;
+				rv |= (_drbd_set_state(_NS(odev, aftr_isp, 0),
+						ChgStateHard|ScheduleAfter)
+					!= SS_NothingToDo) ;
 		}
 	}
 	return rv;
@@ -1003,7 +1007,8 @@
 		if (!w->cb(mdev, w, mdev->state.conn < Connected )) {
 			/* WARN("worker: a callback failed! \n"); */
 			if (mdev->state.conn >= Connected)
-				drbd_force_state(mdev, NS(conn, NetworkFailure));
+				drbd_force_state(mdev,
+						NS(conn, NetworkFailure));
 		}
 	}
 
@@ -1035,7 +1040,7 @@
 	 * from the times when the worker did not live as long as the
 	 * device.. */
 
-	D_ASSERT( mdev->state.disk == Diskless && mdev->state.conn == StandAlone );
+	D_ASSERT(mdev->state.disk == Diskless && mdev->state.conn == StandAlone);
 	drbd_mdev_cleanup(mdev);
 	module_put(THIS_MODULE);
 

Modified: branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h	2007-07-24 17:02:29 UTC (rev 2999)
@@ -17,10 +17,13 @@
 extern char *drbd_sec_holder;
 
 /* bi_end_io handlers */
-extern int drbd_md_io_complete(struct bio *bio, unsigned int bytes_done, int error);
+extern int drbd_md_io_complete(struct bio *bio,
+		unsigned int bytes_done, int error);
 
-extern int drbd_endio_read_sec(struct bio *bio, unsigned int bytes_done, int error);
-extern int drbd_endio_write_sec(struct bio *bio, unsigned int bytes_done, int error);
+extern int drbd_endio_read_sec(struct bio *bio,
+		unsigned int bytes_done, int error);
+extern int drbd_endio_write_sec(struct bio *bio,
+		unsigned int bytes_done, int error);
 extern int drbd_endio_pri(struct bio *bio, unsigned int bytes_done, int error);
 
 static inline sector_t drbd_get_hardsect(struct block_device *bdev)
@@ -108,12 +111,15 @@
 /*
  * used to submit our private bio
  */
-static inline void drbd_generic_make_request(struct drbd_conf *mdev, int rw, int fault_type, struct bio *bio)
+static inline void drbd_generic_make_request(struct drbd_conf *mdev, int rw,
+	int fault_type, struct bio *bio)
 {
-	bio->bi_rw = rw; /* on the receiver side, e->..rw was not yet defined. */
+	/* on the receiver side, e->..rw was not yet defined. */
+	bio->bi_rw = rw;
 
 	if (!bio->bi_bdev) {
-		printk(KERN_ERR DEVICE_NAME "%d: drbd_generic_make_request: bio->bi_bdev == NULL\n",
+		printk(KERN_ERR DEVICE_NAME
+		       "%d: drbd_generic_make_request: bio->bi_bdev == NULL\n",
 		       mdev_to_minor(mdev));
 		dump_stack();
 		bio_endio(bio, bio->bi_size, -ENODEV);

Modified: branches/drbd-8.0-for-linus/drbd/linux/drbd.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/linux/drbd.h	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/linux/drbd.h	2007-07-24 17:02:29 UTC (rev 2999)
@@ -177,7 +177,7 @@
 	Attaching,      /* In the process of reading the meta-data */
 	Failed,         /* Becomes Diskless as soon as we told it the peer */
 			/* when >= Failed it is legal to access mdev->bc */
-	Negotiating,    /* Late attaching state, we need to talk to the peer... */
+	Negotiating,    /* Late attaching state, we need to talk to the peer */
 	Inconsistent,
 	Outdated,
 	DUnknown,       /* Only used for the peer, never for myself */
@@ -253,8 +253,8 @@
 	Bitmap,
 	History_start,
 	History_end,
-	UUID_SIZE,      /* In the packet we store the number of dirty bits here */
-	UUID_FLAGS,     /* In the packet we store flags here. */
+	UUID_SIZE,      /* nl-packet: number of dirty bits */
+	UUID_FLAGS,     /* nl-packet: flags */
 	EXT_UUID_SIZE   /* Everything. */
 };
 

Modified: branches/drbd-8.0-for-linus/drbd/lru_cache.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/lru_cache.c	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/lru_cache.c	2007-07-24 17:02:29 UTC (rev 2999)
@@ -90,9 +90,10 @@
 {
 	/* NOTE:
 	 * total calls to lc_get are
-	 * starving + hits + misses
-	 * misses include "dirty" count (update from an other thread in progress)
-	 * and "changed", when this in fact lead to an successful update of the cache.
+	 * (starving + hits + misses)
+	 * misses include "dirty" count (update from another thread in
+	 * progress) and "changed", when this in fact led to a successful
+	 * update of the cache.
 	 */
 	return seq_printf(seq, "\t%s: used:%u/%u "
 		"hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n",
@@ -293,7 +294,8 @@
 	++lc->changed;
 	e->lc_number = lc->new_number;
 	list_add(&e->list, &lc->in_use);
-	hlist_add_head( &e->colision, lc->slot + lc_hash_fn(lc, lc->new_number) );
+	hlist_add_head(&e->colision,
+		lc->slot + lc_hash_fn(lc, lc->new_number));
 	lc->changing_element = NULL;
 	lc->new_number = -1;
 	clear_bit(__LC_DIRTY, &lc->flags);
@@ -312,7 +314,8 @@
 	BUG_ON(e->refcnt == 0);
 	BUG_ON(e == lc->changing_element);
 	if (--e->refcnt == 0) {
-		list_move(&e->list, &lc->lru); /* move it to the front of LRU. */
+		/* move it to the front of LRU. */
+		list_move(&e->list, &lc->lru);
 		lc->used--;
 		clear_bit(__LC_STARVING, &lc->flags);
 		smp_mb__after_clear_bit();
@@ -342,7 +345,7 @@
 	list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru);
 }
 
-#if 0
+#ifdef DRBD_DUMP_RESYNC_DETAIL
 /**
  * lc_dump: Dump a complete LRU cache to seq in textual form.
  */

Modified: branches/drbd-8.0-for-linus/drbd/lru_cache.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/lru_cache.h	2007-07-24 14:57:11 UTC (rev 2998)
+++ branches/drbd-8.0-for-linus/drbd/lru_cache.h	2007-07-24 17:02:29 UTC (rev 2999)
@@ -26,22 +26,23 @@
  */
 
 /*
-  The lru_cache describes a big set of objects that are addressed
-  by an index number (=lc_number). Only a small fraction of this set
-  is present in the cache.
-  (You set the size of the cache during lc_alloc)
-  Once created, the api consists of
-    lc_find(,nr) -- finds the object with the given number, if present
-    lc_get(,nr)  -- finds the object and increases the usage count
-		    if not present, actions are taken to make sure that
-		    the cache is updated, the user is notified of this by a callback.
-		    Return value is NULL in this case.
-		    As soon as the user informs the cache that it has been updated,
-		    the next lc_get on that very object number will be successfull.
-    lc_put(,lc_element*)
-		 -- decreases the usage count of this object, and returns the new value.
-
-    NOTE: It is the USERS responsibility to make sure that calls do not happen concurrently.
+ * The lru_cache describes a big set of objects that are addressed
+ * by an index number (=lc_number). Only a small fraction of this set
+ * is present in the cache.
+ * (You set the size of the cache during lc_alloc)
+ * Once created, the api consists of
+ *   lc_find(,nr) -- finds the object with the given number, if present
+ *   lc_get(,nr)  -- finds the object and increases the usage count
+ *	if not present, actions are taken to make sure that
+ *	the cache is updated, the user is notified of this by a callback.
+ *	Return value is NULL in this case.
+ *	As soon as the user informs the cache that it has been updated,
+ *	the next lc_get on that very object number will be successful.
+ *   lc_put(,lc_element*)
+ *     -- decreases the usage count of this object, and returns the new value.
+ *
+ * NOTE: It is the USER'S responsibility
+ * to make sure that calls do not happen concurrently.
  */
 
 #ifndef LRU_CACHE_H



More information about the drbd-cvs mailing list