[DRBD-cvs] svn commit by lars - r2985 - in branches/drbd-8.0-for-linus/drbd: . linux - 293bfdba51982d69e720fe2bd371084f731b390f ..., ... => ...

drbd-cvs at lists.linbit.com drbd-cvs at lists.linbit.com
Tue Jul 24 13:38:08 CEST 2007


Author: lars
Date: 2007-07-24 13:38:05 +0200 (Tue, 24 Jul 2007)
New Revision: 2985

Modified:
   branches/drbd-8.0-for-linus/drbd/drbd_actlog.c
   branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c
   branches/drbd-8.0-for-linus/drbd/drbd_int.h
   branches/drbd-8.0-for-linus/drbd/drbd_main.c
   branches/drbd-8.0-for-linus/drbd/drbd_nl.c
   branches/drbd-8.0-for-linus/drbd/drbd_proc.c
   branches/drbd-8.0-for-linus/drbd/drbd_receiver.c
   branches/drbd-8.0-for-linus/drbd/drbd_req.c
   branches/drbd-8.0-for-linus/drbd/drbd_req.h
   branches/drbd-8.0-for-linus/drbd/drbd_worker.c
   branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h
   branches/drbd-8.0-for-linus/drbd/linux/drbd_nl.h
   branches/drbd-8.0-for-linus/drbd/linux/drbd_tag_magic.h
   branches/drbd-8.0-for-linus/drbd/lru_cache.c
   branches/drbd-8.0-for-linus/drbd/lru_cache.h
Log:
293bfdba51982d69e720fe2bd371084f731b390f ...,... => ..., ...
(comma spacing)


Modified: branches/drbd-8.0-for-linus/drbd/drbd_actlog.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_actlog.c	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_actlog.c	2007-07-24 11:38:05 UTC (rev 2985)
@@ -53,7 +53,7 @@
 
 	if (FAULT_ACTIVE(mdev, (rw & WRITE)? DRBD_FAULT_MD_WR:DRBD_FAULT_MD_RD)) {
 		bio->bi_rw |= rw;
-		bio_endio(bio,bio->bi_size,-EIO);
+		bio_endio(bio, bio->bi_size, -EIO);
 	}
 	else {
 #ifdef BIO_RW_SYNC
@@ -73,13 +73,13 @@
 int drbd_md_sync_page_io(drbd_dev *mdev, struct drbd_backing_dev *bdev,
 			 sector_t sector, int rw)
 {
-	int hardsect,mask,ok,offset=0;
+	int hardsect, mask, ok, offset=0;
 	struct page *iop = mdev->md_io_page;
 
 	D_ASSERT(semaphore_is_locked(&mdev->md_io_mutex));
 
 	if (!bdev->md_bdev) {
-		if (DRBD_ratelimit(5*HZ,5)) {
+		if (DRBD_ratelimit(5*HZ, 5)) {
 			ERR("bdev->md_bdev==NULL\n");
 			dump_stack();
 		}
@@ -113,8 +113,8 @@
 			void *p = page_address(mdev->md_io_page);
 			void *hp = page_address(mdev->md_io_tmpp);
 
-			ok = _drbd_md_sync_page_io(mdev, bdev,iop,
-						   sector,READ,hardsect);
+			ok = _drbd_md_sync_page_io(mdev, bdev, iop,
+						   sector, READ, hardsect);
 
 			if (unlikely(!ok)) {
 				ERR("drbd_md_sync_page_io(,%llus,READ [hardsect!=512]) failed!\n",
@@ -138,10 +138,10 @@
 		     (unsigned long long)sector, rw ? "WRITE" : "READ");
 	}
 
-	ok = _drbd_md_sync_page_io(mdev, bdev,iop,sector,rw,hardsect);
+	ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, hardsect);
 	if (unlikely(!ok)) {
 		ERR("drbd_md_sync_page_io(,%llus,%s) failed!\n",
-		    (unsigned long long)sector,rw ? "WRITE" : "READ");
+		    (unsigned long long)sector, rw ? "WRITE" : "READ");
 		return 0;
 	}
 
@@ -192,15 +192,15 @@
 	unsigned long     al_flags=0;
 
 	spin_lock_irq(&mdev->al_lock);
-	bm_ext = (struct bm_extent*) lc_find(mdev->resync,enr/AL_EXT_PER_BM_SECT);
+	bm_ext = (struct bm_extent*) lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
 	if (unlikely(bm_ext!=NULL)) {
-		if (test_bit(BME_NO_WRITES,&bm_ext->flags)) {
+		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
 			spin_unlock_irq(&mdev->al_lock);
 			//INFO("Delaying app write until sync read is done\n");
 			return 0;
 		}
 	}
-	al_ext   = lc_get(mdev->act_log,enr);
+	al_ext   = lc_get(mdev->act_log, enr);
 	al_flags = mdev->act_log->flags;
 	spin_unlock_irq(&mdev->al_lock);
 
@@ -227,13 +227,13 @@
 
 	D_ASSERT(atomic_read(&mdev->local_cnt)>0);
 
-	MTRACE(TraceTypeALExts,TraceLvlMetrics,
+	MTRACE(TraceTypeALExts, TraceLvlMetrics,
 	       INFO("al_begin_io( sec=%llus (al_enr=%u) (rs_enr=%d) )\n",
 		    (unsigned long long) sector, enr, 
 		    (int)BM_SECT_TO_EXT(sector));
 	       );
 
-	wait_event(mdev->al_wait, (al_ext = _al_get(mdev,enr)) );
+	wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)) );
 
 	if (al_ext->lc_number != enr) {
 		// We have to do write an transaction to AL.
@@ -254,7 +254,7 @@
 		al_work.al_ext = al_ext;
 		al_work.enr = enr;
 		al_work.w.cb = w_al_write_transaction;
-		drbd_queue_work_front(&mdev->data.work,&al_work.w);
+		drbd_queue_work_front(&mdev->data.work, &al_work.w);
 		wait_for_completion(&al_work.event);
 
 		mdev->al_writ_cnt++;
@@ -264,7 +264,7 @@
 		DUMPI(mdev->act_log->new_number);
 		*/
 		spin_lock_irq(&mdev->al_lock);
-		lc_changed(mdev->act_log,al_ext);
+		lc_changed(mdev->act_log, al_ext);
 		spin_unlock_irq(&mdev->al_lock);
 		wake_up(&mdev->al_wait);
 	}
@@ -276,33 +276,33 @@
 	struct lc_element *extent;
 	unsigned long flags;
 
-	MTRACE(TraceTypeALExts,TraceLvlMetrics,
+	MTRACE(TraceTypeALExts, TraceLvlMetrics,
 	       INFO("al_complete_io( sec=%llus (al_enr=%u) (rs_enr=%d) )\n",
 		    (unsigned long long) sector, enr, 
 		    (int)BM_SECT_TO_EXT(sector));
 	       );
 
-	spin_lock_irqsave(&mdev->al_lock,flags);
+	spin_lock_irqsave(&mdev->al_lock, flags);
 
-	extent = lc_find(mdev->act_log,enr);
+	extent = lc_find(mdev->act_log, enr);
 
 	if (!extent) {
-		spin_unlock_irqrestore(&mdev->al_lock,flags);
-		ERR("al_complete_io() called on inactive extent %u\n",enr);
+		spin_unlock_irqrestore(&mdev->al_lock, flags);
+		ERR("al_complete_io() called on inactive extent %u\n", enr);
 		return;
 	}
 
-	if ( lc_put(mdev->act_log,extent) == 0 ) {
+	if ( lc_put(mdev->act_log, extent) == 0 ) {
 		wake_up(&mdev->al_wait);
 	}
 
-	spin_unlock_irqrestore(&mdev->al_lock,flags);
+	spin_unlock_irqrestore(&mdev->al_lock, flags);
 }
 
 STATIC int
 w_al_write_transaction(struct Drbd_Conf *mdev, struct drbd_work *w, int unused)
 {
-	int i,n,mx;
+	int i, n, mx;
 	unsigned int extent_nr;
 	struct al_transaction* buffer;
 	sector_t sector;
@@ -323,12 +323,12 @@
 	buffer->updates[0].extent = cpu_to_be32(new_enr);
 
 #if 0	/* Use this printf with the test_al.pl program */
-	ERR("T%03d S%03d=E%06d\n", mdev->al_tr_number,n,new_enr);
+	ERR("T%03d S%03d=E%06d\n", mdev->al_tr_number, n, new_enr);
 #endif
 
 	xor_sum ^= new_enr;
 
-	mx = min_t(int,AL_EXTENTS_PT,
+	mx = min_t(int, AL_EXTENTS_PT,
 		   mdev->act_log->nr_elements - mdev->al_tr_cycle);
 	for(i=0;i<mx;i++) {
 		extent_nr = lc_entry(mdev->act_log,
@@ -351,12 +351,12 @@
 // warning LGE "FIXME code missing"
 	sector = mdev->bc->md.md_offset + mdev->bc->md.al_offset + mdev->al_tr_pos;
 
-	if (!drbd_md_sync_page_io(mdev,mdev->bc,sector,WRITE)) {
+	if (!drbd_md_sync_page_io(mdev, mdev->bc, sector, WRITE)) {
 		drbd_chk_io_error(mdev, 1, TRUE);
 		drbd_io_error(mdev, TRUE);
 	}
 
-	if ( ++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements,AL_EXTENTS_PT) ) {
+	if ( ++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT) ) {
 		mdev->al_tr_pos=0;
 	}
 	D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
@@ -381,12 +381,12 @@
 			   int index)
 {
 	sector_t sector;
-	int rv,i;
+	int rv, i;
 	u32 xor_sum=0;
 
 	sector = bdev->md.md_offset + bdev->md.al_offset + index;
 
-	if (!drbd_md_sync_page_io(mdev,bdev,sector,READ)) {
+	if (!drbd_md_sync_page_io(mdev, bdev, sector, READ)) {
 		// Dont process error normally as this is done before
 		// disk is atached!
 		return -1;
@@ -407,16 +407,16 @@
  * representation. Returns 1 on success, returns 0 when
  * reading the log failed due to IO errors.
  */
-int drbd_al_read_log(struct Drbd_Conf *mdev,struct drbd_backing_dev *bdev)
+int drbd_al_read_log(struct Drbd_Conf *mdev, struct drbd_backing_dev *bdev)
 {
 	struct al_transaction* buffer;
-	int from=-1,to=-1,i,cnr, overflow=0,rv;
+	int from=-1, to=-1, i, cnr, overflow=0, rv;
 	u32 from_tnr=-1, to_tnr=0;
 	int active_extents=0;
 	int transactions=0;
 	int mx;
 
-	mx = div_ceil(mdev->act_log->nr_elements,AL_EXTENTS_PT);
+	mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);
 
 	/* lock out all other meta data io for now,
 	 * and make sure the page is mapped.
@@ -426,7 +426,7 @@
 
 	// Find the valid transaction in the log
 	for(i=0;i<=mx;i++) {
-		rv = drbd_al_read_tr(mdev,bdev,buffer,i);
+		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
 		if (rv == 0) continue;
 		if (rv == -1) {
 			up(&mdev->md_io_mutex);
@@ -461,11 +461,11 @@
 	 */
 	i=from;
 	while(1) {
-		int j,pos;
+		int j, pos;
 		unsigned int extent_nr;
 		unsigned int trn;
 
-		rv = drbd_al_read_tr(mdev,bdev,buffer,i);
+		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
 		ERR_IF(rv == 0) goto cancel;
 		if (rv == -1) {
 			up(&mdev->md_io_mutex);
@@ -487,7 +487,7 @@
 			if (extent_nr == LC_FREE) continue;
 
 		       //if (j<3) INFO("T%03d S%03d=E%06d\n",trn,pos,extent_nr);
-			lc_set(mdev->act_log,extent_nr,pos);
+			lc_set(mdev->act_log, extent_nr, pos);
 			active_extents++;
 		}
 		spin_unlock_irq(&mdev->al_lock);
@@ -502,7 +502,7 @@
 
 	mdev->al_tr_number = to_tnr+1;
 	mdev->al_tr_pos = to;
-	if ( ++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements,AL_EXTENTS_PT) ) {
+	if ( ++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT) ) {
 		mdev->al_tr_pos=0;
 	}
 
@@ -510,7 +510,7 @@
 	up(&mdev->md_io_mutex);
 
 	INFO("Found %d transactions (%d active extents) in activity log.\n",
-	     transactions,active_extents);
+	     transactions, active_extents);
 
 	return 1;
 }
@@ -527,7 +527,7 @@
 	struct drbd_atodb_wait *wc = bio->bi_private;
 	struct Drbd_Conf *mdev=wc->mdev;
 	struct page *page;
-	int uptodate = bio_flagged(bio,BIO_UPTODATE);
+	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 
 	if (bio->bi_size) return 1;
 	if (!error && !uptodate) {
@@ -538,7 +538,7 @@
 		error = -EIO;
 	}
 
-	drbd_chk_io_error(mdev,error,TRUE);
+	drbd_chk_io_error(mdev, error, TRUE);
 	if (error && wc->error == 0) wc->error=error;
 
 	if (atomic_dec_and_test(&wc->count)) {
@@ -564,7 +564,7 @@
 			     unsigned int enr,
 			     struct drbd_atodb_wait *wc)
 {
-	int i=0,allocated_page=0;
+	int i=0, allocated_page=0;
 	struct bio *bio;
 	struct page *np;
 	sector_t on_disk_sector = enr + mdev->bc->md.md_offset + mdev->bc->md.bm_offset;
@@ -595,7 +595,7 @@
 
 	offset = S2W(enr);
 	drbd_bm_get_lel( mdev, offset, 
-			 min_t(size_t,S2W(1), drbd_bm_words(mdev) - offset),
+			 min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset),
 			 kmap(*page) + *page_offset );
 	kunmap(*page);
 
@@ -636,7 +636,7 @@
 	unsigned int page_offset=PAGE_SIZE;
 	struct drbd_atodb_wait wc;
 
-	ERR_IF (!inc_local_if_state(mdev,Attaching))
+	ERR_IF (!inc_local_if_state(mdev, Attaching))
 		return; /* sorry, I don't have any act_log etc... */
 
 	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
@@ -646,16 +646,16 @@
 	bios = kzalloc(sizeof(struct bio*) * nr_elements, GFP_KERNEL);
 	if (!bios) goto submit_one_by_one;
 
-	atomic_set(&wc.count,0);
+	atomic_set(&wc.count, 0);
 	init_completion(&wc.io_done);
 	wc.mdev = mdev;
 	wc.error = 0;
 
 	for(i=0;i<nr_elements;i++) {
-		enr = lc_entry(mdev->act_log,i)->lc_number;
+		enr = lc_entry(mdev->act_log, i)->lc_number;
 		if (enr == LC_FREE) continue;
 		/* next statement also does atomic_inc wc.count */
-		if (atodb_prepare_unless_covered(mdev,bios,&page,
+		if (atodb_prepare_unless_covered(mdev, bios, &page,
 						&page_offset,
 						enr/AL_EXT_PER_BM_SECT,
 						&wc))
@@ -671,7 +671,7 @@
 		if (bios[i]==NULL) break;
 		if (FAULT_ACTIVE( mdev, DRBD_FAULT_MD_WR )) {
 			bios[i]->bi_rw = WRITE;
-			bio_endio(bios[i],bios[i]->bi_size,-EIO);
+			bio_endio(bios[i], bios[i]->bi_size, -EIO);
 		} else {
 			submit_bio(WRITE, bios[i]);
 		}
@@ -707,7 +707,7 @@
 	WARN("Using the slow drbd_al_to_on_disk_bm()\n");
 
 	for(i=0;i<mdev->act_log->nr_elements;i++) {
-		enr = lc_entry(mdev->act_log,i)->lc_number;
+		enr = lc_entry(mdev->act_log, i)->lc_number;
 		if (enr == LC_FREE) continue;
 		/* Really slow: if we have al-extents 16..19 active,
 		 * sector 4 will be written four times! Synchronous! */
@@ -733,7 +733,7 @@
 	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
 
 	for(i=0;i<mdev->act_log->nr_elements;i++) {
-		enr = lc_entry(mdev->act_log,i)->lc_number;
+		enr = lc_entry(mdev->act_log, i)->lc_number;
 		if (enr == LC_FREE) continue;
 		add += drbd_bm_ALe_set_all(mdev, enr);
 	}
@@ -742,16 +742,16 @@
 	wake_up(&mdev->al_wait);
 
 	INFO("Marked additional %s as out-of-sync based on AL.\n",
-	     ppsize(ppb,Bit2KB(add)));
+	     ppsize(ppb, Bit2KB(add)));
 }
 
-static inline int _try_lc_del(struct Drbd_Conf *mdev,struct lc_element *al_ext)
+static inline int _try_lc_del(struct Drbd_Conf *mdev, struct lc_element *al_ext)
 {
 	int rv;
 
 	spin_lock_irq(&mdev->al_lock);
 	rv = (al_ext->refcnt == 0);
-	if (likely(rv)) lc_del(mdev->act_log,al_ext);
+	if (likely(rv)) lc_del(mdev->act_log, al_ext);
 	spin_unlock_irq(&mdev->al_lock);
 
 	if (unlikely(!rv)) INFO("Waiting for extent in drbd_al_shrink()\n");
@@ -769,12 +769,12 @@
 	struct lc_element *al_ext;
 	int i;
 
-	D_ASSERT( test_bit(__LC_DIRTY,&mdev->act_log->flags) );
+	D_ASSERT( test_bit(__LC_DIRTY, &mdev->act_log->flags) );
 
 	for(i=0;i<mdev->act_log->nr_elements;i++) {
-		al_ext = lc_entry(mdev->act_log,i);
+		al_ext = lc_entry(mdev->act_log, i);
 		if (al_ext->lc_number == LC_FREE) continue;
-		wait_event(mdev->al_wait, _try_lc_del(mdev,al_ext));
+		wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
 	}
 
 	wake_up(&mdev->al_wait);
@@ -784,8 +784,8 @@
 {
 	struct update_odbm_work *udw = (struct update_odbm_work*)w;
 
-	if ( !inc_local_if_state(mdev,Attaching) ) {
-		if (DRBD_ratelimit(5*HZ,5))
+	if ( !inc_local_if_state(mdev, Attaching) ) {
+		if (DRBD_ratelimit(5*HZ, 5))
 			WARN("Can not update on disk bitmap, local IO disabled.\n");
 		return 1;
 	}
@@ -814,7 +814,7 @@
  *
  * TODO will be obsoleted once we have a caching lru of the on disk bitmap
  */
-STATIC void drbd_try_clear_on_disk_bm(struct Drbd_Conf *mdev,sector_t sector,
+STATIC void drbd_try_clear_on_disk_bm(struct Drbd_Conf *mdev, sector_t sector,
 				      int count, int success)
 {
 	struct bm_extent* ext;
@@ -829,7 +829,7 @@
 	// a 16 MB extent border. (Currently this is true...)
 	enr = BM_SECT_TO_EXT(sector);
 
-	ext = (struct bm_extent *) lc_get(mdev->resync,enr);
+	ext = (struct bm_extent *) lc_get(mdev->resync, enr);
 	if (ext) {
 		if (ext->lce.lc_number == enr) {
 			if (success)
@@ -842,7 +842,7 @@
 				     ext->lce.lc_number, ext->rs_left, ext->rs_failed, count);
 				dump_stack();
 				// FIXME brrrgs. should never happen!
-				drbd_force_state(mdev,NS(conn,Disconnecting));
+				drbd_force_state(mdev, NS(conn, Disconnecting));
 				return;
 			}
 		} else {
@@ -852,7 +852,7 @@
 
 			// OR an application write finished, and therefore
 			// we set something in this area in sync.
-			int rs_left = drbd_bm_e_weight(mdev,enr);
+			int rs_left = drbd_bm_e_weight(mdev, enr);
 			if (ext->flags != 0) {
 				WARN("changing resync lce: %d[%u;%02lx]"
 				     " -> %d[%u;00]\n",
@@ -864,26 +864,26 @@
 				WARN("Kicking resync_lru element enr=%u "
 				     "out with rs_failed=%d\n",
 				     ext->lce.lc_number, ext->rs_failed);
-				set_bit(WRITE_BM_AFTER_RESYNC,&mdev->flags);
+				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
 			}
 			ext->rs_left = rs_left;
 			ext->rs_failed = success ? 0 : count;
-			lc_changed(mdev->resync,&ext->lce);
+			lc_changed(mdev->resync, &ext->lce);
 		}
-		lc_put(mdev->resync,&ext->lce);
+		lc_put(mdev->resync, &ext->lce);
 		// no race, we are within the al_lock!
 
 		if (ext->rs_left == ext->rs_failed) {
 			ext->rs_failed = 0;
 
-			udw=kmalloc(sizeof(*udw),GFP_ATOMIC);
+			udw=kmalloc(sizeof(*udw), GFP_ATOMIC);
 			if (udw) {
 				udw->enr = ext->lce.lc_number;
 				udw->w.cb = w_update_odbm;
-				drbd_queue_work_front(&mdev->data.work,&udw->w);
+				drbd_queue_work_front(&mdev->data.work, &udw->w);
 			} else {
 				WARN("Could not kmalloc an udw\n");
-				set_bit(WRITE_BM_AFTER_RESYNC,&mdev->flags);
+				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
 			}
 		}
 	} else {
@@ -904,7 +904,7 @@
 void __drbd_set_in_sync(drbd_dev* mdev, sector_t sector, int size, const char* file, const unsigned int line)
 {
 	/* Is called from worker and receiver context _only_ */
-	unsigned long sbnr,ebnr,lbnr,bnr;
+	unsigned long sbnr, ebnr, lbnr, bnr;
 	unsigned long count = 0;
 	sector_t esector, nr_sectors;
 	int wake_up=0;
@@ -912,7 +912,7 @@
 
 	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
 		ERR("drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
-				(unsigned long long)sector,size);
+				(unsigned long long)sector, size);
 		return;
 	}
 	nr_sectors = drbd_get_capacity(mdev->this_bdev);
@@ -946,9 +946,9 @@
 	 * ok, (capacity & 7) != 0 sometimes, but who cares...
 	 * we count rs_{total,left} in bits, not sectors.
 	 */
-	spin_lock_irqsave(&mdev->al_lock,flags);
+	spin_lock_irqsave(&mdev->al_lock, flags);
 	for(bnr=sbnr; bnr <= ebnr; bnr++) {
-		if (drbd_bm_clear_bit(mdev,bnr)) count++;
+		if (drbd_bm_clear_bit(mdev, bnr)) count++;
 	}
 	if (count) {
 		// we need the lock for drbd_try_clear_on_disk_bm
@@ -961,15 +961,15 @@
 				mdev->rs_mark_left =drbd_bm_total_weight(mdev);
 			}
 		}
-		if ( inc_local_if_state(mdev,Attaching) ) {
-			drbd_try_clear_on_disk_bm(mdev,sector,count,TRUE);
+		if ( inc_local_if_state(mdev, Attaching) ) {
+			drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
 			dec_local(mdev);
 		}
 		/* just wake_up unconditional now,
 		 * various lc_chaged(), lc_put() in drbd_try_clear_on_disk_bm(). */
 		wake_up=1;
 	}
-	spin_unlock_irqrestore(&mdev->al_lock,flags);
+	spin_unlock_irqrestore(&mdev->al_lock, flags);
 	if (wake_up) wake_up(&mdev->al_wait);
 }
 
@@ -982,7 +982,7 @@
  */
 void __drbd_set_out_of_sync(drbd_dev* mdev, sector_t sector, int size, const char* file, const unsigned int line)
 {
-	unsigned long sbnr,ebnr,lbnr;
+	unsigned long sbnr, ebnr, lbnr;
 	sector_t esector, nr_sectors;
 
 	/*  Find codepoints that call set_out_of_sync()
@@ -1004,7 +1004,7 @@
 	*/
 
 	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
-		ERR("sector: %llus, size: %d\n",(unsigned long long)sector,size);
+		ERR("sector: %llus, size: %d\n", (unsigned long long)sector, size);
 		return;
 	}
 
@@ -1028,7 +1028,7 @@
 
 	/* ok, (capacity & 7) != 0 sometimes, but who cares...
 	 * we count rs_{total,left} in bits, not sectors.  */
-	drbd_bm_set_bits_in_irq(mdev,sbnr,ebnr);
+	drbd_bm_set_bits_in_irq(mdev, sbnr, ebnr);
 }
 
 static inline
@@ -1044,16 +1044,16 @@
 		spin_unlock_irq(&mdev->al_lock);
 		return NULL;
 	}
-	bm_ext = (struct bm_extent*) lc_get(mdev->resync,enr);
+	bm_ext = (struct bm_extent*) lc_get(mdev->resync, enr);
 	if (bm_ext) {
 		if (bm_ext->lce.lc_number != enr) {
-			bm_ext->rs_left = drbd_bm_e_weight(mdev,enr);
+			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
 			bm_ext->rs_failed = 0;
-			lc_changed(mdev->resync,(struct lc_element*)bm_ext);
+			lc_changed(mdev->resync, (struct lc_element*)bm_ext);
 			wakeup = 1;
 		}
 		if (bm_ext->lce.refcnt == 1) mdev->resync_locked++;
-		set_bit(BME_NO_WRITES,&bm_ext->flags);
+		set_bit(BME_NO_WRITES, &bm_ext->flags);
 	}
 	rs_flags=mdev->resync->flags;
 	spin_unlock_irq(&mdev->al_lock);
@@ -1080,7 +1080,7 @@
 	spin_lock_irq(&mdev->al_lock);
 	if (unlikely(enr == mdev->act_log->new_number)) rv=1;
 	else {
-		al_ext = lc_find(mdev->act_log,enr);
+		al_ext = lc_find(mdev->act_log, enr);
 		if (al_ext) {
 			if (al_ext->refcnt) rv=1;
 		}
@@ -1113,22 +1113,22 @@
 
 	MTRACE(TraceTypeResync, TraceLvlAll,
 	       INFO("drbd_rs_begin_io: sector=%llus (rs_end=%d)\n",
-		    (unsigned long long)sector,enr);
+		    (unsigned long long)sector, enr);
 	    );
 
 	sig = wait_event_interruptible( mdev->al_wait,
-			(bm_ext = _bme_get(mdev,enr)) );
+			(bm_ext = _bme_get(mdev, enr)) );
 	if (sig) return 0;
 
-	if (test_bit(BME_LOCKED,&bm_ext->flags)) return 1;
+	if (test_bit(BME_LOCKED, &bm_ext->flags)) return 1;
 
 	for(i=0;i<AL_EXT_PER_BM_SECT;i++) {
 		sig = wait_event_interruptible( mdev->al_wait,
-				!_is_in_al(mdev,enr*AL_EXT_PER_BM_SECT+i) );
+				!_is_in_al(mdev, enr*AL_EXT_PER_BM_SECT+i) );
 		if (sig) {
 			spin_lock_irq(&mdev->al_lock);
-			if ( lc_put(mdev->resync,&bm_ext->lce) == 0 ) {
-				clear_bit(BME_NO_WRITES,&bm_ext->flags);
+			if ( lc_put(mdev->resync, &bm_ext->lce) == 0 ) {
+				clear_bit(BME_NO_WRITES, &bm_ext->flags);
 				mdev->resync_locked--;
 				wake_up(&mdev->al_wait);
 			}
@@ -1137,7 +1137,7 @@
 		}
 	}
 
-	set_bit(BME_LOCKED,&bm_ext->flags);
+	set_bit(BME_LOCKED, &bm_ext->flags);
 
 	return 1;
 }
@@ -1183,24 +1183,24 @@
 			INFO("dropping %u, aparently got 'synced' "
 			     "by application io\n", mdev->resync_wenr);
 		);
-		bm_ext = (struct bm_extent*)lc_find(mdev->resync,mdev->resync_wenr);
+		bm_ext = (struct bm_extent*)lc_find(mdev->resync, mdev->resync_wenr);
 		if (bm_ext) {
-			D_ASSERT(!test_bit(BME_LOCKED,&bm_ext->flags));
-			D_ASSERT(test_bit(BME_NO_WRITES,&bm_ext->flags));
-			clear_bit(BME_NO_WRITES,&bm_ext->flags);
+			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
+			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+			clear_bit(BME_NO_WRITES, &bm_ext->flags);
 			mdev->resync_wenr = LC_FREE;
-			lc_put(mdev->resync,&bm_ext->lce);
+			lc_put(mdev->resync, &bm_ext->lce);
 			wake_up(&mdev->al_wait);
 		} else {
 			ALERT("LOGIC BUG\n");
 		}
 	}
-	bm_ext = (struct bm_extent*)lc_try_get(mdev->resync,enr);
+	bm_ext = (struct bm_extent*)lc_try_get(mdev->resync, enr);
 	if (bm_ext) {
-		if (test_bit(BME_LOCKED,&bm_ext->flags)) {
+		if (test_bit(BME_LOCKED, &bm_ext->flags)) {
 			goto proceed;
 		}
-		if (!test_and_set_bit(BME_NO_WRITES,&bm_ext->flags)) {
+		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
 			mdev->resync_locked++;
 		} else {
 			/* we did set the BME_NO_WRITES,
@@ -1208,7 +1208,7 @@
 			 * so we tried again.
 			 * drop the extra reference. */
 			MTRACE(TraceTypeResync, TraceLvlAll,
-				INFO("dropping extra reference on %u\n",enr);
+				INFO("dropping extra reference on %u\n", enr);
 			);
 			bm_ext->lce.refcnt--;
 			D_ASSERT(bm_ext->lce.refcnt > 0);
@@ -1217,7 +1217,7 @@
 	} else {
 		if (mdev->resync_locked > mdev->resync->nr_elements-3)
 			goto try_again;
-		bm_ext = (struct bm_extent*)lc_get(mdev->resync,enr);
+		bm_ext = (struct bm_extent*)lc_get(mdev->resync, enr);
 		if (!bm_ext) {
 			const unsigned long rs_flags = mdev->resync->flags;
 			if (rs_flags & LC_STARVING) {
@@ -1230,28 +1230,28 @@
 			goto try_again;
 		}
 		if (bm_ext->lce.lc_number != enr) {
-			bm_ext->rs_left = drbd_bm_e_weight(mdev,enr);
+			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
 			bm_ext->rs_failed = 0;
-			lc_changed(mdev->resync,(struct lc_element*)bm_ext);
+			lc_changed(mdev->resync, (struct lc_element*)bm_ext);
 			wake_up(&mdev->al_wait);
-			D_ASSERT(test_bit(BME_LOCKED,&bm_ext->flags) == 0);
+			D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
 		}
-		set_bit(BME_NO_WRITES,&bm_ext->flags);
+		set_bit(BME_NO_WRITES, &bm_ext->flags);
 		D_ASSERT(bm_ext->lce.refcnt == 1);
 		mdev->resync_locked++;
 		goto check_al;
 	}
   check_al:
 	MTRACE(TraceTypeResync, TraceLvlAll,
-		INFO("checking al for %u\n",enr);
+		INFO("checking al for %u\n", enr);
 	);
 	for (i=0;i<AL_EXT_PER_BM_SECT;i++) {
 		if (unlikely(al_enr+i == mdev->act_log->new_number))
 			goto try_again;
-		if (lc_is_used(mdev->act_log,al_enr+i))
+		if (lc_is_used(mdev->act_log, al_enr+i))
 			goto try_again;
 	}
-	set_bit(BME_LOCKED,&bm_ext->flags);
+	set_bit(BME_LOCKED, &bm_ext->flags);
   proceed:
 	mdev->resync_wenr = LC_FREE;
 	spin_unlock_irq(&mdev->al_lock);
@@ -1259,7 +1259,7 @@
 
   try_again:
 	MTRACE(TraceTypeResync, TraceLvlAll,
-		INFO("need to try again for %u\n",enr);
+		INFO("need to try again for %u\n", enr);
 	);
 	if (bm_ext) mdev->resync_wenr = enr;
 	spin_unlock_irq(&mdev->al_lock);
@@ -1277,29 +1277,29 @@
 		    (long long)sector, enr);
 	    );
 
-	spin_lock_irqsave(&mdev->al_lock,flags);
-	bm_ext = (struct bm_extent*) lc_find(mdev->resync,enr);
+	spin_lock_irqsave(&mdev->al_lock, flags);
+	bm_ext = (struct bm_extent*) lc_find(mdev->resync, enr);
 	if (!bm_ext) {
-		spin_unlock_irqrestore(&mdev->al_lock,flags);
+		spin_unlock_irqrestore(&mdev->al_lock, flags);
 		ERR("drbd_rs_complete_io() called, but extent not found\n");
 		return;
 	}
 
 	if (bm_ext->lce.refcnt == 0) {
-		spin_unlock_irqrestore(&mdev->al_lock,flags);
+		spin_unlock_irqrestore(&mdev->al_lock, flags);
 		ERR("drbd_rs_complete_io(,%llu [=%u]) called, but refcnt is 0!?\n",
 		    (unsigned long long)sector, enr);
 		return;
 	}
 
-	if ( lc_put(mdev->resync,(struct lc_element *)bm_ext) == 0 ) {
-		clear_bit(BME_LOCKED,&bm_ext->flags);
-		clear_bit(BME_NO_WRITES,&bm_ext->flags);
+	if ( lc_put(mdev->resync, (struct lc_element *)bm_ext) == 0 ) {
+		clear_bit(BME_LOCKED, &bm_ext->flags);
+		clear_bit(BME_NO_WRITES, &bm_ext->flags);
 		mdev->resync_locked--;
 		wake_up(&mdev->al_wait);
 	}
 
-	spin_unlock_irqrestore(&mdev->al_lock,flags);
+	spin_unlock_irqrestore(&mdev->al_lock, flags);
 }
 
 /**
@@ -1317,15 +1317,15 @@
 
 	spin_lock_irq(&mdev->al_lock);
 
-	if (inc_local_if_state(mdev,Failed)) { // Makes sure ->resync is there.
+	if (inc_local_if_state(mdev, Failed)) { // Makes sure ->resync is there.
 		for(i=0;i<mdev->resync->nr_elements;i++) {
-			bm_ext = (struct bm_extent*) lc_entry(mdev->resync,i);
+			bm_ext = (struct bm_extent*) lc_entry(mdev->resync, i);
 			if (bm_ext->lce.lc_number == LC_FREE) continue;
 			bm_ext->lce.refcnt = 0; // Rude but ok.
 			bm_ext->rs_left = 0;
-			clear_bit(BME_LOCKED,&bm_ext->flags);
-			clear_bit(BME_NO_WRITES,&bm_ext->flags);
-			lc_del(mdev->resync,&bm_ext->lce);
+			clear_bit(BME_LOCKED, &bm_ext->flags);
+			clear_bit(BME_NO_WRITES, &bm_ext->flags);
+			lc_del(mdev->resync, &bm_ext->lce);
 		}
 		mdev->resync->used=0;
 		dec_local(mdev);
@@ -1353,31 +1353,31 @@
 
 	spin_lock_irq(&mdev->al_lock);
 
-	if (inc_local_if_state(mdev,Failed)) { // Makes sure ->resync is there.
+	if (inc_local_if_state(mdev, Failed)) { // Makes sure ->resync is there.
 		for(i=0;i<mdev->resync->nr_elements;i++) {
-			bm_ext = (struct bm_extent*) lc_entry(mdev->resync,i);
+			bm_ext = (struct bm_extent*) lc_entry(mdev->resync, i);
 			if (bm_ext->lce.lc_number == LC_FREE) continue;
 			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
 				INFO("dropping %u in drbd_rs_del_all, "
 				     "aparently got 'synced' by application io\n",
 				     mdev->resync_wenr);
-				D_ASSERT(!test_bit(BME_LOCKED,&bm_ext->flags));
-				D_ASSERT(test_bit(BME_NO_WRITES,&bm_ext->flags));
-				clear_bit(BME_NO_WRITES,&bm_ext->flags);
+				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
+				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+				clear_bit(BME_NO_WRITES, &bm_ext->flags);
 				mdev->resync_wenr = LC_FREE;
-				lc_put(mdev->resync,&bm_ext->lce);
+				lc_put(mdev->resync, &bm_ext->lce);
 			}
 			if (bm_ext->lce.refcnt != 0) {
 				INFO("Retrying drbd_rs_del_all() later. "
-				     "refcnt=%d\n",bm_ext->lce.refcnt);
+				     "refcnt=%d\n", bm_ext->lce.refcnt);
 				dec_local(mdev);
 				spin_unlock_irq(&mdev->al_lock);
 				return -EAGAIN;
 			}
 			D_ASSERT(bm_ext->rs_left == 0);
-			D_ASSERT(!test_bit(BME_LOCKED,&bm_ext->flags));
-			D_ASSERT(!test_bit(BME_NO_WRITES,&bm_ext->flags));
-			lc_del(mdev->resync,&bm_ext->lce);
+			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
+			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
+			lc_del(mdev->resync, &bm_ext->lce);
 		}
 		D_ASSERT(mdev->resync->used==0);
 		dec_local(mdev);
@@ -1395,19 +1395,19 @@
 void drbd_rs_failed_io(drbd_dev* mdev, sector_t sector, int size)
 {
 	/* Is called from worker and receiver context _only_ */
-	unsigned long sbnr,ebnr,lbnr,bnr;
+	unsigned long sbnr, ebnr, lbnr, bnr;
 	unsigned long count = 0;
 	sector_t esector, nr_sectors;
 	int wake_up=0;
 
 	MTRACE(TraceTypeResync, TraceLvlSummary,
 	       INFO("drbd_rs_failed_io: sector=%llus, size=%u\n",
-		    (unsigned long long)sector,size);
+		    (unsigned long long)sector, size);
 	    );
 
 	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
 		ERR("drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
-				(unsigned long long)sector,size);
+				(unsigned long long)sector, size);
 		return;
 	}
 	nr_sectors = drbd_get_capacity(mdev->this_bdev);
@@ -1438,13 +1438,13 @@
 	 */
 	spin_lock_irq(&mdev->al_lock);
 	for(bnr=sbnr; bnr <= ebnr; bnr++) {
-		if (drbd_bm_test_bit(mdev,bnr)>0) count++;
+		if (drbd_bm_test_bit(mdev, bnr)>0) count++;
 	}
 	if (count) {
 		mdev->rs_failed += count;
 
-		if ( inc_local_if_state(mdev,Attaching) ) {
-			drbd_try_clear_on_disk_bm(mdev,sector,count,FALSE);
+		if ( inc_local_if_state(mdev, Attaching) ) {
+			drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
 			dec_local(mdev);
 		}
 

Modified: branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c	2007-07-24 11:38:05 UTC (rev 2985)
@@ -118,17 +118,17 @@
 
 #if 0 // simply disabled for now...
 #define MUST_NOT_BE_LOCKED() do {					\
-	if (test_bit(BM_LOCKED,&b->bm_flags)) {				\
-		if (DRBD_ratelimit(5*HZ,5)) {				\
+	if (test_bit(BM_LOCKED, &b->bm_flags)) {				\
+		if (DRBD_ratelimit(5*HZ, 5)) {				\
 			ERR("%s:%d: bitmap is locked by %s:%lu\n",	\
-			    __FILE__, __LINE__, b->bm_file,b->bm_line);	\
+			    __FILE__, __LINE__, b->bm_file, b->bm_line);	\
 			dump_stack();					\
 		}							\
 	}								\
 } while (0)
 #define MUST_BE_LOCKED() do {						\
-	if (!test_bit(BM_LOCKED,&b->bm_flags)) {			\
-		if (DRBD_ratelimit(5*HZ,5)) {				\
+	if (!test_bit(BM_LOCKED, &b->bm_flags)) {			\
+		if (DRBD_ratelimit(5*HZ, 5)) {				\
 			ERR("%s:%d: bitmap not locked!\n",		\
 					__FILE__, __LINE__);		\
 			dump_stack();					\
@@ -143,12 +143,12 @@
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	spin_lock_irq(&b->bm_lock);
-	if (!__test_and_set_bit(BM_LOCKED,&b->bm_flags)) {
+	if (!__test_and_set_bit(BM_LOCKED, &b->bm_flags)) {
 		b->bm_file = file;
 		b->bm_line = line;
-	} else if (DRBD_ratelimit(5*HZ,5)) {
+	} else if (DRBD_ratelimit(5*HZ, 5)) {
 		ERR("%s:%d: bitmap already locked by %s:%lu\n",
-		    file, line, b->bm_file,b->bm_line);
+		    file, line, b->bm_file, b->bm_line);
 		/*
 		dump_stack();
 		ERR("This is no oops, but debug stack trace only.\n");
@@ -162,7 +162,7 @@
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	spin_lock_irq(&b->bm_lock);
-	if (!__test_and_clear_bit(BM_LOCKED,&mdev->bitmap->bm_flags)) {
+	if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags)) {
 		ERR("bitmap not locked in bm_unlock\n");
 	} else {
 		/* FIXME if we got a "is already locked" previously,
@@ -181,8 +181,8 @@
 	D_ASSERT(b->bm_dev_capacity == drbd_get_capacity(mdev->this_bdev));	\
 	if ( (b->bm_set != mdev->rs_total) &&					\
 	     (b->bm_set != mdev->rs_left) ) {					\
-		if ( DRBD_ratelimit(5*HZ,5) ) {					\
-			ERR("%s:%d: ?? bm_set=%lu; rs_total=%lu, rs_left=%lu\n",\
+		if ( DRBD_ratelimit(5*HZ, 5) ) {					\
+			ERR("%s:%d: ?? bm_set=%lu; rs_total=%lu, rs_left=%lu\n", \
 				__FILE__ , __LINE__ ,				\
 				b->bm_set, mdev->rs_total, mdev->rs_left );	\
 		}								\
@@ -214,7 +214,7 @@
 
 	if (w < b->bm_words) {
 		D_ASSERT(w == b->bm_words -1);
-		INFO("bm[%d]=0x%lX\n",w,b->bm[w]);
+		INFO("bm[%d]=0x%lX\n", w, b->bm[w]);
 	}
 }
 #else
@@ -244,7 +244,7 @@
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	D_BUG_ON(b);
-	b = kzalloc(sizeof(struct drbd_bitmap),GFP_KERNEL);
+	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
 	if (!b)
 		return -ENOMEM;
 	spin_lock_init(&b->bm_lock);
@@ -338,14 +338,14 @@
 
 	ERR_IF(!b) return;
 
-	spin_lock_irqsave(&b->bm_lock,flags);
-	bits = bm_count_bits(b,0);
+	spin_lock_irqsave(&b->bm_lock, flags);
+	bits = bm_count_bits(b, 0);
 	if (bits != b->bm_set) {
 		ERR("bm_set was %lu, corrected to %lu. %s:%d\n",
-		    b->bm_set,bits,file,line);
+		    b->bm_set, bits, file, line);
 		b->bm_set = bits;
 	}
-	spin_unlock_irqrestore(&b->bm_lock,flags);
+	spin_unlock_irqrestore(&b->bm_lock, flags);
 }
 
 #define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512)
@@ -389,14 +389,14 @@
 		spin_unlock_irq(&b->bm_lock);
 		goto free_obm;
 	} else {
-		bits = BM_SECT_TO_BIT(ALIGN(capacity,BM_SECTORS_PER_BIT));
+		bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECTORS_PER_BIT));
 
 		/* if we would use
 		   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
 		   a 32bit host could present the wrong number of words
 		   to a 64bit host.
 		*/
-		words = ALIGN(bits,64) >> LN2_BPL;
+		words = ALIGN(bits, 64) >> LN2_BPL;
 
 		D_ASSERT((u64)bits <= (((u64)mdev->bc->md.md_size_sect-MD_BM_OFFSET) << 12));
 
@@ -417,7 +417,7 @@
 			bytes = (words+1)*sizeof(long);
 			nbm = vmalloc(bytes);
 			if (!nbm) {
-				ERR("bitmap: failed to vmalloc %lu bytes\n",bytes);
+				ERR("bitmap: failed to vmalloc %lu bytes\n", bytes);
 				err = -ENOMEM;
 				goto out;
 			}
@@ -429,7 +429,7 @@
 		if (obm) {
 			bm_set_surplus(b);
 			D_ASSERT(b->bm[b->bm_words] == DRBD_MAGIC);
-			memcpy(nbm,obm,min_t(size_t,b->bm_words,words)*sizeof(long));
+			memcpy(nbm, obm, min_t(size_t, b->bm_words, words)*sizeof(long));
 		}
 		growing = words > b->bm_words;
 		if (growing) { // set all newly allocated bits
@@ -444,10 +444,10 @@
 		b->bm_words = words;
 		b->bm_dev_capacity = capacity;
 		bm_clear_surplus(b);
-		if (!growing) b->bm_set = bm_count_bits(b,0);
+		if (!growing) b->bm_set = bm_count_bits(b, 0);
 		bm_end_info(mdev, __FUNCTION__ );
 		spin_unlock_irq(&b->bm_lock);
-		INFO("resync bitmap: bits=%lu words=%lu\n",bits,words);
+		INFO("resync bitmap: bits=%lu words=%lu\n", bits, words);
 	}
  free_obm:
 	vfree(obm); // vfree(NULL) is noop
@@ -473,9 +473,9 @@
 	ERR_IF(!b) return 0;
 	// MUST_BE_LOCKED(); well. yes. but ...
 
-	spin_lock_irqsave(&b->bm_lock,flags);
+	spin_lock_irqsave(&b->bm_lock, flags);
 	s = b->bm_set;
-	spin_unlock_irqrestore(&b->bm_lock,flags);
+	spin_unlock_irqrestore(&b->bm_lock, flags);
 
 	return s;
 }
@@ -620,7 +620,7 @@
 
 	spin_lock_irq(&b->bm_lock);
 	BM_PARANOIA_CHECK();
-	memset(b->bm,0xff,b->bm_words*sizeof(long));
+	memset(b->bm, 0xff, b->bm_words*sizeof(long));
 	bm_clear_surplus(b);
 	b->bm_set = b->bm_bits;
 	spin_unlock_irq(&b->bm_lock);
@@ -629,7 +629,7 @@
 int drbd_bm_async_io_complete(struct bio *bio, unsigned int bytes_done, int error)
 {
 	struct drbd_bitmap *b = bio->bi_private;
-	int uptodate = bio_flagged(bio,BIO_UPTODATE);
+	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 
 	if (bio->bi_size)
 		return 1;
@@ -646,7 +646,7 @@
 		 * for now, set all bits, and flag MD_IO_ERROR
 		 */
 		/* FIXME kmap_atomic memset etc. pp. */
-		__set_bit(BM_MD_IO_ERROR,&b->bm_flags);
+		__set_bit(BM_MD_IO_ERROR, &b->bm_flags);
 	}
 	if (atomic_dec_and_test(&b->bm_async_io))
 		wake_up(&b->bm_io_wait);
@@ -681,7 +681,7 @@
 
 	if (FAULT_ACTIVE(mdev, (rw&WRITE)?DRBD_FAULT_MD_WR:DRBD_FAULT_MD_RD)) {
 		bio->bi_rw |= rw;
-		bio_endio(bio,bio->bi_size,-EIO);
+		bio_endio(bio, bio->bi_size, -EIO);
 	}
 	else
 		submit_bio(rw, bio);
@@ -691,7 +691,7 @@
  * @enr is _sector_ offset from start of on disk bitmap (aka bm-extent nr).
  * returns 0 on success, -EIO on failure
  */
-int drbd_bm_read_sect(drbd_dev *mdev,unsigned long enr)
+int drbd_bm_read_sect(drbd_dev *mdev, unsigned long enr)
 {
 	sector_t on_disk_sector = mdev->bc->md.md_offset + mdev->bc->md.bm_offset + enr;
 	int bm_words, num_words, offset, err  = 0;
@@ -699,7 +699,7 @@
 	// MUST_BE_LOCKED(); not neccessarily global ...
 
 	down(&mdev->md_io_mutex);
-	if (drbd_md_sync_page_io(mdev,mdev->bc,on_disk_sector,READ)) {
+	if (drbd_md_sync_page_io(mdev, mdev->bc, on_disk_sector, READ)) {
 		bm_words  = drbd_bm_words(mdev);
 		offset    = S2W(enr);	// word offset into bitmap
 		num_words = min(S2W(1), bm_words - offset);
@@ -718,7 +718,7 @@
 		drbd_chk_io_error(mdev, 1, TRUE);
 		drbd_io_error(mdev, TRUE);
 		for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
-			drbd_bm_ALe_set_all(mdev,enr*AL_EXT_PER_BM_SECT+i);
+			drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
 	}
 	up(&mdev->md_io_mutex);
 	return err;
@@ -787,11 +787,11 @@
 
 	now = jiffies;
 	atomic_set(&b->bm_async_io, num_pages);
-	__clear_bit(BM_MD_IO_ERROR,&b->bm_flags);
+	__clear_bit(BM_MD_IO_ERROR, &b->bm_flags);
 
 	for (i = 0; i < num_pages; i++) {
 		/* let the layers below us try to merge these bios... */
-		drbd_bm_page_io_async(mdev,b,i,rw);
+		drbd_bm_page_io_async(mdev, b, i, rw);
 	}
 
 	drbd_blk_run_queue(bdev_get_queue(mdev->bc->md_bdev));
@@ -799,7 +799,7 @@
 	INFO("%s of bitmap took %lu jiffies\n",
 	     rw == READ ? "reading" : "writing", jiffies - now);
 
-	if (test_bit(BM_MD_IO_ERROR,&b->bm_flags)) {
+	if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
 		ALERT("we had at least one MD IO ERROR during bitmap IO\n");
 		drbd_chk_io_error(mdev, 1, TRUE);
 		drbd_io_error(mdev, TRUE);
@@ -823,7 +823,7 @@
 	mdev->bitmap = b;
 
 	INFO("%s marked out-of-sync by on disk bit-map.\n",
-	     ppsize(ppb,drbd_bm_total_weight(mdev) << (BM_BLOCK_SIZE_B-10)) );
+	     ppsize(ppb, drbd_bm_total_weight(mdev) << (BM_BLOCK_SIZE_B-10)) );
 
 	return err;
 }
@@ -851,7 +851,7 @@
  * @enr: The _sector_ offset from the start of the bitmap.
  *
  */
-int drbd_bm_write_sect(struct Drbd_Conf *mdev,unsigned long enr)
+int drbd_bm_write_sect(struct Drbd_Conf *mdev, unsigned long enr)
 {
 	sector_t on_disk_sector = enr + mdev->bc->md.md_offset + mdev->bc->md.bm_offset;
 	int bm_words, num_words, offset, err  = 0;
@@ -867,11 +867,11 @@
 			enr, offset, num_words);
 #endif
 	if (num_words < S2W(1)) {
-		memset(page_address(mdev->md_io_page),0,MD_HARDSECT);
+		memset(page_address(mdev->md_io_page), 0, MD_HARDSECT);
 	}
 	drbd_bm_get_lel( mdev, offset, num_words,
 			 page_address(mdev->md_io_page) );
-	if (!drbd_md_sync_page_io(mdev,mdev->bc,on_disk_sector,WRITE)) {
+	if (!drbd_md_sync_page_io(mdev, mdev->bc, on_disk_sector, WRITE)) {
 		int i;
 		err = -EIO;
 		ERR( "IO ERROR writing bitmap sector %lu "
@@ -880,7 +880,7 @@
 		drbd_chk_io_error(mdev, 1, TRUE);
 		drbd_io_error(mdev, TRUE);
 		for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
-			drbd_bm_ALe_set_all(mdev,enr*AL_EXT_PER_BM_SECT+i);
+			drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
 	}
 	mdev->bm_writ_cnt++;
 	up(&mdev->md_io_mutex);
@@ -912,7 +912,7 @@
 
 	spin_lock_irq(&b->bm_lock);
 	BM_PARANOIA_CHECK();
-	memset(b->bm,0,b->bm_words*sizeof(long));
+	memset(b->bm, 0, b->bm_words*sizeof(long));
 	b->bm_set = 0;
 	spin_unlock_irq(&b->bm_lock);
 }
@@ -948,9 +948,9 @@
 	spin_lock_irq(&b->bm_lock);
 	BM_PARANOIA_CHECK();
 	if (b->bm_fo < b->bm_bits) {
-		i = find_next_bit(b->bm,b->bm_bits,b->bm_fo);
+		i = find_next_bit(b->bm, b->bm_bits, b->bm_fo);
 	} else if (b->bm_fo > b->bm_bits) {
-		ERR("bm_fo=%lu bm_bits=%lu\n",b->bm_fo, b->bm_bits);
+		ERR("bm_fo=%lu bm_bits=%lu\n", b->bm_fo, b->bm_bits);
 	}
 	if (i >= b->bm_bits) {
 		i = -1UL;
@@ -1007,7 +1007,7 @@
 	BM_PARANOIA_CHECK();
 	MUST_NOT_BE_LOCKED();
 	ERR_IF (bitnr >= b->bm_bits) {
-		ERR("bitnr=%lu bm_bits=%lu\n",bitnr, b->bm_bits);
+		ERR("bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
 		i = 0;
 	} else {
 		i = (0 != __test_and_set_bit(bitnr, b->bm));
@@ -1032,7 +1032,7 @@
 	MUST_NOT_BE_LOCKED();
 	for (bitnr = s; bitnr <=e; bitnr++) {
 		ERR_IF (bitnr >= b->bm_bits) {
-			ERR("bitnr=%lu bm_bits=%lu\n",bitnr, b->bm_bits);
+			ERR("bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
 		} else {
 			c += (0 == __test_and_set_bit(bitnr, b->bm));
 		}
@@ -1053,17 +1053,17 @@
 	ERR_IF(!b) return 0;
 	ERR_IF(!b->bm) return 0;
 
-	spin_lock_irqsave(&b->bm_lock,flags);
+	spin_lock_irqsave(&b->bm_lock, flags);
 	BM_PARANOIA_CHECK();
 	MUST_NOT_BE_LOCKED();
 	ERR_IF (bitnr >= b->bm_bits) {
-		ERR("bitnr=%lu bm_bits=%lu\n",bitnr, b->bm_bits);
+		ERR("bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
 		i = 0;
 	} else {
 		i = (0 != __test_and_clear_bit(bitnr, b->bm));
 		b->bm_set -= i;
 	}
-	spin_unlock_irqrestore(&b->bm_lock,flags);
+	spin_unlock_irqrestore(&b->bm_lock, flags);
 
 	/* clearing bits should only take place when sync is in progress!
 	 * this is only called from drbd_set_in_sync.
@@ -1097,7 +1097,7 @@
 	} else if (bitnr == b->bm_bits) {
 		i = -1;
 	} else /* (bitnr > b->bm_bits) */ {
-		ERR("bitnr=%lu > bm_bits=%lu\n",bitnr, b->bm_bits);
+		ERR("bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
 		i = 0;
 	}
 
@@ -1127,11 +1127,11 @@
 
 	ERR_IF(!b) return 0;
 	ERR_IF(!b->bm) return 0;
-	spin_lock_irqsave(&b->bm_lock,flags);
+	spin_lock_irqsave(&b->bm_lock, flags);
 	BM_PARANOIA_CHECK();
 
 	s = S2W(enr);
-	e = min((size_t)S2W(enr+1),b->bm_words);
+	e = min((size_t)S2W(enr+1), b->bm_words);
 	count = 0;
 	if (s < b->bm_words) {
 		const unsigned long* w = b->bm+s;
@@ -1140,7 +1140,7 @@
 	} else {
 		ERR("start offset (%d) too large in drbd_bm_e_weight\n", s);
 	}
-	spin_unlock_irqrestore(&b->bm_lock,flags);
+	spin_unlock_irqrestore(&b->bm_lock, flags);
 #if DUMP_MD >= 3
 	INFO("enr=%lu weight=%d e=%d s=%d\n", enr, count, e, s);
 #endif
@@ -1170,7 +1170,7 @@
 		int n = e-s;
 		while (n--) count += hweight_long(*w++);
 		n = e-s;
-		memset(b->bm+s,-1,n*sizeof(long));
+		memset(b->bm+s, -1, n*sizeof(long));
 		b->bm_set += n*BITS_PER_LONG - count;
 		if (e == b->bm_words) {
 			b->bm_set -= bm_clear_surplus(b);

Modified: branches/drbd-8.0-for-linus/drbd/drbd_int.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_int.h	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_int.h	2007-07-24 11:38:05 UTC (rev 2985)
@@ -15,7 +15,7 @@
 
   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
   GNU General Public License for more details.
 
   You should have received a copy of the GNU General Public License
@@ -87,8 +87,8 @@
 #define DRBD_SIGKILL SIGHUP
 
 #define ID_SYNCER (-1ULL)
-#define ID_VACANT 0     // All EEs on the free list should have this value
-                        // freshly allocated EEs get !ID_VACANT (== 1)
+#define ID_VACANT 0	// All EEs on the free list should have this value
+			// freshly allocated EEs get !ID_VACANT (== 1)
 			// so if it says "cannot dereference null
 			// pointer at adress 0x00000001, it is most
 			// probably one of these :(
@@ -114,11 +114,11 @@
  *************************/
 
 // handy macro: DUMPP(somepointer)
-#define DUMPP(A)   ERR( #A " = %p in %s:%d\n",  (A),__FILE__,__LINE__);
-#define DUMPLU(A)  ERR( #A " = %lu in %s:%d\n", (unsigned long)(A),__FILE__,__LINE__);
-#define DUMPLLU(A) ERR( #A " = %llu in %s:%d\n",(unsigned long long)(A),__FILE__,__LINE__);
-#define DUMPLX(A)  ERR( #A " = %lx in %s:%d\n", (A),__FILE__,__LINE__);
-#define DUMPI(A)   ERR( #A " = %d in %s:%d\n",  (int)(A),__FILE__,__LINE__);
+#define DUMPP(A)   ERR( #A " = %p in %s:%d\n", (A), __FILE__, __LINE__);
+#define DUMPLU(A)  ERR( #A " = %lu in %s:%d\n", (unsigned long)(A), __FILE__, __LINE__);
+#define DUMPLLU(A) ERR( #A " = %llu in %s:%d\n", (unsigned long long)(A), __FILE__, __LINE__);
+#define DUMPLX(A)  ERR( #A " = %lx in %s:%d\n", (A), __FILE__, __LINE__);
+#define DUMPI(A)   ERR( #A " = %d in %s:%d\n", (int)(A), __FILE__, __LINE__);
 
 #define DUMPST(A) DUMPLLU((unsigned long long)(A))
 
@@ -137,23 +137,23 @@
 #endif
 
 // Info: do not remove the spaces around the "," before ##
-//       Otherwise this is not portable from gcc-2.95 to gcc-3.3
-#define PRINTK(level,fmt,args...) \
+//	 Otherwise this is not portable from gcc-2.95 to gcc-3.3
+#define PRINTK(level, fmt, args...) \
 	printk(level DEVICE_NAME "%d: " fmt, \
 		mdev->minor , ##args)
 
-#define ALERT(fmt,args...) PRINTK(KERN_ALERT, fmt , ##args)
-#define ERR(fmt,args...)  PRINTK(KERN_ERR, fmt , ##args)
-#define WARN(fmt,args...) PRINTK(KERN_WARNING, fmt , ##args)
-#define INFO(fmt,args...) PRINTK(KERN_INFO, fmt , ##args)
-#define DBG(fmt,args...)  PRINTK(KERN_DEBUG, fmt , ##args)
+#define ALERT(fmt, args...) PRINTK(KERN_ALERT, fmt , ##args)
+#define ERR(fmt, args...)   PRINTK(KERN_ERR, fmt , ##args)
+#define WARN(fmt, args...)  PRINTK(KERN_WARNING, fmt , ##args)
+#define INFO(fmt, args...)  PRINTK(KERN_INFO, fmt , ##args)
+#define DBG(fmt, args...)   PRINTK(KERN_DEBUG, fmt , ##args)
 
 /* see kernel/printk.c:printk_ratelimit
  * macro, so it is easy do have independend rate limits at different locations
  * "initializer element not constant ..." with kernel 2.4 :(
  * so I initialize toks to something large
  */
-#define DRBD_ratelimit(ratelimit_jiffies,ratelimit_burst)	\
+#define DRBD_ratelimit(ratelimit_jiffies, ratelimit_burst)	\
 ({								\
 	int __ret;						\
 	static unsigned long toks = 0x80000000UL;		\
@@ -169,7 +169,7 @@
 		missed = 0;					\
 		toks -= ratelimit_jiffies;			\
 		if (lost)					\
-			WARN("%d messages suppressed in %s:%d.\n",\
+			WARN("%d messages suppressed in %s:%d.\n", \
 				lost , __FILE__ , __LINE__ );	\
 		__ret=1;					\
 	} else {						\
@@ -182,15 +182,15 @@
 
 #ifdef DBG_ASSERTS
 extern void drbd_assert_breakpoint(drbd_dev*, char *, char *, int );
-# define D_ASSERT(exp)  if (!(exp)) \
-	 drbd_assert_breakpoint(mdev,#exp,__FILE__,__LINE__)
+# define D_ASSERT(exp)	if (!(exp)) \
+	 drbd_assert_breakpoint(mdev, #exp, __FILE__, __LINE__)
 #else
-# define D_ASSERT(exp)  if (!(exp)) \
-	 ERR("ASSERT( " #exp " ) in %s:%d\n", __FILE__,__LINE__)
+# define D_ASSERT(exp)	if (!(exp)) \
+	 ERR("ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
 #endif
 #define ERR_IF(exp) if (({ \
 	int _b = (exp)!=0; \
-	if (_b) ERR("%s: (" #exp ") in %s:%d\n", __func__, __FILE__,__LINE__); \
+	if (_b) ERR("%s: (" #exp ") in %s:%d\n", __func__, __FILE__, __LINE__); \
 	 _b; \
 	}))
 
@@ -211,11 +211,11 @@
 extern unsigned int _drbd_insert_fault(drbd_dev *mdev, unsigned int type);
 static inline int
 drbd_insert_fault(drbd_dev *mdev, unsigned int type) {
-    return (fault_rate && 
-	    (enable_faults & (1<<type)) && 
-	    _drbd_insert_fault(mdev,type));
+    return (fault_rate &&
+	    (enable_faults & (1<<type)) &&
+	    _drbd_insert_fault(mdev, type));
 }
-#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m),(_t)))
+#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t)))
 
 #else
 #define FAULT_ACTIVE(_m, _t) (0)
@@ -223,17 +223,17 @@
 
 #include <linux/stringify.h>
 // integer division, round _UP_ to the next integer
-#define div_ceil(A,B) ( (A)/(B) + ((A)%(B) ? 1 : 0) )
+#define div_ceil(A, B) ( (A)/(B) + ((A)%(B) ? 1 : 0) )
 // usual integer division
-#define div_floor(A,B) ( (A)/(B) )
+#define div_floor(A, B) ( (A)/(B) )
 
 /*
  * Compatibility Section
  *************************/
 
-#define LOCK_SIGMASK(task,flags)   spin_lock_irqsave(&task->sighand->siglock, flags)
-#define UNLOCK_SIGMASK(task,flags) spin_unlock_irqrestore(&task->sighand->siglock, flags)
-#define RECALC_SIGPENDING()        recalc_sigpending();
+#define LOCK_SIGMASK(task, flags)   spin_lock_irqsave(&task->sighand->siglock, flags)
+#define UNLOCK_SIGMASK(task, flags) spin_unlock_irqrestore(&task->sighand->siglock, flags)
+#define RECALC_SIGPENDING()	    recalc_sigpending();
 
 #if defined(DBG_SPINLOCKS) && defined(__SMP__)
 # define MUST_HOLD(lock) if (!spin_is_locked(lock)) { ERR("Not holding lock! in %s\n", __FUNCTION__ ); }
@@ -253,10 +253,10 @@
  *************************/
 
 #define SET_MDEV_MAGIC(x) \
-	({ typecheck(struct Drbd_Conf*,x); \
+	({ typecheck(struct Drbd_Conf*, x); \
 	  (x)->magic = (long)(x) ^ DRBD_MAGIC; })
 #define IS_VALID_MDEV(x)  \
-	( typecheck(struct Drbd_Conf*,x) && \
+	( typecheck(struct Drbd_Conf*, x) && \
 	  ((x) ? (((x)->magic ^ DRBD_MAGIC) == (long)(x)):0))
 
 /* drbd_meta-data.c (still in drbd_main.c) */
@@ -289,7 +289,7 @@
 	AuthResponse,
 	StateChgRequest,
 
-	Ping,         // These are sent on the meta socket...
+	Ping,	      // These are sent on the meta socket...
 	PingAck,
 	RecvAck,      // Used in protocol B
 	WriteAck,     // Used in protocol C
@@ -320,34 +320,34 @@
 	 * when we want to support more than
 	 * one PRO_VERSION */
 	static const char *cmdnames[] = {
-		[Data]             = "Data",
-		[DataReply]        = "DataReply",
-		[RSDataReply]      = "RSDataReply",
-		[Barrier]          = "Barrier",
-		[ReportBitMap]     = "ReportBitMap",
+		[Data]		   = "Data",
+		[DataReply]	   = "DataReply",
+		[RSDataReply]	   = "RSDataReply",
+		[Barrier]	   = "Barrier",
+		[ReportBitMap]	   = "ReportBitMap",
 		[BecomeSyncTarget] = "BecomeSyncTarget",
 		[BecomeSyncSource] = "BecomeSyncSource",
-		[UnplugRemote]     = "UnplugRemote",
-		[DataRequest]      = "DataRequest",
+		[UnplugRemote]	   = "UnplugRemote",
+		[DataRequest]	   = "DataRequest",
 		[RSDataRequest]    = "RSDataRequest",
-		[SyncParam]        = "SyncParam",
+		[SyncParam]	   = "SyncParam",
 		[ReportProtocol]   = "ReportProtocol",
-		[ReportUUIDs]      = "ReportUUIDs",
-		[ReportSizes]      = "ReportSizes",
-		[ReportState]      = "ReportState",
+		[ReportUUIDs]	   = "ReportUUIDs",
+		[ReportSizes]	   = "ReportSizes",
+		[ReportState]	   = "ReportState",
 		[ReportSyncUUID]   = "ReportSyncUUID",
 		[AuthChallenge]    = "AuthChallenge",
-		[AuthResponse]     = "AuthResponse",
-		[Ping]             = "Ping",
-		[PingAck]          = "PingAck",
-		[RecvAck]          = "RecvAck",
-		[WriteAck]         = "WriteAck",
-		[RSWriteAck]       = "RSWriteAck",
-		[DiscardAck]       = "DiscardAck",
-		[NegAck]           = "NegAck",
-		[NegDReply]        = "NegDReply",
-		[NegRSDReply]      = "NegRSDReply",
-		[BarrierAck]       = "BarrierAck",
+		[AuthResponse]	   = "AuthResponse",
+		[Ping]		   = "Ping",
+		[PingAck]	   = "PingAck",
+		[RecvAck]	   = "RecvAck",
+		[WriteAck]	   = "WriteAck",
+		[RSWriteAck]	   = "RSWriteAck",
+		[DiscardAck]	   = "DiscardAck",
+		[NegAck]	   = "NegAck",
+		[NegDReply]	   = "NegDReply",
+		[NegRSDReply]	   = "NegRSDReply",
+		[BarrierAck]	   = "BarrierAck",
 		[StateChgRequest]  = "StateChgRequest",
 		[StateChgReply]    = "StateChgReply"
 	};
@@ -375,18 +375,18 @@
 /* This is the layout for a packet on the wire.
  * The byteorder is the network byte order.
  *     (except block_id and barrier fields.
- *      these are pointers to local structs
- *      and have no relevance for the partner,
- *      which just echoes them as received.)
+ *	these are pointers to local structs
+ *	and have no relevance for the partner,
+ *	which just echoes them as received.)
  *
  * NOTE that the payload starts at a long aligned offset,
  * regardless of 32 or 64 bit arch!
  */
 typedef struct {
-	u32       magic;
-	u16       command;
-	u16       length;	// bytes of data after this header
-	char      payload[0];
+	u32	  magic;
+	u16	  command;
+	u16	  length;	// bytes of data after this header
+	char	  payload[0];
 } __attribute((packed)) Drbd_Header;
 // 8 bytes. packet FIXED for the next century!
 
@@ -405,16 +405,16 @@
  *   Data, DataReply (see Drbd_Data_Packet)
  */
 
-#define DP_HARDBARRIER        1
-#define DP_RW_SYNC            2
+#define DP_HARDBARRIER	      1
+#define DP_RW_SYNC	      2
 #define DP_MAY_SET_IN_SYNC    4
 
 typedef struct {
 	Drbd_Header head;
-	u64         sector;    // 64 bits sector number
-	u64         block_id;  // Used in protocol B&C for the address of the req.
-	u32         seq_num;
-	u32         dp_flags;
+	u64	    sector;    // 64 bits sector number
+	u64	    block_id;  // Used in protocol B&C for the address of the req.
+	u32	    seq_num;
+	u32	    dp_flags;
 } __attribute((packed)) Drbd_Data_Packet;
 
 /*
@@ -427,19 +427,19 @@
  */
 typedef struct {
 	Drbd_Header head;
-	u64         sector;
-	u64         block_id;
-	u32         blksize;
-	u32         seq_num;
+	u64	    sector;
+	u64	    block_id;
+	u32	    blksize;
+	u32	    seq_num;
 } __attribute((packed)) Drbd_BlockAck_Packet;
 
 
 typedef struct {
 	Drbd_Header head;
-	u64         sector;
-	u64         block_id;
-	u32         blksize;
-	u32         pad;	//make sure packet is a multiple of 8 Byte
+	u64	    sector;
+	u64	    block_id;
+	u32	    blksize;
+	u32	    pad;	//make sure packet is a multiple of 8 Byte
 } __attribute((packed)) Drbd_BlockRequest_Packet;
 
 /*
@@ -453,108 +453,108 @@
 
 typedef struct {
 	Drbd_Header head;		// 8 bytes
-	u32         protocol_version;
-	u32         feature_flags;
+	u32	    protocol_version;
+	u32	    feature_flags;
 
 	/* should be more than enough for future enhancements
 	 * for now, feature_flags and the reserverd array shall be zero.
 	 */
 
-	u64         reserverd[8];
+	u64	    reserverd[8];
 } __attribute((packed)) Drbd_HandShake_Packet;
 // 80 bytes, FIXED for the next century
 
 typedef struct {
 	Drbd_Header head;
-	u32         barrier;	// barrier number _handle_ only
-	u32         pad;	// make sure packet is a multiple of 8 Byte
+	u32	    barrier;	// barrier number _handle_ only
+	u32	    pad;	// make sure packet is a multiple of 8 Byte
 } __attribute((packed)) Drbd_Barrier_Packet;
 
 typedef struct {
 	Drbd_Header head;
-	u32         barrier;
-	u32         set_size;
+	u32	    barrier;
+	u32	    set_size;
 } __attribute((packed)) Drbd_BarrierAck_Packet;
 
 typedef struct {
 	Drbd_Header head;
-	u32         rate;
+	u32	    rate;
 } __attribute((packed)) Drbd_SyncParam_Packet;
 
 typedef struct {
 	Drbd_Header head;
-	u32         protocol;
-	u32         after_sb_0p;
-	u32         after_sb_1p;
-	u32         after_sb_2p;
-	u32         want_lose;
-	u32         two_primaries;
+	u32	    protocol;
+	u32	    after_sb_0p;
+	u32	    after_sb_1p;
+	u32	    after_sb_2p;
+	u32	    want_lose;
+	u32	    two_primaries;
 } __attribute((packed)) Drbd_Protocol_Packet;
 
 typedef struct {
 	Drbd_Header head;
-	u64         uuid[EXT_UUID_SIZE];
+	u64	    uuid[EXT_UUID_SIZE];
 } __attribute((packed)) Drbd_GenCnt_Packet;
 
 typedef struct {
 	Drbd_Header head;
-	u64         uuid;
+	u64	    uuid;
 } __attribute((packed)) Drbd_SyncUUID_Packet;
 
 typedef struct {
 	Drbd_Header head;
-	u64         d_size;  // size of disk
-	u64         u_size;  // user requested size
-	u64         c_size;  // current exported size
-	u32         max_segment_size;  // Maximal size of a BIO
-	u32         queue_order_type;
+	u64	    d_size;  // size of disk
+	u64	    u_size;  // user requested size
+	u64	    c_size;  // current exported size
+	u32	    max_segment_size;  // Maximal size of a BIO
+	u32	    queue_order_type;
 } __attribute((packed)) Drbd_Sizes_Packet;
 
 typedef struct {
 	Drbd_Header head;
-	u32         state;
+	u32	    state;
 } __attribute((packed)) Drbd_State_Packet;
 
 typedef struct {
 	Drbd_Header head;
-	u32         mask;
-	u32         val;
+	u32	    mask;
+	u32	    val;
 } __attribute((packed)) Drbd_Req_State_Packet;
 
 typedef struct {
 	Drbd_Header head;
-	u32         retcode;
+	u32	    retcode;
 } __attribute((packed)) Drbd_RqS_Reply_Packet;
 
 typedef struct {
-	u64       size;
-	u32       state;
-	u32       blksize;
-	u32       protocol;
-	u32       version;
-	u32       gen_cnt[5];
-	u32       bit_map_gen[5];
+	u64	  size;
+	u32	  state;
+	u32	  blksize;
+	u32	  protocol;
+	u32	  version;
+	u32	  gen_cnt[5];
+	u32	  bit_map_gen[5];
 } __attribute((packed)) Drbd06_Parameter_P;
 
 typedef struct {
 	Drbd_Header head;
-	u64         block_id;
-	u32         seq_num;
-	u32         pad;
+	u64	    block_id;
+	u32	    seq_num;
+	u32	    pad;
 } __attribute((packed)) Drbd_Discard_Packet;
 
 typedef union {
-	Drbd_Header              head;
-	Drbd_HandShake_Packet    HandShake;
-	Drbd_Data_Packet         Data;
-	Drbd_BlockAck_Packet     BlockAck;
-	Drbd_Barrier_Packet      Barrier;
-	Drbd_BarrierAck_Packet   BarrierAck;
-	Drbd_SyncParam_Packet    SyncParam;
-	Drbd_Protocol_Packet     Protocol;
-	Drbd_Sizes_Packet        Sizes;
-	Drbd_GenCnt_Packet       GenCnt;
-	Drbd_State_Packet        State;
+	Drbd_Header		 head;
+	Drbd_HandShake_Packet	 HandShake;
+	Drbd_Data_Packet	 Data;
+	Drbd_BlockAck_Packet	 BlockAck;
+	Drbd_Barrier_Packet	 Barrier;
+	Drbd_BarrierAck_Packet	 BarrierAck;
+	Drbd_SyncParam_Packet	 SyncParam;
+	Drbd_Protocol_Packet	 Protocol;
+	Drbd_Sizes_Packet	 Sizes;
+	Drbd_GenCnt_Packet	 GenCnt;
+	Drbd_State_Packet	 State;
 	Drbd_Req_State_Packet	 ReqState;
 	Drbd_RqS_Reply_Packet	 RqSReply;
 	Drbd_BlockRequest_Packet BlockRequest;
@@ -582,7 +582,7 @@
 {
 	/* THINK testing the t_state seems to be uncritical in all cases
 	 * (but thread_{start,stop}), so we can read it *without* the lock.
-	 * 	--lge */
+	 *	--lge */
 
 	smp_rmb();
 	return (volatile int)thi->t_state;
@@ -631,7 +631,7 @@
 	struct list_head requests; // requests before
 	struct drbd_barrier *next; // pointer to the next barrier
 	unsigned int br_number;  // the barriers identifier.
-	int n_req;      // number of requests attached before this barrier
+	int n_req;	// number of requests attached before this barrier
 };
 
 typedef struct drbd_request drbd_request_t;
@@ -687,14 +687,14 @@
 	UNPLUG_QUEUED,		// only relevant with kernel 2.4
 	UNPLUG_REMOTE,		// whether sending a "UnplugRemote" makes sense
 	MD_DIRTY,		// current gen counts and flags not yet on disk
-	DISCARD_CONCURRENT,     // Set on one node, cleared on the peer!
+	DISCARD_CONCURRENT,	// Set on one node, cleared on the peer!
 	USE_DEGR_WFC_T,		// Use degr-wfc-timeout instead of wfc-timeout.
-	CLUSTER_ST_CHANGE,      // Cluster wide state change going on...
+	CLUSTER_ST_CHANGE,	// Cluster wide state change going on...
 	CL_ST_CHG_SUCCESS,
 	CL_ST_CHG_FAIL,
 	CRASHED_PRIMARY,	// This node was a crashed primary. Gets
-	                        // cleared when the state.conn  goes into
-	                        // Connected state.
+				// cleared when the state.conn	goes into
+				// Connected state.
 	WRITE_BM_AFTER_RESYNC	// A kmalloc() during resync failed
 };
 
@@ -724,7 +724,7 @@
 struct drbd_socket {
 	struct drbd_work_queue work;
 	struct semaphore  mutex;
-	struct socket    *socket;
+	struct socket	 *socket;
 	Drbd_Polymorph_Packet sbuf;  // this way we get our
 	Drbd_Polymorph_Packet rbuf;  // send/receive buffers off the stack
 };
@@ -749,10 +749,10 @@
 
 // for sync_conf and other types...
 #define PACKET(name, number, fields) struct name { fields };
-#define INTEGER(pn,pr,member) int member;
-#define INT64(pn,pr,member) __u64 member;
-#define BIT(pn,pr,member)   unsigned member : 1;
-#define STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
+#define INTEGER(pn, pr, member) int member;
+#define INT64(pn, pr, member) __u64 member;
+#define BIT(pn, pr, member) unsigned member : 1;
+#define STRING(pn, pr, member, len) unsigned char member[len]; int member ## _len;
 #include "linux/drbd_nl.h"
 
 struct drbd_backing_dev {
@@ -779,7 +779,7 @@
 	sector_t p_size;     /* partner's disk size */
 	request_queue_t     *rq_queue;
 	struct block_device *this_bdev;
-	struct gendisk      *vdisk;
+	struct gendisk	    *vdisk;
 
 	struct drbd_socket data; // for data/barrier/cstate/parameter packets
 	struct drbd_socket meta; // for ping/ack (metadata) packets
@@ -787,7 +787,7 @@
 	volatile unsigned int ko_count;
 	struct drbd_work  resync_work,
 			  unplug_work,
-	                  md_sync_work;
+			  md_sync_work;
 	struct timer_list resync_timer;
 	struct timer_list md_sync_timer;
 
@@ -801,12 +801,12 @@
 	unsigned int writ_cnt;
 	unsigned int al_writ_cnt;
 	unsigned int bm_writ_cnt;
-	atomic_t ap_bio_cnt;     // Requests we need to complete
+	atomic_t ap_bio_cnt;	 // Requests we need to complete
 	atomic_t ap_pending_cnt; // AP data packets on the wire, ack expected
 	atomic_t rs_pending_cnt; // RS request/data packets on the wire
-	atomic_t unacked_cnt;    // Need to send replys for
-	atomic_t local_cnt;      // Waiting for local disk to signal completion
-	atomic_t net_cnt;        // Users of net_conf
+	atomic_t unacked_cnt;	 // Need to send replys for
+	atomic_t local_cnt;	 // Waiting for local disk to signal completion
+	atomic_t net_cnt;	 // Users of net_conf
 	spinlock_t req_lock;
 	struct drbd_barrier* unused_spare_barrier; /* for pre-allocation */
 	struct drbd_barrier* newest_barrier;
@@ -854,7 +854,7 @@
 	struct lru_cache* act_log;     // activity log
 	unsigned int al_tr_number;
 	int al_tr_cycle;
-	int al_tr_pos;     // position of the next transaction in the journal
+	int al_tr_pos;	   // position of the next transaction in the journal
 	struct crypto_hash* cram_hmac_tfm;
 	wait_queue_head_t seq_wait;
 	atomic_t packet_seq;
@@ -911,9 +911,9 @@
 // drbd_main.c
 
 enum chg_state_flags {
-	ChgStateHard    = 1,
+	ChgStateHard	= 1,
 	ChgStateVerbose = 2,
-	ScheduleAfter   = 4,
+	ScheduleAfter	= 4,
 };
 
 extern int drbd_change_state(drbd_dev* mdev, enum chg_state_flags f,
@@ -929,10 +929,10 @@
 extern void _drbd_thread_stop(struct Drbd_thread *thi, int restart, int wait);
 extern void drbd_thread_signal(struct Drbd_thread *thi);
 extern void drbd_free_resources(drbd_dev *mdev);
-extern void tl_release(drbd_dev *mdev,unsigned int barrier_nr,
+extern void tl_release(drbd_dev *mdev, unsigned int barrier_nr,
 		       unsigned int set_size);
 extern void tl_clear(drbd_dev *mdev);
-extern struct drbd_barrier *_tl_add_barrier(drbd_dev *,struct drbd_barrier *);
+extern struct drbd_barrier *_tl_add_barrier(drbd_dev *, struct drbd_barrier *);
 extern void drbd_free_sock(drbd_dev *mdev);
 extern int drbd_send(drbd_dev *mdev, struct socket *sock,
 		     void* buf, size_t size, unsigned msg_flags);
@@ -966,7 +966,7 @@
 extern int drbd_send_dblock(drbd_dev *mdev, drbd_request_t *req);
 extern int _drbd_send_barrier(drbd_dev *mdev, struct drbd_barrier *barrier);
 extern int drbd_send_drequest(drbd_dev *mdev, int cmd,
-			      sector_t sector,int size, u64 block_id);
+			      sector_t sector, int size, u64 block_id);
 extern int drbd_send_bitmap(drbd_dev *mdev);
 extern int _drbd_send_bitmap(drbd_dev *mdev);
 extern int drbd_send_sr_reply(drbd_dev *mdev, int retcode);
@@ -978,7 +978,7 @@
 extern void drbd_md_sync(drbd_dev *mdev);
 extern int  drbd_md_read(drbd_dev *mdev, struct drbd_backing_dev * bdev);
 // maybe define them below as inline?
-extern void drbd_uuid_set(drbd_dev *mdev,int idx, u64 val);
+extern void drbd_uuid_set(drbd_dev *mdev, int idx, u64 val);
 extern void _drbd_uuid_set(drbd_dev *mdev, int idx, u64 val);
 extern void drbd_uuid_new_current(drbd_dev *mdev);
 extern void drbd_uuid_set_bm(drbd_dev *mdev, u64 val);
@@ -994,16 +994,16 @@
 
 #define MD_RESERVED_SECT ( 128LU << 11 )  // 128 MB, unit sectors
 // The following numbers are sectors
-#define MD_AL_OFFSET 8      // 8 Sectors after start of meta area
+#define MD_AL_OFFSET 8	    // 8 Sectors after start of meta area
 #define MD_AL_MAX_SIZE 64   // = 32 kb LOG  ~ 3776 extents ~ 14 GB Storage
 #define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE) //Allows up to about 3.8TB
 
-#define MD_HARDSECT_B    9     // Since the smalles IO unit is usually 512 byte
-#define MD_HARDSECT      (1<<MD_HARDSECT_B)
+#define MD_HARDSECT_B	 9     // Since the smalles IO unit is usually 512 byte
+#define MD_HARDSECT	 (1<<MD_HARDSECT_B)
 
 // activity log
-#define AL_EXTENTS_PT    ((MD_HARDSECT-12)/8-1) // 61 ; Extents per 512B sector
-#define AL_EXTENT_SIZE_B 22      // One extent represents 4M Storage
+#define AL_EXTENTS_PT	 ((MD_HARDSECT-12)/8-1) // 61 ; Extents per 512B sector
+#define AL_EXTENT_SIZE_B 22	 // One extent represents 4M Storage
 #define AL_EXTENT_SIZE (1<<AL_EXTENT_SIZE_B)
 
 #if BITS_PER_LONG == 32
@@ -1039,11 +1039,11 @@
  */
 
 #define BM_BLOCK_SIZE_B  12			 //  4k per bit
-#define BM_BLOCK_SIZE    (1<<BM_BLOCK_SIZE_B)
+#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SIZE_B)
 /* (9+3) : 512 bytes @ 8 bits; representing 16M storage
  * per sector of on disk bitmap */
-#define BM_EXT_SIZE_B    (BM_BLOCK_SIZE_B + MD_HARDSECT_B + 3 )  // = 24
-#define BM_EXT_SIZE      (1<<BM_EXT_SIZE_B)
+#define BM_EXT_SIZE_B	 (BM_BLOCK_SIZE_B + MD_HARDSECT_B + 3 )  // = 24
+#define BM_EXT_SIZE	 (1<<BM_EXT_SIZE_B)
 
 #if (BM_EXT_SIZE_B != 24) || (BM_BLOCK_SIZE_B != 12)
 #error "HAVE YOU FIXED drbdmeta AS WELL??"
@@ -1081,21 +1081,21 @@
 
 /* the extent in "PER_EXTENT" below is an activity log extent
  * we need that many (long words/bytes) to store the bitmap
- *                   of one AL_EXTENT_SIZE chunk of storage.
+ *		     of one AL_EXTENT_SIZE chunk of storage.
  * we can store the bitmap for that many AL_EXTENTS within
  * one sector of the _on_disk_ bitmap:
- * bit   0        bit 37   bit 38            bit (512*8)-1
- *           ...|........|........|.. // ..|........|
- * sect. 0       `296     `304                     ^(512*8*8)-1
+ * bit	 0	  bit 37   bit 38	     bit (512*8)-1
+ *	     ...|........|........|.. // ..|........|
+ * sect. 0	 `296	  `304			   ^(512*8*8)-1
  *
 #define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
 #define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
-#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXTENT )        //   4
+#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXTENT )	 //   4
  */
 
 #define DRBD_MAX_SECTORS_32 (0xffffffffLU)
 #define DRBD_MAX_SECTORS_BM \
-          ( (MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SIZE_B-9)) )
+	  ( (MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SIZE_B-9)) )
 #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
@@ -1134,9 +1134,9 @@
 extern int  drbd_bm_read      (drbd_dev *mdev);
 extern int  drbd_bm_write     (drbd_dev *mdev);
 extern unsigned long drbd_bm_ALe_set_all (drbd_dev *mdev, unsigned long al_enr);
-extern size_t        drbd_bm_words       (drbd_dev *mdev);
-extern sector_t      drbd_bm_capacity    (drbd_dev *mdev);
-extern unsigned long drbd_bm_find_next   (drbd_dev *mdev);
+extern size_t	     drbd_bm_words	 (drbd_dev *mdev);
+extern sector_t      drbd_bm_capacity	 (drbd_dev *mdev);
+extern unsigned long drbd_bm_find_next	 (drbd_dev *mdev);
 extern void drbd_bm_set_find(drbd_dev *mdev, unsigned long i);
 extern unsigned long drbd_bm_total_weight(drbd_dev *mdev);
 extern int drbd_bm_rs_done(drbd_dev *mdev);
@@ -1157,7 +1157,7 @@
 #define drbd_bm_lock(mdev)    __drbd_bm_lock(mdev, __FILE__, __LINE__ )
 
 extern void _drbd_bm_recount_bits(drbd_dev *mdev, char* file, int line);
-#define drbd_bm_recount_bits(mdev) _drbd_bm_recount_bits(mdev,  __FILE__, __LINE__ )
+#define drbd_bm_recount_bits(mdev) _drbd_bm_recount_bits(mdev,	__FILE__, __LINE__ )
 // drbd_main.c
 
 /* needs to be included here,
@@ -1172,7 +1172,7 @@
 
 extern struct page* drbd_pp_pool; // drbd's page pool
 extern spinlock_t   drbd_pp_lock;
-extern int          drbd_pp_vacant;
+extern int	    drbd_pp_vacant;
 extern wait_queue_head_t drbd_pp_wait;
 
 extern drbd_dev *drbd_new_device(int minor);
@@ -1194,12 +1194,12 @@
 
 enum {
 	TraceTypePacket = 0x00000001,
-	TraceTypeRq     = 0x00000002,
+	TraceTypeRq	= 0x00000002,
 	TraceTypeUuid	= 0x00000004,
 	TraceTypeResync = 0x00000008,
-	TraceTypeEE     = 0x00000010,
+	TraceTypeEE	= 0x00000010,
 	TraceTypeUnplug = 0x00000020,
-	TraceTypeNl     = 0x00000040,
+	TraceTypeNl	= 0x00000040,
 	TraceTypeALExts = 0x00000080,
 };
 
@@ -1213,16 +1213,16 @@
 		( ( 1 << mdev_to_minor(mdev)) & trace_devs));
 }
 
-#define MTRACE(type,lvl,code...) \
+#define MTRACE(type, lvl, code...) \
 do { \
-	if (unlikely(is_mdev_trace(mdev,type,lvl))) { \
+	if (unlikely(is_mdev_trace(mdev, type, lvl))) { \
 		code \
 	} \
 } while (0)
 
-#define TRACE(type,lvl,code...) \
+#define TRACE(type, lvl, code...) \
 do { \
-	if (unlikely(is_trace(type,lvl))) { \
+	if (unlikely(is_trace(type, lvl))) { \
 		code \
 	} \
 } while (0)
@@ -1230,24 +1230,24 @@
 // Buffer printing support
 // DbgPrintFlags: used for Flags arg to DbgPrintBuffer
 // - DBGPRINT_BUFFADDR; if set, each line starts with the
-//       virtual address of the line being output. If clear,
-//       each line starts with the offset from the beginning
-//       of the buffer.
+//	 virtual address of the line being output. If clear,
+//	 each line starts with the offset from the beginning
+//	 of the buffer.
 typedef enum {
     DBGPRINT_BUFFADDR = 0x0001,
 }  DbgPrintFlags;
 
 extern void drbd_print_uuid(drbd_dev *mdev, unsigned int idx);
 
-extern void drbd_print_buffer(const char *prefix,unsigned int flags,int size,
-			      const void *buffer,const void *buffer_va,
+extern void drbd_print_buffer(const char *prefix, unsigned int flags, int size,
+			      const void *buffer, const void *buffer_va,
 			      unsigned int length);
 
 // Bio printing support
 extern void _dump_bio(drbd_dev *mdev, struct bio *bio, int complete);
 
 static inline void dump_bio(drbd_dev *mdev, struct bio *bio, int complete) {
-	MTRACE(TraceTypeRq,TraceLvlSummary,
+	MTRACE(TraceTypeRq, TraceLvlSummary,
 	       _dump_bio(mdev, bio, complete);
 		);
 }
@@ -1261,7 +1261,7 @@
 	    int recv, Drbd_Polymorph_Packet *p, char* file, int line)
 {
 	MTRACE(TraceTypePacket, TraceLvlSummary,
-	       _dump_packet(mdev,sock,recv,p,file,line);
+	       _dump_packet(mdev, sock, recv, p, file, line);
 		);
 }
 
@@ -1305,18 +1305,18 @@
 				sector_t sector, int rw);
 // worker callbacks
 extern int w_req_cancel_conflict (drbd_dev *, struct drbd_work *, int);
-extern int w_read_retry_remote   (drbd_dev *, struct drbd_work *, int);
-extern int w_e_end_data_req      (drbd_dev *, struct drbd_work *, int);
-extern int w_e_end_rsdata_req    (drbd_dev *, struct drbd_work *, int);
-extern int w_resync_inactive     (drbd_dev *, struct drbd_work *, int);
-extern int w_resume_next_sg      (drbd_dev *, struct drbd_work *, int);
-extern int w_io_error            (drbd_dev *, struct drbd_work *, int);
-extern int w_send_write_hint     (drbd_dev *, struct drbd_work *, int);
+extern int w_read_retry_remote	 (drbd_dev *, struct drbd_work *, int);
+extern int w_e_end_data_req	 (drbd_dev *, struct drbd_work *, int);
+extern int w_e_end_rsdata_req	 (drbd_dev *, struct drbd_work *, int);
+extern int w_resync_inactive	 (drbd_dev *, struct drbd_work *, int);
+extern int w_resume_next_sg	 (drbd_dev *, struct drbd_work *, int);
+extern int w_io_error		 (drbd_dev *, struct drbd_work *, int);
+extern int w_send_write_hint	 (drbd_dev *, struct drbd_work *, int);
 extern int w_make_resync_request (drbd_dev *, struct drbd_work *, int);
-extern int w_send_dblock         (drbd_dev *, struct drbd_work *, int);
-extern int w_send_barrier        (drbd_dev *, struct drbd_work *, int);
-extern int w_send_read_req       (drbd_dev *, struct drbd_work *, int);
-extern int w_prev_work_done      (drbd_dev *, struct drbd_work *, int);
+extern int w_send_dblock	 (drbd_dev *, struct drbd_work *, int);
+extern int w_send_barrier	 (drbd_dev *, struct drbd_work *, int);
+extern int w_send_read_req	 (drbd_dev *, struct drbd_work *, int);
+extern int w_prev_work_done	 (drbd_dev *, struct drbd_work *, int);
 
 extern void resync_timer_fn(unsigned long data);
 
@@ -1340,7 +1340,7 @@
 #endif
 
 // drbd_receiver.c
-extern int drbd_release_ee(drbd_dev* mdev,struct list_head* list);
+extern int drbd_release_ee(drbd_dev* mdev, struct list_head* list);
 extern struct Tl_epoch_entry* drbd_alloc_ee(drbd_dev *mdev,
 					    u64 id,
 					    sector_t sector,
@@ -1396,13 +1396,13 @@
 extern void drbd_rs_cancel_all(drbd_dev* mdev);
 extern int drbd_rs_del_all(drbd_dev* mdev);
 extern void drbd_rs_failed_io(drbd_dev* mdev, sector_t sector, int size);
-extern int drbd_al_read_log(struct Drbd_Conf *mdev,struct drbd_backing_dev *);
+extern int drbd_al_read_log(struct Drbd_Conf *mdev, struct drbd_backing_dev *);
 extern void __drbd_set_in_sync(drbd_dev* mdev, sector_t sector, int size, const char* file, const unsigned int line);
-#define drbd_set_in_sync(mdev,sector,size) \
-	__drbd_set_in_sync(mdev,sector,size, __FILE__, __LINE__ )
+#define drbd_set_in_sync(mdev, sector, size) \
+	__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__ )
 extern void __drbd_set_out_of_sync(drbd_dev* mdev, sector_t sector, int size, const char* file, const unsigned int line);
-#define drbd_set_out_of_sync(mdev,sector,size) \
-	__drbd_set_out_of_sync(mdev,sector,size, __FILE__, __LINE__ )
+#define drbd_set_out_of_sync(mdev, sector, size) \
+	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__ )
 extern void drbd_al_apply_to_bm(struct Drbd_Conf *mdev);
 extern void drbd_al_to_on_disk_bm(struct Drbd_Conf *mdev);
 extern void drbd_al_shrink(struct Drbd_Conf *mdev);
@@ -1425,36 +1425,36 @@
 #define user_isp_mask 1
 #define aftr_isp_mask 1
 
-#define NS(T,S) ({drbd_state_t mask; mask.i=0; mask.T = T##_mask; mask;}), \
-                ({drbd_state_t val; val.i=0; val.T = (S); val;})
-#define NS2(T1,S1,T2,S2) \
-                ({drbd_state_t mask; mask.i=0; mask.T1 = T1##_mask; \
+#define NS(T, S) ({drbd_state_t mask; mask.i=0; mask.T = T##_mask; mask;}), \
+		({drbd_state_t val; val.i=0; val.T = (S); val;})
+#define NS2(T1, S1, T2, S2) \
+		({drbd_state_t mask; mask.i=0; mask.T1 = T1##_mask; \
 		  mask.T2 = T2##_mask; mask;}), \
-                ({drbd_state_t val; val.i=0; val.T1 = (S1); \
-                  val.T2 = (S2); val;})
-#define NS3(T1,S1,T2,S2,T3,S3) \
-                ({drbd_state_t mask; mask.i=0; mask.T1 = T1##_mask; \
+		({drbd_state_t val; val.i=0; val.T1 = (S1); \
+		  val.T2 = (S2); val;})
+#define NS3(T1, S1, T2, S2, T3, S3) \
+		({drbd_state_t mask; mask.i=0; mask.T1 = T1##_mask; \
 		  mask.T2 = T2##_mask; mask.T3 = T3##_mask; mask;}), \
-                ({drbd_state_t val; val.i=0; val.T1 = (S1); \
-                  val.T2 = (S2); val.T3 = (S3); val;})
+		({drbd_state_t val; val.i=0; val.T1 = (S1); \
+		  val.T2 = (S2); val.T3 = (S3); val;})
 
-#define _NS(D,T,S) D,({drbd_state_t ns; ns.i = D->state.i; ns.T = (S); ns;})
-#define _NS2(D,T1,S1,T2,S2) \
-                D,({drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
-                ns.T2 = (S2); ns;})
-#define _NS3(D,T1,S1,T2,S2,T3,S3) \
-                D,({drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
-                ns.T2 = (S2); ns.T3 = (S3); ns;})
+#define _NS(D, T, S) D, ({drbd_state_t ns; ns.i = D->state.i; ns.T = (S); ns;})
+#define _NS2(D, T1, S1, T2, S2) \
+		D, ({drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
+		ns.T2 = (S2); ns;})
+#define _NS3(D, T1, S1, T2, S2, T3, S3) \
+		D, ({drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
+		ns.T2 = (S2); ns.T3 = (S3); ns;})
 
 static inline void drbd_state_lock(drbd_dev *mdev)
 {
 	wait_event(mdev->misc_wait,
-		   !test_and_set_bit(CLUSTER_ST_CHANGE,&mdev->flags));
+		   !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
 }
 
 static inline void drbd_state_unlock(drbd_dev *mdev)
 {
-	clear_bit(CLUSTER_ST_CHANGE,&mdev->flags);
+	clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
 	wake_up(&mdev->misc_wait);
 }
 
@@ -1481,7 +1481,7 @@
 	case Detach:
 	case CallIOEHelper:
 		if (mdev->state.disk > Failed) {
-			_drbd_set_state(_NS(mdev,disk,Failed),
+			_drbd_set_state(_NS(mdev, disk, Failed),
 					ChgStateHard|ScheduleAfter);
 			ERR("Local IO failed. Detaching...\n");
 		}
@@ -1493,9 +1493,9 @@
 {
 	if (error) {
 		unsigned long flags;
-		spin_lock_irqsave(&mdev->req_lock,flags);
-		__drbd_chk_io_error(mdev,forcedetach);
-		spin_unlock_irqrestore(&mdev->req_lock,flags);
+		spin_lock_irqsave(&mdev->req_lock, flags);
+		__drbd_chk_io_error(mdev, forcedetach);
+		spin_unlock_irqrestore(&mdev->req_lock, flags);
 	}
 }
 
@@ -1566,7 +1566,7 @@
 		/* sizeof(struct md_on_disk_07) == 4k
 		 * position: last 4k aligned block of 4k size */
 		if (!bdev->backing_bdev) {
-			if (DRBD_ratelimit(5*HZ,5)) {
+			if (DRBD_ratelimit(5*HZ, 5)) {
 				ERR("bdev->backing_bdev==NULL\n");
 				dump_stack();
 			}
@@ -1582,7 +1582,7 @@
 static inline void
 _drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
 {
-	list_add_tail(&w->list,&q->q);
+	list_add_tail(&w->list, &q->q);
 	up(&q->s);
 }
 
@@ -1590,22 +1590,22 @@
 drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&q->q_lock,flags);
-	list_add(&w->list,&q->q);
+	spin_lock_irqsave(&q->q_lock, flags);
+	list_add(&w->list, &q->q);
 	up(&q->s); /* within the spinlock,
 		      see comment near end of drbd_worker() */
-	spin_unlock_irqrestore(&q->q_lock,flags);
+	spin_unlock_irqrestore(&q->q_lock, flags);
 }
 
 static inline void
 drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&q->q_lock,flags);
-	list_add_tail(&w->list,&q->q);
+	spin_lock_irqsave(&q->q_lock, flags);
+	list_add_tail(&w->list, &q->q);
 	up(&q->s); /* within the spinlock,
 		      see comment near end of drbd_worker() */
-	spin_unlock_irqrestore(&q->q_lock,flags);
+	spin_unlock_irqrestore(&q->q_lock, flags);
 }
 
 static inline void wake_asender(drbd_dev *mdev) {
@@ -1615,41 +1615,41 @@
 }
 
 static inline void request_ping(drbd_dev *mdev) {
-	set_bit(SEND_PING,&mdev->flags);
+	set_bit(SEND_PING, &mdev->flags);
 	wake_asender(mdev);
 }
 
 static inline int drbd_send_short_cmd(drbd_dev *mdev, Drbd_Packet_Cmd cmd)
 {
 	Drbd_Header h;
-	return drbd_send_cmd(mdev,USE_DATA_SOCKET,cmd,&h,sizeof(h));
+	return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
 }
 
 static inline int drbd_send_ping(drbd_dev *mdev)
 {
 	Drbd_Header h;
-	return drbd_send_cmd(mdev,USE_META_SOCKET,Ping,&h,sizeof(h));
+	return drbd_send_cmd(mdev, USE_META_SOCKET, Ping, &h, sizeof(h));
 }
 
 static inline int drbd_send_ping_ack(drbd_dev *mdev)
 {
 	Drbd_Header h;
-	return drbd_send_cmd(mdev,USE_META_SOCKET,PingAck,&h,sizeof(h));
+	return drbd_send_cmd(mdev, USE_META_SOCKET, PingAck, &h, sizeof(h));
 }
 
 static inline void drbd_thread_stop(struct Drbd_thread *thi)
 {
-	_drbd_thread_stop(thi,FALSE,TRUE);
+	_drbd_thread_stop(thi, FALSE, TRUE);
 }
 
 static inline void drbd_thread_stop_nowait(struct Drbd_thread *thi)
 {
-	_drbd_thread_stop(thi,FALSE,FALSE);
+	_drbd_thread_stop(thi, FALSE, FALSE);
 }
 
 static inline void drbd_thread_restart_nowait(struct Drbd_thread *thi)
 {
-	_drbd_thread_stop(thi,TRUE,FALSE);
+	_drbd_thread_stop(thi, TRUE, FALSE);
 }
 
 /* counts how many answer packets packets we expect from our peer,
@@ -1687,16 +1687,16 @@
 		    atomic_read(&mdev->which))
 
 #define dec_ap_pending(mdev)	do {				\
-	typecheck(drbd_dev*,mdev);				\
+	typecheck(drbd_dev*, mdev);				\
 	if (atomic_dec_and_test(&mdev->ap_pending_cnt))		\
 		wake_up(&mdev->misc_wait);			\
 	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)
 
 /* counts how many resync-related answers we still expect from the peer
- *                   increase                   decrease
+ *		     increase			decrease
  * SyncTarget sends RSDataRequest (and expects RSDataReply)
  * SyncSource sends RSDataReply   (and expects WriteAck whith ID_SYNCER)
- *                                         (or NegAck with ID_SYNCER)
+ *					   (or NegAck with ID_SYNCER)
  */
 static inline void inc_rs_pending(drbd_dev* mdev)
 {
@@ -1704,18 +1704,18 @@
 }
 
 #define dec_rs_pending(mdev)	do {				\
-	typecheck(drbd_dev*,mdev);				\
+	typecheck(drbd_dev*, mdev);				\
 	atomic_dec(&mdev->rs_pending_cnt);			\
 	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)
 
 /* counts how many answers we still need to send to the peer.
  * increased on
- *  receive_Data        unless protocol A;
- *                      we need to send a RecvAck (proto B)
- *                      or WriteAck (proto C)
+ *  receive_Data	unless protocol A;
+ *			we need to send a RecvAck (proto B)
+ *			or WriteAck (proto C)
  *  receive_RSDataReply (recv_resync_read) we need to send a WriteAck
  *  receive_DataRequest (receive_RSDataRequest) we need to send back Data
- *  receive_Barrier_*   we need to send a BarrierAck
+ *  receive_Barrier_*	we need to send a BarrierAck
  */
 static inline void inc_unacked(drbd_dev* mdev)
 {
@@ -1723,12 +1723,12 @@
 }
 
 #define dec_unacked(mdev)	do {				\
-	typecheck(drbd_dev*,mdev);				\
+	typecheck(drbd_dev*, mdev);				\
 	atomic_dec(&mdev->unacked_cnt);				\
 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
 
 #define sub_unacked(mdev, n)	do {				\
-	typecheck(drbd_dev*,mdev);				\
+	typecheck(drbd_dev*, mdev);				\
 	atomic_sub(n, &mdev->unacked_cnt);			\
 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
 
@@ -1831,7 +1831,7 @@
 
 	spin_lock_irq(&mdev->req_lock);
 	while (!__inc_ap_bio_cond(mdev)) {
-		prepare_to_wait(&mdev->misc_wait,&wait,TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&mdev->req_lock);
 		schedule();
 		finish_wait(&mdev->misc_wait, &wait);
@@ -1858,12 +1858,12 @@
 	 */
 	return ((s32)(a) - (s32)(b));
 }
-#define seq_lt(a,b) (seq_cmp((a),(b)) < 0)
-#define seq_gt(a,b) (seq_cmp((a),(b)) > 0)
-#define seq_ge(a,b) (seq_cmp((a),(b)) >= 0)
-#define seq_le(a,b) (seq_cmp((a),(b)) <= 0)
+#define seq_lt(a, b) (seq_cmp((a), (b)) < 0)
+#define seq_gt(a, b) (seq_cmp((a), (b)) > 0)
+#define seq_ge(a, b) (seq_cmp((a), (b)) >= 0)
+#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
 /* CAUTION: please no side effects in arguments! */
-#define seq_max(a,b) ((u32)(seq_gt((a),(b)) ? (a) : (b)))
+#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))
 
 static inline void update_peer_seq(drbd_dev* mdev, unsigned int new_seq)
 {
@@ -1908,7 +1908,7 @@
 static inline void drbd_kick_lo(drbd_dev *mdev)
 {
 	if (!mdev->bc->backing_bdev) {
-		if (DRBD_ratelimit(5*HZ,5)) {
+		if (DRBD_ratelimit(5*HZ, 5)) {
 			ERR("backing_bdev==NULL in drbd_kick_lo! The following call trace is for debuggin purposes only. Don't worry.\n");
 			dump_stack();
 		}

Modified: branches/drbd-8.0-for-linus/drbd/drbd_main.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_main.c	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_main.c	2007-07-24 11:38:05 UTC (rev 2985)
@@ -85,18 +85,18 @@
 MODULE_PARM_DESC(allow_oos, "DONT USE!");
 /* thanks to these macros, if compiled into the kernel (not-module),
  * this becomes the boot parameter drbd.minor_count */
-module_param(minor_count, int,0);
-module_param(allow_oos, bool,0);
+module_param(minor_count, int, 0);
+module_param(allow_oos, int, 0);
 
 #ifdef DRBD_ENABLE_FAULTS
 int enable_faults = 0;
 int fault_rate;
 int fault_count;
 int fault_devs;
-module_param(enable_faults,int,0664);	// bitmap of enabled faults
-module_param(fault_rate,int,0664);	// fault rate % value - applies to all enabled faults
-module_param(fault_count,int,0664);	// count of faults inserted
-module_param(fault_devs,int,0644);      // bitmap of devices to insert faults on
+module_param(enable_faults, int, 0664);	// bitmap of enabled faults
+module_param(fault_rate, int, 0664);	// fault rate % value - applies to all enabled faults
+module_param(fault_count, int, 0664);	// count of faults inserted
+module_param(fault_devs, int, 0644);      // bitmap of devices to insert faults on
 #endif
 
 // module parameter, defined
@@ -108,9 +108,9 @@
 int trace_level= 0;	// Current trace level
 int trace_devs = 0;	// Bitmap of devices to trace
 
-module_param(trace_level,int,0644);
-module_param(trace_type,int,0644);
-module_param(trace_devs,int,0644);
+module_param(trace_level, int, 0644);
+module_param(trace_type, int, 0644);
+module_param(trace_devs, int, 0644);
 #endif
 
 
@@ -158,7 +158,7 @@
 {
 	struct drbd_barrier *b;
 
-	b=kmalloc(sizeof(struct drbd_barrier),GFP_KERNEL);
+	b=kmalloc(sizeof(struct drbd_barrier), GFP_KERNEL);
 	if (!b) return 0;
 	INIT_LIST_HEAD(&b->requests);
 	INIT_LIST_HEAD(&b->w.list);
@@ -190,7 +190,7 @@
  * It returns the previously newest barrier
  * (not the just created barrier) to the caller.
  */
-struct drbd_barrier *_tl_add_barrier(drbd_dev *mdev,struct drbd_barrier *new)
+struct drbd_barrier *_tl_add_barrier(drbd_dev *mdev, struct drbd_barrier *new)
 {
 	struct drbd_barrier *newest_before;
 
@@ -210,7 +210,7 @@
 }
 
 /* when we receive a barrier ack */
-void tl_release(drbd_dev *mdev,unsigned int barrier_nr,
+void tl_release(drbd_dev *mdev, unsigned int barrier_nr,
 		       unsigned int set_size)
 {
 	struct drbd_barrier *b;
@@ -227,7 +227,7 @@
 	 * in protocol A and B, this should not be empty, even though the
 	 * master_bio's could already been completed.  */
 	list_for_each_safe(le, tle, &b->requests) {
-		r = list_entry(le, struct drbd_request,tl_requests);
+		r = list_entry(le, struct drbd_request, tl_requests);
 		_req_mod(r, barrier_acked, 0);
 	}
 	list_del(&b->requests);
@@ -270,7 +270,7 @@
 		struct drbd_request *r;
 
 		list_for_each_safe(le, tle, &b->requests) {
-			r = list_entry(le, struct drbd_request,tl_requests);
+			r = list_entry(le, struct drbd_request, tl_requests);
 			_req_mod(r, connection_lost_while_pending, 0);
 		}
 		tmp = b->next;
@@ -316,10 +316,10 @@
 {
 	enum io_error_handler eh;
 	unsigned long flags;
-	int send,ok=1;
+	int send, ok=1;
 
 	eh = PassOn;
-	if (inc_local_if_state(mdev,Failed)) {
+	if (inc_local_if_state(mdev, Failed)) {
 		eh = mdev->bc->dc.on_io_error;
 		dec_local(mdev);
 	}
@@ -327,12 +327,12 @@
 	if (!forcedetach && eh == PassOn)
 		return 1;
 
-	spin_lock_irqsave(&mdev->req_lock,flags);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	if ( (send = (mdev->state.disk == Failed)) ) {
-		_drbd_set_state(_NS(mdev,disk,Diskless),
+		_drbd_set_state(_NS(mdev, disk, Diskless),
 				ChgStateHard|ScheduleAfter);
 	}
-	spin_unlock_irqrestore(&mdev->req_lock,flags);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 	if (!send) return ok;
 
@@ -350,7 +350,7 @@
 	/* Releasing the backing device is done in after_state_ch() */
 
 	if (eh == CallIOEHelper) {
-		drbd_khelper(mdev,"local-io-error");
+		drbd_khelper(mdev, "local-io-error");
 	}
 
 	return ok;
@@ -375,54 +375,54 @@
 		      drbd_state_t mask, drbd_state_t val)
 {
 	unsigned long flags;
-	drbd_state_t os,ns;
+	drbd_state_t os, ns;
 	int rv;
 
-	spin_lock_irqsave(&mdev->req_lock,flags);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	os = mdev->state;
 	ns.i = (os.i & ~mask.i) | val.i;
 	rv = _drbd_set_state(mdev, ns, f);
 	ns = mdev->state;
-	spin_unlock_irqrestore(&mdev->req_lock,flags);
-	if (rv==SS_Success && !(f&ScheduleAfter)) after_state_ch(mdev,os,ns,f);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	if (rv==SS_Success && !(f&ScheduleAfter)) after_state_ch(mdev, os, ns, f);
 
 	return rv;
 }
 
 void drbd_force_state(drbd_dev* mdev, drbd_state_t mask, drbd_state_t val)
 {
-	drbd_change_state(mdev,ChgStateHard,mask,val);
+	drbd_change_state(mdev, ChgStateHard, mask, val);
 }
 
 STATIC int is_valid_state(drbd_dev* mdev, drbd_state_t ns);
 STATIC int is_valid_state_transition(drbd_dev*, drbd_state_t, drbd_state_t);
 STATIC int drbd_send_state_req(drbd_dev *, drbd_state_t, drbd_state_t);
 
-set_st_err_t _req_st_cond(drbd_dev* mdev,drbd_state_t mask, drbd_state_t val)
+set_st_err_t _req_st_cond(drbd_dev* mdev, drbd_state_t mask, drbd_state_t val)
 {
-	drbd_state_t os,ns;
+	drbd_state_t os, ns;
 	unsigned long flags;
 	int rv;
 
-	if (test_and_clear_bit(CL_ST_CHG_SUCCESS,&mdev->flags))
+	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
 		return SS_CW_Success;
 
-	if (test_and_clear_bit(CL_ST_CHG_FAIL,&mdev->flags))
+	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
 		return SS_CW_FailedByPeer;
 
 	rv=0;
-	spin_lock_irqsave(&mdev->req_lock,flags);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	os = mdev->state;
 	ns.i = (os.i & ~mask.i) | val.i;
-	if ( !cl_wide_st_chg(mdev,os,ns) ) rv = SS_CW_NoNeed;
+	if ( !cl_wide_st_chg(mdev, os, ns) ) rv = SS_CW_NoNeed;
 	if (!rv) {
-		rv = is_valid_state(mdev,ns);
+		rv = is_valid_state(mdev, ns);
 		if (rv==SS_Success) {
-			rv = is_valid_state_transition(mdev,ns,os);
+			rv = is_valid_state_transition(mdev, ns, os);
 			if (rv==SS_Success) rv = 0; // cont waiting, otherwise fail.
 		}
 	}
-	spin_unlock_irqrestore(&mdev->req_lock,flags);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 	return rv;
 }
@@ -437,40 +437,40 @@
 		       enum chg_state_flags f)
 {
 	unsigned long flags;
-	drbd_state_t os,ns;
+	drbd_state_t os, ns;
 	int rv;
 
-	spin_lock_irqsave(&mdev->req_lock,flags);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	os = mdev->state;
 	ns.i = (os.i & ~mask.i) | val.i;
 
-	if (cl_wide_st_chg(mdev,os,ns)) {
-		rv = is_valid_state(mdev,ns);
-		if (rv == SS_Success) rv = is_valid_state_transition(mdev,ns,os);
-		spin_unlock_irqrestore(&mdev->req_lock,flags);
+	if (cl_wide_st_chg(mdev, os, ns)) {
+		rv = is_valid_state(mdev, ns);
+		if (rv == SS_Success) rv = is_valid_state_transition(mdev, ns, os);
+		spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 		if (rv < SS_Success) {
-			if (f & ChgStateVerbose) print_st_err(mdev,os,ns,rv);
+			if (f & ChgStateVerbose) print_st_err(mdev, os, ns, rv);
 			return rv;
 		}
 
 		drbd_state_lock(mdev);
-		if ( !drbd_send_state_req(mdev,mask,val) ) {
+		if ( !drbd_send_state_req(mdev, mask, val) ) {
 			drbd_state_unlock(mdev);
 			rv = SS_CW_FailedByPeer;
-			if (f & ChgStateVerbose) print_st_err(mdev,os,ns,rv);
+			if (f & ChgStateVerbose) print_st_err(mdev, os, ns, rv);
 			return rv;
 		}
 
-		wait_event(mdev->state_wait,(rv=_req_st_cond(mdev,mask,val)));
+		wait_event(mdev->state_wait, (rv=_req_st_cond(mdev, mask, val)));
 
 		if (rv < SS_Success) {
 			// nearly dead code.
 			drbd_state_unlock(mdev);
-			if (f & ChgStateVerbose) print_st_err(mdev,os,ns,rv);
+			if (f & ChgStateVerbose) print_st_err(mdev, os, ns, rv);
 			return rv;
 		}
-		spin_lock_irqsave(&mdev->req_lock,flags);
+		spin_lock_irqsave(&mdev->req_lock, flags);
 		os = mdev->state;
 		ns.i = (os.i & ~mask.i) | val.i;
 		drbd_state_unlock(mdev);
@@ -478,9 +478,9 @@
 
 	rv = _drbd_set_state(mdev, ns, f);
 	ns = mdev->state;
-	spin_unlock_irqrestore(&mdev->req_lock,flags);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
-	if (rv==SS_Success && !(f&ScheduleAfter)) after_state_ch(mdev,os,ns,f);
+	if (rv==SS_Success && !(f&ScheduleAfter)) after_state_ch(mdev, os, ns, f);
 
 	return rv;
 }
@@ -504,9 +504,9 @@
 
 void print_st_err(drbd_dev* mdev, drbd_state_t os, drbd_state_t ns, int err)
 {
-	ERR("State change failed: %s\n",set_st_err_name(err));
-	print_st(mdev," state",os);
-	print_st(mdev,"wanted",ns);
+	ERR("State change failed: %s\n", set_st_err_name(err));
+	print_st(mdev, " state", os);
+	print_st(mdev, "wanted", ns);
 }
 
 
@@ -576,7 +576,7 @@
 	return rv;
 }
 
-STATIC int is_valid_state_transition(drbd_dev* mdev,drbd_state_t ns,drbd_state_t os)
+STATIC int is_valid_state_transition(drbd_dev* mdev, drbd_state_t ns, drbd_state_t os)
 {
 	int rv=SS_Success;
 
@@ -592,7 +592,7 @@
 	return rv;
 }
 
-int _drbd_set_state(drbd_dev* mdev, drbd_state_t ns,enum chg_state_flags flags)
+int _drbd_set_state(drbd_dev* mdev, drbd_state_t ns, enum chg_state_flags flags)
 {
 	drbd_state_t os;
 	int rv=SS_Success, warn_sync_abort=0;
@@ -719,24 +719,24 @@
 		/*  pre-state-change checks ; only look at ns  */
 		/* See drbd_state_sw_errors in drbd_strings.c */
 
-		rv = is_valid_state(mdev,ns);
+		rv = is_valid_state(mdev, ns);
 		if (rv < SS_Success) {
 			/* If the old state was illegal as well, then let
 			   this happen...*/
 
-			if ( is_valid_state(mdev,os) == rv ) {
+			if ( is_valid_state(mdev, os) == rv ) {
 				ERR("Forcing state change from bad state. "
 				    "Error would be: '%s'\n",
 				    set_st_err_name(rv));
-				print_st(mdev,"old",os);
-				print_st(mdev,"new",ns);
+				print_st(mdev, "old", os);
+				print_st(mdev, "new", ns);
 				rv = SS_Success;
 			}
-		} else rv = is_valid_state_transition(mdev,ns,os);
+		} else rv = is_valid_state_transition(mdev, ns, os);
 	}
 
 	if (rv < SS_Success) {
-		if (flags & ChgStateVerbose) print_st_err(mdev,os,ns,rv);
+		if (flags & ChgStateVerbose) print_st_err(mdev, os, ns, rv);
 		return rv;
 	}
 
@@ -746,7 +746,7 @@
 
 #if DUMP_MD >= 2
 	{
-	char *pbp,pb[300];
+	char *pbp, pb[300];
 	pbp = pb;
 	*pbp=0;
 	PSC(role);
@@ -768,8 +768,8 @@
 
 	/**   post-state-change actions   **/
 	if (os.conn >= SyncSource   && ns.conn <= Connected) {
-		set_bit(STOP_SYNC_TIMER,&mdev->flags);
-		mod_timer(&mdev->resync_timer,jiffies);
+		set_bit(STOP_SYNC_TIMER, &mdev->flags);
+		mod_timer(&mdev->resync_timer, jiffies);
 	}
 
 	if ( (os.conn == PausedSyncT || os.conn == PausedSyncS) &&
@@ -777,9 +777,9 @@
 		INFO("Syncer continues.\n");
 		mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time;
 		if (ns.conn == SyncTarget) {
-			D_ASSERT(!test_bit(STOP_SYNC_TIMER,&mdev->flags));
-			clear_bit(STOP_SYNC_TIMER,&mdev->flags);
-			mod_timer(&mdev->resync_timer,jiffies);
+			D_ASSERT(!test_bit(STOP_SYNC_TIMER, &mdev->flags));
+			clear_bit(STOP_SYNC_TIMER, &mdev->flags);
+			mod_timer(&mdev->resync_timer, jiffies);
 		}
 	}
 
@@ -788,7 +788,7 @@
 		INFO("Resync suspended\n");
 		mdev->rs_mark_time = jiffies;
 		if (ns.conn == PausedSyncT) {
-			set_bit(STOP_SYNC_TIMER,&mdev->flags);
+			set_bit(STOP_SYNC_TIMER, &mdev->flags);
 		}
 	}
 
@@ -808,7 +808,7 @@
 			ascw->ns = ns;
 			ascw->flags = flags;
 			ascw->w.cb = w_after_state_ch;
-			drbd_queue_work(&mdev->data.work,&ascw->w);
+			drbd_queue_work(&mdev->data.work, &ascw->w);
 		} else {
 			WARN("Could not kmalloc an ascw\n");
 		}
@@ -849,7 +849,7 @@
 					     MDF_ConnectedInd|MDF_WasUpToDate|
 					     MDF_PeerOutDated );
 
-		if (test_bit(CRASHED_PRIMARY,&mdev->flags) ||
+		if (test_bit(CRASHED_PRIMARY, &mdev->flags) ||
 		    mdev->state.role == Primary ||
 		    ( mdev->state.pdsk < Inconsistent &&
 		      mdev->state.peer == Primary ) )  mdf |= MDF_PrimaryInd;
@@ -878,7 +878,7 @@
 		     (os.conn < Connected && ns.conn >= Connected) ) {
 			tl_clear(mdev);
 			spin_lock_irq(&mdev->req_lock);
-			_drbd_set_state(_NS(mdev,susp,0),
+			_drbd_set_state(_NS(mdev, susp, 0),
 					ChgStateVerbose | ScheduleAfter );
 			spin_unlock_irq(&mdev->req_lock);
 		}
@@ -963,24 +963,24 @@
 
 		drbd_bm_lock(mdev); // racy...
 
-		drbd_md_set_flag(mdev,MDF_FullSync);
+		drbd_md_set_flag(mdev, MDF_FullSync);
 		drbd_md_sync(mdev);
 
 		drbd_bm_set_all(mdev);
 		drbd_bm_write(mdev);
 
-		drbd_md_clear_flag(mdev,MDF_FullSync);
+		drbd_md_clear_flag(mdev, MDF_FullSync);
 		drbd_md_sync(mdev);
 
 		drbd_bm_unlock(mdev);
 
 		if (ns.conn == StartingSyncT) {
 			spin_lock_irq(&mdev->req_lock);
-			_drbd_set_state(_NS(mdev,conn,WFSyncUUID),
+			_drbd_set_state(_NS(mdev, conn, WFSyncUUID),
 					ChgStateVerbose | ScheduleAfter );
 			spin_unlock_irq(&mdev->req_lock);
 		} else /* StartingSyncS */ {
-			drbd_start_resync(mdev,SyncSource);
+			drbd_start_resync(mdev, SyncSource);
 		}
 	}
 
@@ -989,13 +989,13 @@
 	       os.disk > Inconsistent && ns.disk == Inconsistent ) {
 		drbd_bm_lock(mdev); // racy...
 
-		drbd_md_set_flag(mdev,MDF_FullSync);
+		drbd_md_set_flag(mdev, MDF_FullSync);
 		drbd_md_sync(mdev);
 
 		drbd_bm_set_all(mdev);
 		drbd_bm_write(mdev);
 
-		drbd_md_clear_flag(mdev,MDF_FullSync);
+		drbd_md_clear_flag(mdev, MDF_FullSync);
 		drbd_md_sync(mdev);
 
 		drbd_bm_unlock(mdev);
@@ -1138,7 +1138,7 @@
 }
 
 
-void _drbd_thread_stop(struct Drbd_thread *thi, int restart,int wait)
+void _drbd_thread_stop(struct Drbd_thread *thi, int restart, int wait)
 {
 	drbd_dev *mdev = thi->mdev;
 	Drbd_thread_state ns = restart ? Restarting : Exiting;
@@ -1165,7 +1165,7 @@
 		smp_mb();
 		if (thi->task != current) {
 			if (wait) init_completion(&thi->startstop);
-			force_sig(DRBD_SIGKILL,thi->task);
+			force_sig(DRBD_SIGKILL, thi->task);
 		} else D_ASSERT(!wait);
 	}
 	spin_unlock(&thi->t_lock);
@@ -1190,7 +1190,7 @@
 	}
 
 	if (thi->task != current) {
-		force_sig(DRBD_SIGKILL,thi->task);
+		force_sig(DRBD_SIGKILL, thi->task);
 	}
 
 	spin_unlock(&thi->t_lock);
@@ -1201,7 +1201,7 @@
 			  Drbd_Packet_Cmd cmd, Drbd_Header *h,
 			  size_t size, unsigned msg_flags)
 {
-	int sent,ok;
+	int sent, ok;
 
 	ERR_IF(!h) return FALSE;
 	ERR_IF(!size) return FALSE;
@@ -1210,8 +1210,8 @@
 	h->command = cpu_to_be16(cmd);
 	h->length  = cpu_to_be16(size-sizeof(Drbd_Header));
 
-	dump_packet(mdev,sock,0,(void*)h, __FILE__, __LINE__);
-	sent = drbd_send(mdev,sock,h,size,msg_flags);
+	dump_packet(mdev, sock, 0, (void*)h, __FILE__, __LINE__);
+	sent = drbd_send(mdev, sock, h, size, msg_flags);
 
 	ok = ( sent == size );
 	if (!ok) {
@@ -1264,10 +1264,10 @@
 	if (!drbd_get_data_sock(mdev))
 		return 0;
 
-	dump_packet(mdev,mdev->data.socket,0,(void*)&h, __FILE__, __LINE__);
+	dump_packet(mdev, mdev->data.socket, 0, (void*)&h, __FILE__, __LINE__);
 
-	ok = ( sizeof(h) == drbd_send(mdev,mdev->data.socket,&h,sizeof(h),0) );
-	ok = ok && ( size == drbd_send(mdev,mdev->data.socket,data,size,0) );
+	ok = ( sizeof(h) == drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0) );
+	ok = ok && ( size == drbd_send(mdev, mdev->data.socket, data, size, 0) );
 
 	drbd_put_data_sock(mdev);
 
@@ -1280,7 +1280,7 @@
 
 	p.rate      = cpu_to_be32(sc->rate);
 
-	return drbd_send_cmd(mdev,USE_DATA_SOCKET,SyncParam,(Drbd_Header*)&p,sizeof(p));
+	return drbd_send_cmd(mdev, USE_DATA_SOCKET, SyncParam, (Drbd_Header*)&p, sizeof(p));
 }
 
 int drbd_send_protocol(drbd_dev *mdev)
@@ -1294,8 +1294,8 @@
 	p.want_lose     = cpu_to_be32(mdev->net_conf->want_lose);
 	p.two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
 
-	return drbd_send_cmd(mdev,USE_DATA_SOCKET,ReportProtocol,
-			     (Drbd_Header*)&p,sizeof(p));
+	return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportProtocol,
+			     (Drbd_Header*)&p, sizeof(p));
 }
 
 int drbd_send_uuids(drbd_dev *mdev)
@@ -1304,7 +1304,7 @@
 	int i;
 	u64 uuid_flags = 0;
 
-	if (!inc_local_if_state(mdev,Negotiating)) return 1; // ok.
+	if (!inc_local_if_state(mdev, Negotiating)) return 1; // ok.
 
 	for (i = Current; i < UUID_SIZE; i++) {
 		/* FIXME howto handle diskless ? */
@@ -1321,8 +1321,8 @@
 
 	dec_local(mdev);
 
-	return drbd_send_cmd(mdev,USE_DATA_SOCKET,ReportUUIDs,
-			     (Drbd_Header*)&p,sizeof(p));
+	return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportUUIDs,
+			     (Drbd_Header*)&p, sizeof(p));
 }
 
 int drbd_send_sync_uuid(drbd_dev *mdev, u64 val)
@@ -1331,8 +1331,8 @@
 
 	p.uuid = cpu_to_be64(val);
 
-	return drbd_send_cmd(mdev,USE_DATA_SOCKET,ReportSyncUUID,
-			     (Drbd_Header*)&p,sizeof(p));
+	return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportSyncUUID,
+			     (Drbd_Header*)&p, sizeof(p));
 }
 
 int drbd_send_sizes(drbd_dev *mdev)
@@ -1342,7 +1342,7 @@
 	int q_order_type;
 	int ok;
 
-	if (inc_local_if_state(mdev,Negotiating)) {
+	if (inc_local_if_state(mdev, Negotiating)) {
 		D_ASSERT(mdev->bc->backing_bdev);
 		d_size = drbd_get_max_capacity(mdev->bc);
 		u_size = mdev->bc->dc.disk_size;
@@ -1361,8 +1361,8 @@
 	p.max_segment_size = cpu_to_be32(mdev->rq_queue->max_segment_size);
 	p.queue_order_type = cpu_to_be32(q_order_type);
 
-	ok = drbd_send_cmd(mdev,USE_DATA_SOCKET,ReportSizes,
-			   (Drbd_Header*)&p,sizeof(p));
+	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportSizes,
+			   (Drbd_Header*)&p, sizeof(p));
 	return ok;
 }
 
@@ -1372,8 +1372,8 @@
 
 	p.state    = cpu_to_be32(mdev->state.i);
 
-	return drbd_send_cmd(mdev,USE_DATA_SOCKET,ReportState,
-			     (Drbd_Header*)&p,sizeof(p));
+	return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportState,
+			     (Drbd_Header*)&p, sizeof(p));
 }
 
 STATIC int drbd_send_state_req(drbd_dev *mdev, drbd_state_t mask, drbd_state_t val)
@@ -1383,8 +1383,8 @@
 	p.mask    = cpu_to_be32(mask.i);
 	p.val     = cpu_to_be32(val.i);
 
-	return drbd_send_cmd(mdev,USE_DATA_SOCKET,StateChgRequest,
-			     (Drbd_Header*)&p,sizeof(p));
+	return drbd_send_cmd(mdev, USE_DATA_SOCKET, StateChgRequest,
+			     (Drbd_Header*)&p, sizeof(p));
 }
 
 int drbd_send_sr_reply(drbd_dev *mdev, int retcode)
@@ -1393,8 +1393,8 @@
 
 	p.retcode    = cpu_to_be32(retcode);
 
-	return drbd_send_cmd(mdev,USE_META_SOCKET,StateChgReply,
-			     (Drbd_Header*)&p,sizeof(p));
+	return drbd_send_cmd(mdev, USE_META_SOCKET, StateChgReply,
+			     (Drbd_Header*)&p, sizeof(p));
 }
 
 
@@ -1413,7 +1413,7 @@
 	p  = vmalloc(PAGE_SIZE); // sleeps. cannot fail.
 	buffer = (unsigned long*)p->payload;
 
-	if (drbd_md_test_flag(mdev->bc,MDF_FullSync)) {
+	if (drbd_md_test_flag(mdev->bc, MDF_FullSync)) {
 		drbd_bm_set_all(mdev);
 		drbd_bm_write(mdev);
 		if (unlikely(mdev->state.disk <= Failed )) {
@@ -1423,7 +1423,7 @@
 			ERR("Failed to write bitmap to disk!\n");
 		}
 		else {
-			drbd_md_clear_flag(mdev,MDF_FullSync);
+			drbd_md_clear_flag(mdev, MDF_FullSync);
 			drbd_md_sync(mdev);
 		}
 	}
@@ -1438,7 +1438,7 @@
 		if (want) {
 			drbd_bm_get_lel(mdev, bm_i, num_words, buffer);
 		}
-		ok = _drbd_send_cmd(mdev,mdev->data.socket,ReportBitMap,
+		ok = _drbd_send_cmd(mdev, mdev->data.socket, ReportBitMap,
 				   p, sizeof(*p) + want, 0);
 		bm_i += num_words;
 	} while (ok && want);
@@ -1458,7 +1458,7 @@
 	return ok;
 }
 
-int drbd_send_b_ack(drbd_dev *mdev, u32 barrier_nr,u32 set_size)
+int drbd_send_b_ack(drbd_dev *mdev, u32 barrier_nr, u32 set_size)
 {
 	int ok;
 	Drbd_BarrierAck_Packet p;
@@ -1466,7 +1466,7 @@
 	p.barrier  = barrier_nr;
 	p.set_size = cpu_to_be32(set_size);
 
-	ok = drbd_send_cmd(mdev,USE_META_SOCKET,BarrierAck,(Drbd_Header*)&p,sizeof(p));
+	ok = drbd_send_cmd(mdev, USE_META_SOCKET, BarrierAck, (Drbd_Header*)&p, sizeof(p));
 	return ok;
 }
 
@@ -1486,10 +1486,10 @@
 	p.sector   = sector;
 	p.block_id = block_id;
 	p.blksize  = blksize;
-	p.seq_num  = cpu_to_be32(atomic_add_return(1,&mdev->packet_seq));
+	p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
 
 	if (!mdev->meta.socket || mdev->state.conn < Connected) return FALSE;
-	ok=drbd_send_cmd(mdev,USE_META_SOCKET,cmd,(Drbd_Header*)&p,sizeof(p));
+	ok=drbd_send_cmd(mdev, USE_META_SOCKET, cmd, (Drbd_Header*)&p, sizeof(p));
 	return ok;
 }
 
@@ -1499,26 +1499,26 @@
 	const int header_size = sizeof(Drbd_Data_Packet) - sizeof(Drbd_Header);
 	int data_size  = ((Drbd_Header*)dp)->length - header_size;
 
-	return _drbd_send_ack(mdev,cmd,dp->sector,cpu_to_be32(data_size),
+	return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
 			      dp->block_id);
 }
 
 int drbd_send_ack_rp(drbd_dev *mdev, Drbd_Packet_Cmd cmd,
 		     Drbd_BlockRequest_Packet *rp)
 {
-	return _drbd_send_ack(mdev,cmd,rp->sector,rp->blksize,rp->block_id);
+	return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
 }
 
 int drbd_send_ack(drbd_dev *mdev, Drbd_Packet_Cmd cmd, struct Tl_epoch_entry *e)
 {
-	return _drbd_send_ack(mdev,cmd,
+	return _drbd_send_ack(mdev, cmd,
 			      cpu_to_be64(e->sector),
 			      cpu_to_be32(e->size),
 			      e->block_id);
 }
 
 int drbd_send_drequest(drbd_dev *mdev, int cmd,
-		       sector_t sector,int size, u64 block_id)
+		       sector_t sector, int size, u64 block_id)
 {
 	int ok;
 	Drbd_BlockRequest_Packet p;
@@ -1529,7 +1529,7 @@
 
 	/* FIXME BIO_RW_SYNC ? */
 
-	ok = drbd_send_cmd(mdev,USE_DATA_SOCKET,cmd,(Drbd_Header*)&p,sizeof(p));
+	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, (Drbd_Header*)&p, sizeof(p));
 	return ok;
 }
 
@@ -1594,7 +1594,7 @@
 		    int offset, size_t size)
 {
 	mm_segment_t oldfs = get_fs();
-	int sent,ok;
+	int sent, ok;
 	int len   = size;
 
 #ifdef SHOW_SENDPAGE_USAGE
@@ -1634,8 +1634,8 @@
 
 	set_fs(KERNEL_DS);
 	do {
-		sent = mdev->data.socket->ops->sendpage(mdev->data.socket,page,
-							offset,len,
+		sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
+							offset, len,
 							MSG_NOSIGNAL);
 		if (sent == -EAGAIN) {
 			if (we_should_drop_the_connection(mdev,
@@ -1646,7 +1646,7 @@
 		}
 		if (sent <= 0) {
 			WARN("%s: size=%d len=%d sent=%d\n",
-			     __func__,(int)size,len,sent);
+			     __func__, (int)size, len, sent);
 			break;
 		}
 		len    -= sent;
@@ -1695,7 +1695,7 @@
 	p.sector   = cpu_to_be64(req->sector);
 	p.block_id = (unsigned long)req;
 	p.seq_num  = cpu_to_be32( req->seq_num =
-				  atomic_add_return(1,&mdev->packet_seq) );
+				  atomic_add_return(1, &mdev->packet_seq) );
 	dp_flags = 0;
 	if (req->master_bio->bi_rw & BIO_RW_BARRIER) {
 		dp_flags |= DP_HARDBARRIER;
@@ -1709,14 +1709,14 @@
 	}
 
 	p.dp_flags = cpu_to_be32(dp_flags);
-	dump_packet(mdev,mdev->data.socket,0,(void*)&p, __FILE__, __LINE__);
-	set_bit(UNPLUG_REMOTE,&mdev->flags);
-	ok = sizeof(p) == drbd_send(mdev,mdev->data.socket,&p,sizeof(p),MSG_MORE);
+	dump_packet(mdev, mdev->data.socket, 0, (void*)&p, __FILE__, __LINE__);
+	set_bit(UNPLUG_REMOTE, &mdev->flags);
+	ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE);
 	if (ok) {
 		if (mdev->net_conf->wire_protocol == DRBD_PROT_A) {
-			ok = _drbd_send_bio(mdev,req->master_bio);
+			ok = _drbd_send_bio(mdev, req->master_bio);
 		} else {
-			ok = _drbd_send_zc_bio(mdev,req->master_bio);
+			ok = _drbd_send_zc_bio(mdev, req->master_bio);
 		}
 	}
 
@@ -1749,9 +1749,9 @@
 	if (!drbd_get_data_sock(mdev))
 		return 0;
 
-	dump_packet(mdev,mdev->data.socket,0,(void*)&p, __FILE__, __LINE__);
-	ok = sizeof(p) == drbd_send(mdev,mdev->data.socket,&p,sizeof(p),MSG_MORE);
-	if (ok) ok = _drbd_send_zc_bio(mdev,e->private_bio);
+	dump_packet(mdev, mdev->data.socket, 0, (void*)&p, __FILE__, __LINE__);
+	ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE);
+	if (ok) ok = _drbd_send_zc_bio(mdev, e->private_bio);
 
 	drbd_put_data_sock(mdev);
 	return ok;
@@ -1783,7 +1783,7 @@
 	struct kvec iov;
 #endif
 	struct msghdr msg;
-	int rv,sent=0;
+	int rv, sent=0;
 
 	if (!sock) return -1000;
 
@@ -1825,7 +1825,7 @@
 		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
 #endif
 		if (rv == -EAGAIN) {
-			if (we_should_drop_the_connection(mdev,sock))
+			if (we_should_drop_the_connection(mdev, sock))
 				break;
 			else
 				continue;
@@ -1837,7 +1837,7 @@
 			 * we don't care for now!
 			 * eventually this should be sorted out be the proper
 			 * use of the SIGNAL_ASENDER bit... */
-			if (DRBD_ratelimit(5*HZ,5)) {
+			if (DRBD_ratelimit(5*HZ, 5)) {
 				DBG("Got a signal in drbd_send(,%c,)!\n",
 				    sock == mdev->meta.socket ? 'm' : 's');
 				// dump_stack();
@@ -1861,9 +1861,9 @@
 			ERR("%s_sendmsg returned %d\n",
 			    sock == mdev->meta.socket ? "msock" : "sock",
 			    rv);
-			drbd_force_state(mdev, NS(conn,BrokenPipe));
+			drbd_force_state(mdev, NS(conn, BrokenPipe));
 		} else
-			drbd_force_state(mdev, NS(conn,Timeout));
+			drbd_force_state(mdev, NS(conn, Timeout));
 	}
 
 	return sent;
@@ -1878,7 +1878,7 @@
 	mdev = minor_to_mdev(MINOR(inode->i_rdev));
 	if (!mdev) return -ENODEV;
 
-	spin_lock_irqsave(&mdev->req_lock,flags);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	/* to have a stable mdev->state.role and no race with updating open_cnt */
 
 	if (mdev->state.role != Primary) {
@@ -1890,7 +1890,7 @@
 	}
 
 	if (!rv) mdev->open_cnt++;
-	spin_unlock_irqrestore(&mdev->req_lock,flags);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 	return rv;
 }
@@ -1918,7 +1918,7 @@
 {
 	drbd_dev *mdev = q->queuedata;
 
-	MTRACE(TraceTypeUnplug,TraceLvlSummary,
+	MTRACE(TraceTypeUnplug, TraceLvlSummary,
 	       INFO("got unplugged ap_bio_count=%d\n",
 		    atomic_read(&mdev->ap_bio_cnt));
 	       );
@@ -1932,13 +1932,13 @@
 	spin_lock_irq(&mdev->req_lock);
 	if (mdev->state.pdsk >= Inconsistent && mdev->state.conn >= Connected) {
 		D_ASSERT(mdev->state.role == Primary);
-		if (test_and_clear_bit(UNPLUG_REMOTE,&mdev->flags)) {
+		if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
 			/* add to the data.work queue,
 			 * unless already queued.
 			 * XXX this might be a good addition to drbd_queue_work
 			 * anyways, to detect "double queuing" ... */
 			if (list_empty(&mdev->unplug_work.list))
-				drbd_queue_work(&mdev->data.work,&mdev->unplug_work);
+				drbd_queue_work(&mdev->data.work, &mdev->unplug_work);
 		}
 	}
 	spin_unlock_irq(&mdev->req_lock);
@@ -1970,20 +1970,20 @@
 
 	drbd_set_defaults(mdev);
 
-	atomic_set(&mdev->ap_bio_cnt,0);
-	atomic_set(&mdev->ap_pending_cnt,0);
-	atomic_set(&mdev->rs_pending_cnt,0);
-	atomic_set(&mdev->unacked_cnt,0);
-	atomic_set(&mdev->local_cnt,0);
-	atomic_set(&mdev->net_cnt,0);
-	atomic_set(&mdev->packet_seq,0);
+	atomic_set(&mdev->ap_bio_cnt, 0);
+	atomic_set(&mdev->ap_pending_cnt, 0);
+	atomic_set(&mdev->rs_pending_cnt, 0);
+	atomic_set(&mdev->unacked_cnt, 0);
+	atomic_set(&mdev->local_cnt, 0);
+	atomic_set(&mdev->net_cnt, 0);
+	atomic_set(&mdev->packet_seq, 0);
 	atomic_set(&mdev->pp_in_use, 0);
 
 	init_MUTEX(&mdev->md_io_mutex);
 	init_MUTEX(&mdev->data.mutex);
 	init_MUTEX(&mdev->meta.mutex);
-	sema_init(&mdev->data.work.s,0);
-	sema_init(&mdev->meta.work.s,0);
+	sema_init(&mdev->data.work.s, 0);
+	sema_init(&mdev->meta.work.s, 0);
 
 	spin_lock_init(&mdev->data.work.q_lock);
 	spin_lock_init(&mdev->meta.work.q_lock);
@@ -2024,7 +2024,7 @@
 	drbd_thread_init(mdev, &mdev->asender, drbd_asender);
 
 #ifdef __arch_um__
-	INFO("mdev = 0x%p\n",mdev);
+	INFO("mdev = 0x%p\n", mdev);
 #endif
 }
 
@@ -2074,7 +2074,7 @@
 
 	/* no need to lock it, I'm the only thread alive */
 	if (mdev->epoch_size !=  0)
-		ERR("epoch_size:%d\n",mdev->epoch_size);
+		ERR("epoch_size:%d\n", mdev->epoch_size);
 	mdev->al_writ_cnt  =
 	mdev->bm_writ_cnt  =
 	mdev->read_cnt     =
@@ -2088,8 +2088,8 @@
 	mdev->rs_mark_left =
 	mdev->rs_mark_time = 0;
 	D_ASSERT(mdev->net_conf == NULL);
-	drbd_set_my_capacity(mdev,0);
-	drbd_bm_resize(mdev,0);
+	drbd_set_my_capacity(mdev, 0);
+	drbd_bm_resize(mdev, 0);
 
 	// just in case
 	drbd_free_resources(mdev);
@@ -2180,7 +2180,7 @@
 	for (i=0;i< number;i++) {
 		page = alloc_page(GFP_HIGHUSER);
 		if (!page) goto Enomem;
-		set_page_private(page,(unsigned long)drbd_pp_pool);
+		set_page_private(page, (unsigned long)drbd_pp_pool);
 		drbd_pp_pool = page;
 	}
 	drbd_pp_vacant = number;
@@ -2217,7 +2217,7 @@
 
 	if (minor_table) {
 		if (drbd_proc)
-			remove_proc_entry("drbd",&proc_root);
+			remove_proc_entry("drbd", &proc_root);
 		i=minor_count;
 		while (i--) {
 			drbd_dev        *mdev  = minor_to_mdev(i);
@@ -2242,24 +2242,24 @@
 			if (mdev->bitmap) drbd_bm_cleanup(mdev);
 			if (mdev->resync) lc_free(mdev->resync);
 
-			rr = drbd_release_ee(mdev,&mdev->active_ee);
-			if (rr) ERR("%d EEs in active list found!\n",rr);
+			rr = drbd_release_ee(mdev, &mdev->active_ee);
+			if (rr) ERR("%d EEs in active list found!\n", rr);
 
-			rr = drbd_release_ee(mdev,&mdev->sync_ee);
-			if (rr) ERR("%d EEs in sync list found!\n",rr);
+			rr = drbd_release_ee(mdev, &mdev->sync_ee);
+			if (rr) ERR("%d EEs in sync list found!\n", rr);
 
-			rr = drbd_release_ee(mdev,&mdev->read_ee);
-			if (rr) ERR("%d EEs in read list found!\n",rr);
+			rr = drbd_release_ee(mdev, &mdev->read_ee);
+			if (rr) ERR("%d EEs in read list found!\n", rr);
 
-			rr = drbd_release_ee(mdev,&mdev->done_ee);
-			if (rr) ERR("%d EEs in done list found!\n",rr);
+			rr = drbd_release_ee(mdev, &mdev->done_ee);
+			if (rr) ERR("%d EEs in done list found!\n", rr);
 
-			rr = drbd_release_ee(mdev,&mdev->net_ee);
-			if (rr) ERR("%d EEs in net list found!\n",rr);
+			rr = drbd_release_ee(mdev, &mdev->net_ee);
+			if (rr) ERR("%d EEs in net list found!\n", rr);
 
 			ERR_IF (!list_empty(&mdev->data.work.q)) {
 				struct list_head *lp;
-				list_for_each(lp,&mdev->data.work.q) {
+				list_for_each(lp, &mdev->data.work.q) {
 					DUMPP(lp);
 				}
 			};
@@ -2308,7 +2308,7 @@
 	struct gendisk *disk;
 	request_queue_t *q;
 
-	mdev = kzalloc(sizeof(drbd_dev),GFP_KERNEL);
+	mdev = kzalloc(sizeof(drbd_dev), GFP_KERNEL);
 	if (!mdev) goto Enomem;
 
 	mdev->minor = minor;
@@ -2335,7 +2335,7 @@
 	disk->private_data = mdev;
 	add_disk(disk);
 
-	mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR,minor));
+	mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
 	// we have no partitions. we contain only ourselves.
 	mdev->this_bdev->bd_contains = mdev->this_bdev;
 
@@ -2352,7 +2352,7 @@
 	// no need to lock access, we are still initializing the module.
 	if (!tl_init(mdev)) goto Enomem;
 
-	mdev->app_reads_hash=kzalloc(APP_R_HSIZE*sizeof(void*),GFP_KERNEL);
+	mdev->app_reads_hash=kzalloc(APP_R_HSIZE*sizeof(void*), GFP_KERNEL);
 	if (!mdev->app_reads_hash) goto Enomem;
 
 	return mdev;
@@ -2370,41 +2370,9 @@
 {
 	int err;
 
-#if 0
-// warning LGE "DEBUGGING"
-/* I am too lazy to calculate this by hand	-lge
- */
-#define SZO(x) printk(KERN_ERR "sizeof(" #x ") = %d\n", sizeof(x))
-	SZO(struct Drbd_Conf);
-	SZO(struct buffer_head);
-	SZO(Drbd_Polymorph_Packet);
-	SZO(struct drbd_socket);
-	SZO(struct bm_extent);
-	SZO(struct lc_element);
-	SZO(struct semaphore);
-	SZO(struct drbd_request);
-	SZO(struct bio);
-	SZO(wait_queue_head_t);
-	SZO(spinlock_t);
-	SZO(Drbd_Header);
-	SZO(Drbd_HandShake_Packet);
-	SZO(Drbd_Barrier_Packet);
-	SZO(Drbd_BarrierAck_Packet);
-	SZO(Drbd_SyncParam_Packet);
-	SZO(Drbd06_Parameter_P);
-	SZO(Drbd_Data_Packet);
-	SZO(Drbd_BlockAck_Packet);
-	printk(KERN_ERR "AL_EXTENTS_PT = %d\n",AL_EXTENTS_PT);
-	printk(KERN_ERR "DRBD_MAX_SECTORS = %llu\n",DRBD_MAX_SECTORS);
-	printk(KERN_ERR "DRBD_MAX_SECTORS_FLEX = %llu\n",DRBD_MAX_SECTORS_FLEX);
-#define OOF(t,m) printk(KERN_ERR "offsetof("#t","#m") = %d\n", offsetof(t,m))
-	OOF(struct Drbd_Conf,bitmap);
-	//OOF(struct drbd_bitmap,bm_set);
-	return -EBUSY;
-#endif
 #ifdef __arch_um__
 	printk(KERN_INFO "drbd_module = 0x%p core = 0x%p\n",
-	       THIS_MODULE,THIS_MODULE->module_core);
+	       THIS_MODULE, THIS_MODULE->module_core);
 #endif
 
 	if (sizeof(Drbd_HandShake_Packet) != 80) {
@@ -2415,7 +2383,7 @@
 
 	if (1 > minor_count||minor_count > 255) {
 		printk(KERN_ERR DEVICE_NAME
-			": invalid minor_count (%d)\n",minor_count);
+			": invalid minor_count (%d)\n", minor_count);
 #ifdef MODULE
 		return -EINVAL;
 #else
@@ -2445,7 +2413,7 @@
 	init_waitqueue_head(&drbd_pp_wait);
 
 	drbd_proc = NULL; // play safe for drbd_cleanup
-	minor_table = kzalloc(sizeof(drbd_dev *)*minor_count,GFP_KERNEL);
+	minor_table = kzalloc(sizeof(drbd_dev *)*minor_count, GFP_KERNEL);
 	if (!minor_table) goto Enomem;
 
 	if ((err = drbd_create_mempools()))
@@ -2470,7 +2438,7 @@
 
 	printk(KERN_INFO DEVICE_NAME ": initialised. "
 	       "Version: " REL_VERSION " (api:%d/proto:%d)\n",
-	       API_VERSION,PRO_VERSION);
+	       API_VERSION, PRO_VERSION);
 	printk(KERN_INFO DEVICE_NAME ": %s\n", drbd_buildtag());
 	printk(KERN_INFO DEVICE_NAME": registered as block device major %d\n", DRBD_MAJOR);
 	printk(KERN_INFO DEVICE_NAME": minor_table @ 0x%p\n", minor_table);
@@ -2553,18 +2521,18 @@
 	sector_t sector;
 	int i;
 
-	if (!test_and_clear_bit(MD_DIRTY,&mdev->flags)) return;
+	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) return;
 	del_timer(&mdev->md_sync_timer);
 
 	// We use here Failed and not Attaching because we try to write
 	// metadata even if we detach due to a disk failure!
-	if (!inc_local_if_state(mdev,Failed)) return;
+	if (!inc_local_if_state(mdev, Failed)) return;
 
 	INFO("Writing meta data super block now.\n");
 
 	down(&mdev->md_io_mutex);
 	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
-	memset(buffer,0,512);
+	memset(buffer, 0, 512);
 
 	buffer->la_size=cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
 	for (i = Current; i < UUID_SIZE; i++)
@@ -2580,20 +2548,20 @@
 
 	buffer->bm_offset = cpu_to_be32(mdev->bc->md.bm_offset);
 
-	D_ASSERT(drbd_md_ss__(mdev,mdev->bc) == mdev->bc->md.md_offset);
+	D_ASSERT(drbd_md_ss__(mdev, mdev->bc) == mdev->bc->md.md_offset);
 	sector = mdev->bc->md.md_offset;
 
 #if 0
 	/* FIXME sooner or later I'd like to use the MD_DIRTY flag everywhere,
 	 * so we can avoid unneccessary md writes.
 	 */
-	ERR_IF (!test_bit(MD_DIRTY,&mdev->flags)) {
+	ERR_IF (!test_bit(MD_DIRTY, &mdev->flags)) {
 		dump_stack();
 	}
 #endif
 
-	if (drbd_md_sync_page_io(mdev,mdev->bc,sector,WRITE)) {
-		clear_bit(MD_DIRTY,&mdev->flags);
+	if (drbd_md_sync_page_io(mdev, mdev->bc, sector, WRITE)) {
+		clear_bit(MD_DIRTY, &mdev->flags);
 	} else {
 		/* this was a try anyways ... */
 		ERR("meta data update failed!\n");
@@ -2619,14 +2587,14 @@
 int drbd_md_read(drbd_dev *mdev, struct drbd_backing_dev *bdev)
 {
 	struct meta_data_on_disk * buffer;
-	int i,rv = NoError;
+	int i, rv = NoError;
 
-	if (!inc_local_if_state(mdev,Attaching)) return MDIOError;
+	if (!inc_local_if_state(mdev, Attaching)) return MDIOError;
 
 	down(&mdev->md_io_mutex);
 	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
 
-	if ( ! drbd_md_sync_page_io(mdev,bdev,bdev->md.md_offset,READ) ) {
+	if ( ! drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ) ) {
 		/* NOTE: cant do normal error processing here as this is
 		   called BEFORE disk is attached */
 		ERR("Error while reading metadata.\n");
@@ -2693,8 +2661,8 @@
  */
 void drbd_md_mark_dirty(drbd_dev *mdev)
 {
-	set_bit(MD_DIRTY,&mdev->flags);
-	mod_timer(&mdev->md_sync_timer,jiffies + 5*HZ );
+	set_bit(MD_DIRTY, &mdev->flags);
+	mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ );
 }
 
 
@@ -2705,8 +2673,8 @@
 	for ( i=History_start ; i<History_end ; i++ ) {
 		mdev->bc->md.uuid[i+1] = mdev->bc->md.uuid[i];
 
-		MTRACE(TraceTypeUuid,TraceLvlAll,
-		       drbd_print_uuid(mdev,i+1);
+		MTRACE(TraceTypeUuid, TraceLvlAll,
+		       drbd_print_uuid(mdev, i+1);
 			);
 	}
 }
@@ -2723,8 +2691,8 @@
 
 	mdev->bc->md.uuid[idx] = val;
 
-	MTRACE(TraceTypeUuid,TraceLvlSummary,
-	       drbd_print_uuid(mdev,idx);
+	MTRACE(TraceTypeUuid, TraceLvlSummary,
+	       drbd_print_uuid(mdev, idx);
 		);
 
 	drbd_md_mark_dirty(mdev);
@@ -2736,11 +2704,11 @@
 	if (mdev->bc->md.uuid[idx]) {
 		drbd_uuid_move_history(mdev);
 		mdev->bc->md.uuid[History_start]=mdev->bc->md.uuid[idx];
-		MTRACE(TraceTypeUuid,TraceLvlMetrics,
-		       drbd_print_uuid(mdev,History_start);
+		MTRACE(TraceTypeUuid, TraceLvlMetrics,
+		       drbd_print_uuid(mdev, History_start);
 			);
 	}
-	_drbd_uuid_set(mdev,idx,val);
+	_drbd_uuid_set(mdev, idx, val);
 }
 
 void drbd_uuid_new_current(drbd_dev *mdev)
@@ -2748,8 +2716,8 @@
 	INFO("Creating new current UUID\n");
 	D_ASSERT(mdev->bc->md.uuid[Bitmap] == 0);
 	mdev->bc->md.uuid[Bitmap] = mdev->bc->md.uuid[Current];
-	MTRACE(TraceTypeUuid,TraceLvlMetrics,
-	       drbd_print_uuid(mdev,Bitmap);
+	MTRACE(TraceTypeUuid, TraceLvlMetrics,
+	       drbd_print_uuid(mdev, Bitmap);
 		);
 
 	get_random_bytes(&mdev->bc->md.uuid[Current], sizeof(u64));
@@ -2759,8 +2727,8 @@
 		mdev->bc->md.uuid[Current] &= ~((u64)1);
 	}
 
-	MTRACE(TraceTypeUuid,TraceLvlSummary,
-	       drbd_print_uuid(mdev,Current);
+	MTRACE(TraceTypeUuid, TraceLvlSummary,
+	       drbd_print_uuid(mdev, Current);
 		);
 
 	drbd_md_mark_dirty(mdev);
@@ -2775,9 +2743,9 @@
 		mdev->bc->md.uuid[History_start]=mdev->bc->md.uuid[Bitmap];
 		mdev->bc->md.uuid[Bitmap]=0;
 
-		MTRACE(TraceTypeUuid,TraceLvlMetrics,
-		       drbd_print_uuid(mdev,History_start);
-		       drbd_print_uuid(mdev,Bitmap);
+		MTRACE(TraceTypeUuid, TraceLvlMetrics,
+		       drbd_print_uuid(mdev, History_start);
+		       drbd_print_uuid(mdev, Bitmap);
 			);
 	} else {
 		if (mdev->bc->md.uuid[Bitmap]) WARN("bm UUID already set");
@@ -2785,8 +2753,8 @@
 		mdev->bc->md.uuid[Bitmap] = val;
 		mdev->bc->md.uuid[Bitmap] &= ~((u64)1);
 
-		MTRACE(TraceTypeUuid,TraceLvlMetrics,
-		       drbd_print_uuid(mdev,Bitmap);
+		MTRACE(TraceTypeUuid, TraceLvlMetrics,
+		       drbd_print_uuid(mdev, Bitmap);
 			);
 	}
 	drbd_md_mark_dirty(mdev);
@@ -2818,7 +2786,7 @@
 {
 	drbd_dev* mdev = (drbd_dev*) data;
 
-	drbd_queue_work_front(&mdev->data.work,&mdev->md_sync_work);
+	drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
 }
 
 STATIC int w_md_sync(drbd_dev *mdev, struct drbd_work *w, int unused)
@@ -2877,7 +2845,7 @@
 unsigned int
 _drbd_insert_fault(drbd_dev *mdev, unsigned int type)
 {
-	static struct fault_random_state rrs = {0,0};
+	static struct fault_random_state rrs = {0, 0};
 
 	unsigned int ret = (
 		(fault_devs == 0 || ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
@@ -2912,7 +2880,7 @@
 /* Pretty print a UUID value */
 void
 drbd_print_uuid(drbd_dev *mdev, unsigned int idx) {
-	INFO(" uuid[%s] now %016llX\n",_drbd_uuid_str(idx),mdev->bc->md.uuid[idx]);
+	INFO(" uuid[%s] now %016llX\n", _drbd_uuid_str(idx), mdev->bc->md.uuid[idx]);
 }
 
 
@@ -2942,8 +2910,8 @@
 
 */
 void
-drbd_print_buffer(const char *prefix,unsigned int flags,int size,
-		  const void *buffer,const void *buffer_va,
+drbd_print_buffer(const char *prefix, unsigned int flags, int size,
+		  const void *buffer, const void *buffer_va,
 		  unsigned int length)
 
 #define LINE_SIZE       16
@@ -2952,8 +2920,8 @@
 	const unsigned char *pstart;
 	const unsigned char *pstart_va;
 	const unsigned char *pend;
-	char bytes_str[LINE_SIZE*3+8],ascii_str[LINE_SIZE+8];
-	char *pbytes=bytes_str,*pascii=ascii_str;
+	char bytes_str[LINE_SIZE*3+8], ascii_str[LINE_SIZE+8];
+	char *pbytes=bytes_str, *pascii=ascii_str;
 	int  offset=0;
 	long sizemask;
 	int  field_width;
@@ -3001,8 +2969,8 @@
 	while (p < pend_str) {
 		if (p < (const unsigned char *)buffer || p >= pend) {
 			// Before start of buffer or after end- print spaces
-			pbytes += sprintf(pbytes,"%*c ",field_width,' ');
-			pascii += sprintf(pascii,"%*c",size,' ');
+			pbytes += sprintf(pbytes, "%*c ", field_width, ' ');
+			pascii += sprintf(pascii, "%*c", size, ' ');
 			p += size;
 		}
 		else {
@@ -3021,7 +2989,7 @@
 				break;
 			}
 
-			pbytes += sprintf(pbytes,"%0*x ",field_width,val);
+			pbytes += sprintf(pbytes, "%0*x ", field_width, val);
 
 			for (index = size; index; index--) {
 				*pascii++ = isprint(*p) ? *p : '.';
@@ -3038,8 +3006,8 @@
 			       prefix,
 			       (flags & DBGPRINT_BUFFADDR)
 			       ? (long)pstart_va : (long)offset,
-			       LINE_ENTRIES*(field_width+1),bytes_str,
-			       LINE_SIZE,ascii_str);
+			       LINE_ENTRIES*(field_width+1), bytes_str,
+			       LINE_SIZE, ascii_str);
 
 			// Move onto next line
 			pstart_va += (p-pstart);
@@ -3096,9 +3064,9 @@
 
 char *_dump_block_id(u64 block_id, char *buff) {
     if (is_syncer_block_id(block_id))
-	strcpy(buff,"SyncerId");
+	strcpy(buff, "SyncerId");
     else
-	sprintf(buff,"%llx",block_id);
+	sprintf(buff, "%llx", block_id);
 
     return buff;
 }
@@ -3110,7 +3078,7 @@
 	char *sockname = sock == mdev->meta.socket ? "meta" : "data";
 	int cmd = (recv == 2) ? p->head.command : be16_to_cpu(p->head.command);
 	char tmp[300];
-	drbd_state_t m,v;
+	drbd_state_t m, v;
 
 	switch (cmd) {
 	case HandShake:
@@ -3123,7 +3091,7 @@
 	case Data:
 		INFOP("%s (sector %llus, id %s, seq %u, f %x)\n", cmdname(cmd),
 		      (unsigned long long)be64_to_cpu(p->Data.sector),
-		      _dump_block_id(p->Data.block_id,tmp),
+		      _dump_block_id(p->Data.block_id, tmp),
 		      be32_to_cpu(p->Data.seq_num),
 		      be32_to_cpu(p->Data.dp_flags)
 			);
@@ -3133,7 +3101,7 @@
 	case RSDataReply:
 		INFOP("%s (sector %llus, id %s)\n", cmdname(cmd),
 		      (unsigned long long)be64_to_cpu(p->Data.sector),
-		      _dump_block_id(p->Data.block_id,tmp)
+		      _dump_block_id(p->Data.block_id, tmp)
 			);
 		break;
 
@@ -3146,7 +3114,7 @@
 		INFOP("%s (sector %llus, size %u, id %s, seq %u)\n", cmdname(cmd),
 		      (long long)be64_to_cpu(p->BlockAck.sector),
 		      be32_to_cpu(p->BlockAck.blksize),
-		      _dump_block_id(p->BlockAck.block_id,tmp),
+		      _dump_block_id(p->BlockAck.block_id, tmp),
 		      be32_to_cpu(p->BlockAck.seq_num)
 			);
 		break;
@@ -3156,7 +3124,7 @@
 		INFOP("%s (sector %llus, size %u, id %s)\n", cmdname(cmd),
 		      (long long)be64_to_cpu(p->BlockRequest.sector),
 		      be32_to_cpu(p->BlockRequest.blksize),
-		      _dump_block_id(p->BlockRequest.block_id,tmp)
+		      _dump_block_id(p->BlockRequest.block_id, tmp)
 			);
 		break;
 
@@ -3185,14 +3153,14 @@
 	case ReportState:
 		v.i = be32_to_cpu(p->State.state);
 		m.i = 0xffffffff;
-		dump_st(tmp,sizeof(tmp),m,v);
+		dump_st(tmp, sizeof(tmp), m, v);
 		INFOP("%s (s %x {%s})\n", cmdname(cmd), v.i, tmp);
 		break;
 
 	case StateChgRequest:
 		m.i = be32_to_cpu(p->ReqState.mask);
 		v.i = be32_to_cpu(p->ReqState.val);
-		dump_st(tmp,sizeof(tmp),m,v);
+		dump_st(tmp, sizeof(tmp), m, v);
 		INFOP("%s (m %x v %x {%s})\n", cmdname(cmd), m.i, v.i, tmp);
 		break;
 
@@ -3210,7 +3178,7 @@
 			break;
 		/* fall through... */
 	default:
-		INFOP("%s (%u)\n",cmdname(cmd), cmd);
+		INFOP("%s (%u)\n", cmdname(cmd), cmd);
 		break;
 	}
 }
@@ -3233,7 +3201,7 @@
 
 	INFO("%s %s Bio:%p - %soffset " SECTOR_FORMAT ", size %x\n",
 	     complete? "<<<":">>>",
-	     bio_rw(bio)==WRITE?"Write":"Read",bio,
+	     bio_rw(bio)==WRITE?"Write":"Read", bio,
 	     complete? (drbd_bio_uptodate(bio)? "Success, ":"Failed, ") : "",
 	     bio->bi_sector << SECTOR_SHIFT,
 	     bio->bi_size);
@@ -3242,7 +3210,7 @@
 	    ((bio_rw(bio) == WRITE) ^ complete) ) {
 		printk(KERN_DEBUG "  ind     page   offset   length\n");
 		__bio_for_each_segment(bvec, bio, segno, 0) {
-			printk(KERN_DEBUG "  [%d] %p %8.8x %8.8x\n",segno,
+			printk(KERN_DEBUG "  [%d] %p %8.8x %8.8x\n", segno,
 			       bvec->bv_page, bvec->bv_offset, bvec->bv_len);
 
 			if (trace_level >= TraceLvlAll) {
@@ -3251,7 +3219,7 @@
 
 				bvec_buf = bvec_kmap_irq(bvec, &flags);
 
-				drbd_print_buffer("    ",DBGPRINT_BUFFADDR,1,
+				drbd_print_buffer("    ", DBGPRINT_BUFFADDR, 1,
 						  bvec_buf,
 						  faddr,
 						  (bvec->bv_len <= 0x80)? bvec->bv_len : 0x80);

Modified: branches/drbd-8.0-for-linus/drbd/drbd_nl.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_nl.c	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_nl.c	2007-07-24 11:38:05 UTC (rev 2985)
@@ -58,7 +58,7 @@
 		fields \
 		default: \
 			if (tag & T_MANDATORY) { \
-				ERR("Unknown tag: %d\n",tag_number(tag)); \
+				ERR("Unknown tag: %d\n", tag_number(tag)); \
 				return 0; \
 			} \
 		} \
@@ -66,22 +66,22 @@
 	} \
 	return 1; \
 }
-#define INTEGER(pn,pr,member) \
+#define INTEGER(pn, pr, member) \
 	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
 		 arg->member = *(int*)(tags); \
 		 break;
-#define INT64(pn,pr,member) \
+#define INT64(pn, pr, member) \
 	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
 		 arg->member = *(u64*)(tags); \
 		 break;
-#define BIT(pn,pr,member) \
+#define BIT(pn, pr, member) \
 	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
 		 arg->member = *(char*)(tags) ? 1 : 0; \
 		 break;
-#define STRING(pn,pr,member,len) \
+#define STRING(pn, pr, member, len) \
 	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
 		 arg->member ## _len = dlen; \
-		 memcpy(arg->member,tags,min_t(size_t,dlen,len)); \
+		 memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
 		 break;
 #include "linux/drbd_nl.h"
 
@@ -94,25 +94,25 @@
 	return tags; \
 }
 
-#define INTEGER(pn,pr,member) \
+#define INTEGER(pn, pr, member) \
 	*tags++ = pn | pr | TT_INTEGER; \
 	*tags++ = sizeof(int); \
 	*(int*)tags = arg->member; \
 	tags = (unsigned short*)((char*)tags+sizeof(int));
-#define INT64(pn,pr,member) \
+#define INT64(pn, pr, member) \
 	*tags++ = pn | pr | TT_INT64; \
 	*tags++ = sizeof(u64); \
 	*(u64*)tags = arg->member; \
 	tags = (unsigned short*)((char*)tags+sizeof(u64));
-#define BIT(pn,pr,member) \
+#define BIT(pn, pr, member) \
 	*tags++ = pn | pr | TT_BIT; \
 	*tags++ = sizeof(char); \
 	*(char*)tags = arg->member; \
 	tags = (unsigned short*)((char*)tags+sizeof(char));
-#define STRING(pn,pr,member,len) \
+#define STRING(pn, pr, member, len) \
 	*tags++ = pn | pr | TT_STRING; \
 	*tags++ = arg->member ## _len; \
-	memcpy(tags,arg->member, arg->member ## _len); \
+	memcpy(tags, arg->member, arg->member ## _len); \
 	tags = (unsigned short*)((char*)tags + arg->member ## _len);
 #include "linux/drbd_nl.h"
 
@@ -171,12 +171,12 @@
 				"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
 				NULL };
 
-	snprintf(mb,12,"minor-%d",mdev_to_minor(mdev));
+	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
 
-	INFO("helper command: %s %s\n",usermode_helper,cmd);
+	INFO("helper command: %s %s\n", usermode_helper, cmd);
 
-	drbd_bcast_ev_helper(mdev,cmd);
-	return call_usermodehelper(usermode_helper,argv,envp,1);
+	drbd_bcast_ev_helper(mdev, cmd);
+	return call_usermodehelper(usermode_helper, argv, envp, 1);
 }
 
 drbd_disks_t drbd_try_outdate_peer(drbd_dev *mdev)
@@ -195,9 +195,9 @@
 
 	D_ASSERT( fp > DontCare );
 
-	if (fp == Stonith) drbd_request_state(mdev,NS(susp,1));
+	if (fp == Stonith) drbd_request_state(mdev, NS(susp, 1));
 
-	r=drbd_khelper(mdev,"outdate-peer");
+	r=drbd_khelper(mdev, "outdate-peer");
 
 	switch( (r>>8) & 0xff ) {
 	case 3: /* peer is inconsistent */
@@ -213,7 +213,7 @@
 	case 6: /* Peer is primary, voluntarily outdate myself */
 		WARN("Peer is primary, outdating myself.\n");
 		nps = DUnknown;
-		drbd_request_state(mdev,NS(disk,Outdated));
+		drbd_request_state(mdev, NS(disk, Outdated));
 		break;
 	case 7:
 		if (fp != Stonith) {
@@ -224,19 +224,19 @@
 	default:
 		/* The script is broken ... */
 		nps = DUnknown;
-		drbd_request_state(mdev,NS(disk,Outdated));
-		ERR("outdate-peer helper broken, returned %d \n",(r>>8)&0xff);
+		drbd_request_state(mdev, NS(disk, Outdated));
+		ERR("outdate-peer helper broken, returned %d \n", (r>>8)&0xff);
 		return nps;
 	}
 
-	INFO("outdate-peer helper returned %d \n",(r>>8)&0xff);
+	INFO("outdate-peer helper returned %d \n", (r>>8)&0xff);
 	return nps;
 }
 
 
 int drbd_set_role(drbd_dev *mdev, drbd_role_t new_role, int force)
 {
-	int r=0,forced = 0, try=0;
+	int r=0, forced = 0, try=0;
 	drbd_state_t mask, val;
 	drbd_disks_t nps;
 
@@ -248,7 +248,7 @@
 	val.i  = 0; val.role  = new_role;
 
 	while (try++ < 3) {
-		r = _drbd_request_state(mdev,mask,val,0);
+		r = _drbd_request_state(mdev, mask, val, 0);
 		if ( r == SS_NoUpToDateDisk && force &&
 		    ( mdev->state.disk == Inconsistent ||
 		      mdev->state.disk == Outdated ) ) {
@@ -296,7 +296,7 @@
 			continue;
 		}
 		if (r < SS_Success) {
-			r = drbd_request_state(mdev,mask,val); // Be verbose.
+			r = drbd_request_state(mdev, mask, val); // Be verbose.
 			if (r < SS_Success) goto fail;
 		}
 		break;
@@ -372,7 +372,7 @@
 	struct primary primary_args;
 
 	memset(&primary_args, 0, sizeof(struct primary));
-	if (!primary_from_tags(mdev,nlp->tag_list,&primary_args)) {
+	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
 		reply->ret_code=UnknownMandatoryTag;
 		return 0;
 	}
@@ -400,7 +400,7 @@
 	default:
 		/* v07 style fixed size indexed meta data */
 		bdev->md.md_size_sect = MD_RESERVED_SECT;
-		bdev->md.md_offset = drbd_md_ss__(mdev,bdev);
+		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
 		bdev->md.al_offset = MD_AL_OFFSET;
 		bdev->md.bm_offset = MD_BM_OFFSET;
 		break;
@@ -413,15 +413,15 @@
 		break;
 	case DRBD_MD_INDEX_INTERNAL:
 	case DRBD_MD_INDEX_FLEX_INT:
-		bdev->md.md_offset = drbd_md_ss__(mdev,bdev);
+		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
 		/* al size is still fixed */
 		bdev->md.al_offset = -MD_AL_MAX_SIZE;
                 //LGE FIXME max size check missing.
 		/* we need (slightly less than) ~ this much bitmap sectors: */
 		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
-		md_size_sect = ALIGN(md_size_sect,BM_SECT_PER_EXT);
+		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
 		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
-		md_size_sect = ALIGN(md_size_sect,8);
+		md_size_sect = ALIGN(md_size_sect, 8);
 
 		/* plus the "drbd meta data super block",
 		 * and the activity log; */
@@ -437,13 +437,13 @@
 char* ppsize(char* buf, unsigned long long size)
 {
 	// Needs 9 bytes at max.
-	static char units[] = { 'K','M','G','T','P','E' };
+	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
 	int base = 0;
 	while (size >= 10000 ) {
 		size = size >> 10;
 		base++;
 	}
-	sprintf(buf,"%lu %cB",(long)size,units[base]);
+	sprintf(buf, "%lu %cB", (long)size, units[base]);
 
 	return buf;
 }
@@ -467,14 +467,14 @@
 	la_size = mdev->bc->md.la_size_sect;
 
 	// TODO: should only be some assert here, not (re)init...
-	drbd_md_set_sector_offsets(mdev,mdev->bc);
+	drbd_md_set_sector_offsets(mdev, mdev->bc);
 
-	size = drbd_new_dev_size(mdev,mdev->bc);
+	size = drbd_new_dev_size(mdev, mdev->bc);
 
 	if ( drbd_get_capacity(mdev->this_bdev) != size ||
 	    drbd_bm_capacity(mdev) != size ) {
 		int err;
-		err = drbd_bm_resize(mdev,size);
+		err = drbd_bm_resize(mdev, size);
 		if (unlikely(err)) {
 			/* currently there is only one error: ENOMEM! */
 			size = drbd_bm_capacity(mdev)>>1;
@@ -490,9 +490,9 @@
 			rv = err;
 		}
 		// racy, see comments above.
-		drbd_set_my_capacity(mdev,size);
+		drbd_set_my_capacity(mdev, size);
 		mdev->bc->md.la_size_sect = size;
-		INFO("size = %s (%llu KB)\n",ppsize(ppb,size>>1),
+		INFO("size = %s (%llu KB)\n", ppsize(ppb, size>>1),
 		     (unsigned long long)size>>1);
 	}
 	if (rv < 0) goto out;
@@ -509,7 +509,7 @@
 	}
 
 	if (la_size_changed || md_moved) {
-		if ( inc_local_if_state(mdev,Attaching) ) {
+		if ( inc_local_if_state(mdev, Attaching) ) {
 			drbd_al_shrink(mdev); // All extents inactive.
 			rv = drbd_bm_write(mdev);  // write bitmap
 			// Write mdev->la_size to on disk.
@@ -535,7 +535,7 @@
 	m_size = drbd_get_max_capacity(bdev);
 
 	if (p_size && m_size) {
-		size=min_t(sector_t,p_size,m_size);
+		size=min_t(sector_t, p_size, m_size);
 	} else {
 		if (la_size) {
 			size=la_size;
@@ -572,7 +572,7 @@
  */
 STATIC int drbd_check_al_size(drbd_dev *mdev)
 {
-	struct lru_cache *n,*t;
+	struct lru_cache *n, *t;
 	struct lc_element *e;
 	unsigned int in_use;
 	int i;
@@ -596,7 +596,7 @@
 	spin_lock_irq(&mdev->al_lock);
 	if (t) {
 		for (i=0; i < t->nr_elements; i++) {
-			e = lc_entry(t,i);
+			e = lc_entry(t, i);
 			if (e->refcnt)
 				ERR("refcnt(%d)==%d\n",
 				    e->lc_number, e->refcnt);
@@ -629,7 +629,7 @@
 
 	max_seg_s = min(b->max_sectors * b->hardsect_size, max_seg_s);
 
-	MTRACE(TraceTypeRq,TraceLvlSummary,
+	MTRACE(TraceTypeRq, TraceLvlSummary,
 	       DUMPI(b->max_sectors);
 	       DUMPI(b->max_phys_segments);
 	       DUMPI(b->max_hw_segments);
@@ -654,7 +654,7 @@
 	// workaround here:
 	if (q->max_segment_size == 0) q->max_segment_size = max_seg_s;
 
-	MTRACE(TraceTypeRq,TraceLvlSummary,
+	MTRACE(TraceTypeRq, TraceLvlSummary,
 	       DUMPI(q->max_sectors);
 	       DUMPI(q->max_phys_segments);
 	       DUMPI(q->max_hw_segments);
@@ -686,8 +686,8 @@
 	struct drbd_backing_dev* nbc=NULL; // new_backing_conf
 	struct inode *inode, *inode2;
 	struct lru_cache* resync_lru = NULL;
-	drbd_state_t ns,os;
-	int rv,ntries=0;
+	drbd_state_t ns, os;
+	int rv, ntries=0;
 
 	/* if you want to reconfigure, please tear down first */
 	if (mdev->state.disk > Diskless) {
@@ -709,23 +709,23 @@
 		schedule_timeout(HZ/10);
 	}
 
-	nbc = kmalloc(sizeof(struct drbd_backing_dev),GFP_KERNEL);
+	nbc = kmalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
 	if (!nbc) {
 		retcode=KMallocFailed;
 		goto fail;
 	}
 
 	if ( !(nlp->flags & DRBD_NL_SET_DEFAULTS) && inc_local(mdev) ) {
-		memcpy(&nbc->dc,&mdev->bc->dc,sizeof(struct disk_conf));
+		memcpy(&nbc->dc, &mdev->bc->dc, sizeof(struct disk_conf));
 		dec_local(mdev);
 	} else {
-		memset(&nbc->dc,0,sizeof(struct disk_conf));
+		memset(&nbc->dc, 0, sizeof(struct disk_conf));
 		nbc->dc.disk_size   = DRBD_DISK_SIZE_SECT_DEF;
 		nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF;
 		nbc->dc.fencing     = DRBD_FENCING_DEF;
 	}
 
-	if (!disk_conf_from_tags(mdev,nlp->tag_list,&nbc->dc)) {
+	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
 		retcode=UnknownMandatoryTag;
 		goto fail;
 	}
@@ -738,7 +738,7 @@
 		goto fail;
 	}
 
-	nbc->lo_file = filp_open(nbc->dc.backing_dev,O_RDWR,0);
+	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
 	if (IS_ERR(nbc->lo_file)) {
 		ERR("open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
 		    PTR_ERR(nbc->lo_file));
@@ -754,7 +754,7 @@
 		goto fail;
 	}
 
-	nbc->md_file = filp_open(nbc->dc.meta_dev,O_RDWR,0);
+	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
 	if (IS_ERR(nbc->md_file)) {
 		ERR("open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
 		    PTR_ERR(nbc->md_file));
@@ -781,7 +781,7 @@
 		goto fail;
 	}
 
-	resync_lru = lc_alloc("resync",31, sizeof(struct bm_extent),mdev);
+	resync_lru = lc_alloc("resync", 31, sizeof(struct bm_extent), mdev);
 	if (!resync_lru) {
 		retcode=KMallocFailed;
 		goto fail;
@@ -844,13 +844,13 @@
 		goto release_bdev2_fail;
 	}
 
-	if ((retcode = drbd_request_state(mdev,NS(disk,Attaching))) < SS_Success ) {
+	if ((retcode = drbd_request_state(mdev, NS(disk, Attaching))) < SS_Success ) {
 		goto release_bdev2_fail;
 	}
 
-	drbd_md_set_sector_offsets(mdev,nbc);
+	drbd_md_set_sector_offsets(mdev, nbc);
 
-	retcode = drbd_md_read(mdev,nbc);
+	retcode = drbd_md_read(mdev, nbc);
 	if (retcode != NoError) {
 		goto force_diskless;
 	}
@@ -862,13 +862,13 @@
 	}
 
 	// Prevent shrinking of consistent devices !
-	if (drbd_md_test_flag(nbc,MDF_Consistent) &&
-	   drbd_new_dev_size(mdev,nbc) < nbc->md.la_size_sect) {
+	if (drbd_md_test_flag(nbc, MDF_Consistent) &&
+	   drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
 		retcode = LDDeviceTooSmall;
 		goto force_diskless;
 	}
 
-	if (!drbd_al_read_log(mdev,nbc)) {
+	if (!drbd_al_read_log(mdev, nbc)) {
 		retcode = MDIOError;
 		goto force_diskless;
 	}
@@ -883,7 +883,7 @@
 	nbc = NULL;
 	resync_lru = NULL;
 
-	if (drbd_md_test_flag(mdev->bc,MDF_PrimaryInd)) {
+	if (drbd_md_test_flag(mdev->bc, MDF_PrimaryInd)) {
 		set_bit(CRASHED_PRIMARY, &mdev->flags);
 	} else {
 		clear_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -914,24 +914,24 @@
 	 * so we can automatically recover from a crash of a
 	 * degraded but active "cluster" after a certain timeout.
 	 */
-	clear_bit(USE_DEGR_WFC_T,&mdev->flags);
+	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
 	if ( mdev->state.role != Primary &&
-	     drbd_md_test_flag(mdev->bc,MDF_PrimaryInd) &&
-	    !drbd_md_test_flag(mdev->bc,MDF_ConnectedInd) ) {
-		set_bit(USE_DEGR_WFC_T,&mdev->flags);
+	     drbd_md_test_flag(mdev->bc, MDF_PrimaryInd) &&
+	    !drbd_md_test_flag(mdev->bc, MDF_ConnectedInd) ) {
+		set_bit(USE_DEGR_WFC_T, &mdev->flags);
 	}
 
 	drbd_bm_lock(mdev); // racy...
 	drbd_determin_dev_size(mdev);
 
-	if (drbd_md_test_flag(mdev->bc,MDF_FullSync)) {
+	if (drbd_md_test_flag(mdev->bc, MDF_FullSync)) {
 		INFO("Assuming that all blocks are out of sync (aka FullSync)\n");
 		drbd_bm_set_all(mdev);
 		if (unlikely(drbd_bm_write(mdev) < 0)) {
 			retcode = MDIOError;
 			goto unlock_bm;
 		}
-		drbd_md_clear_flag(mdev,MDF_FullSync);
+		drbd_md_clear_flag(mdev, MDF_FullSync);
 	} else {
 		if (unlikely(drbd_bm_read(mdev) < 0)) {
 			retcode = MDIOError;
@@ -955,8 +955,8 @@
 	   If MDF_WasUpToDate is not set go into Outdated disk state,
 	   otherwise into Consistent state.
 	*/
-	if (drbd_md_test_flag(mdev->bc,MDF_Consistent)) {
-		if (drbd_md_test_flag(mdev->bc,MDF_WasUpToDate)) {
+	if (drbd_md_test_flag(mdev->bc, MDF_Consistent)) {
+		if (drbd_md_test_flag(mdev->bc, MDF_WasUpToDate)) {
 			ns.disk = Consistent;
 		} else {
 			ns.disk = Outdated;
@@ -965,7 +965,7 @@
 		ns.disk = Inconsistent;
 	}
 
-	if (drbd_md_test_flag(mdev->bc,MDF_PeerOutDated)) {
+	if (drbd_md_test_flag(mdev->bc, MDF_PeerOutDated)) {
 		ns.pdsk = Outdated;
 	}
 
@@ -990,7 +990,7 @@
 	rv = _drbd_set_state(mdev, ns, ChgStateVerbose);
 	ns = mdev->state;
 	spin_unlock_irq(&mdev->req_lock);
-	if (rv==SS_Success) after_state_ch(mdev,os,ns,ChgStateVerbose);
+	if (rv==SS_Success) after_state_ch(mdev, os, ns, ChgStateVerbose);
 
 	if (rv < SS_Success) {
 		goto unlock_bm;
@@ -998,7 +998,7 @@
 
 	drbd_bm_unlock(mdev);
 
-	if (inc_local_if_state(mdev,Attaching)) {
+	if (inc_local_if_state(mdev, Attaching)) {
 		if (mdev->state.role == Primary) mdev->bc->md.uuid[Current] |=  (u64)1;
 		else                            mdev->bc->md.uuid[Current] &= ~(u64)1;
 		dec_local(mdev);
@@ -1012,7 +1012,7 @@
  unlock_bm:
 	drbd_bm_unlock(mdev);
  force_diskless:
-	drbd_force_state(mdev,NS(disk,Diskless));
+	drbd_force_state(mdev, NS(disk, Diskless));
 	drbd_md_sync(mdev);
  release_bdev2_fail:
 	if (nbc) BD_RELEASE(nbc->md_bdev);
@@ -1034,7 +1034,7 @@
 			  struct drbd_nl_cfg_reply *reply)
 {
 	fsync_bdev(mdev->this_bdev);
-	reply->ret_code = drbd_request_state(mdev,NS(disk,Diskless));
+	reply->ret_code = drbd_request_state(mdev, NS(disk, Diskless));
 
 	return 0;
 }
@@ -1044,7 +1044,7 @@
 STATIC int drbd_nl_net_conf(drbd_dev *mdev, struct drbd_nl_cfg_req *nlp,
 			    struct drbd_nl_cfg_reply *reply)
 {
-	int i,ns;
+	int i, ns;
 	enum ret_codes retcode;
 	struct net_conf *new_conf = NULL;
 	struct crypto_hash *tfm = NULL;
@@ -1058,17 +1058,17 @@
 		goto fail;
 	}
 
-	new_conf = kmalloc(sizeof(struct net_conf),GFP_KERNEL);
+	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
 	if (!new_conf) {
 		retcode=KMallocFailed;
 		goto fail;
 	}
 
 	if ( !(nlp->flags & DRBD_NL_SET_DEFAULTS) && inc_net(mdev)) {
-		memcpy(new_conf,mdev->net_conf,sizeof(struct net_conf));
+		memcpy(new_conf, mdev->net_conf, sizeof(struct net_conf));
 		dec_local(mdev);
 	} else {
-		memset(new_conf,0,sizeof(struct net_conf));
+		memset(new_conf, 0, sizeof(struct net_conf));
 		new_conf->timeout         = DRBD_TIMEOUT_DEF;
 		new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
 		new_conf->ping_int        = DRBD_PING_INT_DEF;
@@ -1087,7 +1087,7 @@
 		new_conf->rr_conflict     = DRBD_RR_CONFLICT_DEF;
 	}
 
-	if (!net_conf_from_tags(mdev,nlp->tag_list,new_conf)) {
+	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
 		retcode=UnknownMandatoryTag;
 		goto fail;
 	}
@@ -1129,7 +1129,7 @@
 #undef O_PORT
 
 	if (new_conf->cram_hmac_alg[0] != 0) {
-		snprintf(hmac_name,HMAC_NAME_L,"hmac(%s)",new_conf->cram_hmac_alg);
+		snprintf(hmac_name, HMAC_NAME_L, "hmac(%s)", new_conf->cram_hmac_alg);
 		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
 		if (IS_ERR(tfm)) {
 			tfm = NULL;
@@ -1212,7 +1212,7 @@
 	}
 	mdev->cram_hmac_tfm = tfm;
 
-	retcode = drbd_request_state(mdev,NS(conn,Unconnected));
+	retcode = drbd_request_state(mdev, NS(conn, Unconnected));
 
 	reply->ret_code = retcode;
 	return 0;
@@ -1232,23 +1232,23 @@
 {
 	int retcode;
 
-	retcode = _drbd_request_state(mdev,NS(conn,Disconnecting),0);	// silently.
+	retcode = _drbd_request_state(mdev, NS(conn, Disconnecting), 0);	// silently.
 
 	if (retcode == SS_NothingToDo) goto done;
 	else if (retcode == SS_AlreadyStandAlone) goto done;
 	else if (retcode == SS_PrimaryNOP) {
 		// Our statche checking code wants to see the peer outdated.
-		retcode = drbd_request_state(mdev,NS2(conn,Disconnecting,
-						      pdsk,Outdated));
+		retcode = drbd_request_state(mdev, NS2(conn, Disconnecting,
+						      pdsk, Outdated));
 	} else if (retcode == SS_CW_FailedByPeer) {
 		// The peer probabely wants to see us outdated.
-		retcode = _drbd_request_state(mdev,NS2(conn,Disconnecting,
-						       disk,Outdated),0);
+		retcode = _drbd_request_state(mdev, NS2(conn, Disconnecting,
+						       disk, Outdated), 0);
 		if (retcode == SS_IsDiskLess) {
 			// We are diskless and our peer wants to outdate us.
 			// So, simply go away, and let the peer try to
 			// outdate us with its 'outdate-peer' handler later.
-			retcode = drbd_request_state(mdev,NS(conn,StandAlone));
+			retcode = drbd_request_state(mdev, NS(conn, StandAlone));
 		}
 	}
 
@@ -1275,7 +1275,7 @@
 	int retcode=NoError;
 
 	memset(&rs, 0, sizeof(struct resize));
-	if (!resize_from_tags(mdev,nlp->tag_list,&rs)) {
+	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
 		retcode=UnknownMandatoryTag;
 		goto fail;
 	}
@@ -1320,7 +1320,7 @@
 	drbd_dev *odev;
 	int err;
 
-	memcpy(&sc,&mdev->sync_conf,sizeof(struct syncer_conf));
+	memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
 
 	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
 		sc.rate       = DRBD_RATE_DEF;
@@ -1328,7 +1328,7 @@
 		sc.al_extents = DRBD_AL_EXTENTS_DEF;
 	}
 
-	if (!syncer_conf_from_tags(mdev,nlp->tag_list,&sc)) {
+	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
 		retcode=UnknownMandatoryTag;
 		goto fail;
 	}
@@ -1353,7 +1353,7 @@
 	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; // arbitrary minimum
 #define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
 	if (sc.al_extents > AL_MAX) {
-		ERR("sc.al_extents > %d\n",AL_MAX);
+		ERR("sc.al_extents > %d\n", AL_MAX);
 		sc.al_extents = AL_MAX;
 	}
 #undef AL_MAX
@@ -1372,7 +1372,7 @@
 	}
 
 	if (mdev->state.conn >= Connected)
-		drbd_send_sync_param(mdev,&sc);
+		drbd_send_sync_param(mdev, &sc);
 
 	drbd_alter_sa(mdev, sc.after);
 
@@ -1384,8 +1384,8 @@
 STATIC int drbd_nl_invalidate(drbd_dev *mdev, struct drbd_nl_cfg_req *nlp,
 			      struct drbd_nl_cfg_reply *reply)
 {
-	reply->ret_code = drbd_request_state(mdev,NS2(conn,StartingSyncT,
-						      disk,Inconsistent));
+	reply->ret_code = drbd_request_state(mdev, NS2(conn, StartingSyncT,
+						      disk, Inconsistent));
 	return 0;
 }
 
@@ -1393,8 +1393,8 @@
 				   struct drbd_nl_cfg_reply *reply)
 {
 
-	reply->ret_code = drbd_request_state(mdev,NS2(conn,StartingSyncS,
-						      pdsk,Inconsistent));
+	reply->ret_code = drbd_request_state(mdev, NS2(conn, StartingSyncS,
+						      pdsk, Inconsistent));
 
 	return 0;
 }
@@ -1404,7 +1404,7 @@
 {
 	int retcode=NoError;
 
-	if (drbd_request_state(mdev,NS(user_isp,1)) == SS_NothingToDo)
+	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NothingToDo)
 		retcode = PauseFlagAlreadySet;
 
 	reply->ret_code = retcode;
@@ -1416,7 +1416,7 @@
 {
 	int retcode=NoError;
 
-	if (drbd_request_state(mdev,NS(user_isp,0)) == SS_NothingToDo)
+	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NothingToDo)
 		retcode = PauseFlagAlreadyClear;
 
 	reply->ret_code = retcode;
@@ -1426,7 +1426,7 @@
 STATIC int drbd_nl_suspend_io(drbd_dev *mdev, struct drbd_nl_cfg_req *nlp,
 			      struct drbd_nl_cfg_reply *reply)
 {
-	reply->ret_code = drbd_request_state(mdev,NS(susp,1));
+	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
 
 	return 0;
 }
@@ -1434,7 +1434,7 @@
 STATIC int drbd_nl_resume_io(drbd_dev *mdev, struct drbd_nl_cfg_req *nlp,
 			     struct drbd_nl_cfg_reply *reply)
 {
-	reply->ret_code = drbd_request_state(mdev,NS(susp,0));
+	reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
 	return 0;
 }
 
@@ -1442,18 +1442,18 @@
 			   struct drbd_nl_cfg_reply *reply)
 {
 	int retcode;
-	drbd_state_t os,ns;
+	drbd_state_t os, ns;
 
 	spin_lock_irq(&mdev->req_lock);
 	os = mdev->state;
 	if (mdev->state.disk < Outdated) {
 		retcode = -999;
 	} else {
-		retcode = _drbd_set_state(_NS(mdev,disk,Outdated),ChgStateVerbose);
+		retcode = _drbd_set_state(_NS(mdev, disk, Outdated), ChgStateVerbose);
 	}
 	ns = mdev->state;
 	spin_unlock_irq(&mdev->req_lock);
-	if (retcode==SS_Success) after_state_ch(mdev,os,ns, ChgStateVerbose);
+	if (retcode==SS_Success) after_state_ch(mdev, os, ns, ChgStateVerbose);
 
 	if (retcode == -999) {
 		retcode = DiskLowerThanOutdated;
@@ -1475,15 +1475,15 @@
 	tl = reply->tag_list;
 
 	if (inc_local(mdev)) {
-		tl = disk_conf_to_tags(mdev,&mdev->bc->dc,tl);
+		tl = disk_conf_to_tags(mdev, &mdev->bc->dc, tl);
 		dec_local(mdev);
 	}
 
 	if (inc_net(mdev)) {
-		tl = net_conf_to_tags(mdev,mdev->net_conf,tl);
+		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
 		dec_net(mdev);
 	}
-	tl = syncer_conf_to_tags(mdev,&mdev->sync_conf,tl);
+	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
 
 	*tl++ = TT_END; /* Close the tag list */
 
@@ -1497,7 +1497,7 @@
 
 	tl = reply->tag_list;
 
-	tl = get_state_to_tags(mdev,(struct get_state*)&mdev->state,tl);
+	tl = get_state_to_tags(mdev, (struct get_state*)&mdev->state, tl);
 	*tl++ = TT_END; /* Close the tag list */
 
 	return (int)((char*)tl - (char*)reply->tag_list);
@@ -1514,12 +1514,12 @@
 		// This is a hand crafted add tag ;)
 		*tl++ = T_uuids;
 		*tl++ = UUID_SIZE*sizeof(u64);
-		memcpy(tl,mdev->bc->md.uuid,UUID_SIZE*sizeof(u64));
+		memcpy(tl, mdev->bc->md.uuid, UUID_SIZE*sizeof(u64));
 		tl=(unsigned short*)((char*)tl + UUID_SIZE*sizeof(u64));
 		dec_local(mdev);
 		*tl++ = T_uuids_flags;
 		*tl++ = sizeof(int);
-		memcpy(tl,&mdev->bc->md.flags,sizeof(int));
+		memcpy(tl, &mdev->bc->md.flags, sizeof(int));
 		tl=(unsigned short*)((char*)tl + sizeof(int));
 	}
 	*tl++ = TT_END; /* Close the tag list */
@@ -1538,7 +1538,7 @@
 	// This is a hand crafted add tag ;)
 	*tl++ = T_use_degraded;
 	*tl++ = sizeof(char);
-	*((char*)tl) = test_bit(USE_DEGR_WFC_T,&mdev->flags) ? 1 : 0 ;
+	*((char*)tl) = test_bit(USE_DEGR_WFC_T, &mdev->flags) ? 1 : 0 ;
 	tl=(unsigned short*)((char*)tl + sizeof(char));
 	*tl++ = TT_END;
 
@@ -1618,7 +1618,7 @@
 	struct cn_msg *cn_reply;
 	struct drbd_nl_cfg_reply* reply;
 	drbd_dev *mdev;
-	int retcode,rr;
+	int retcode, rr;
 	int reply_size = sizeof(struct cn_msg)
 		+ sizeof(struct drbd_nl_cfg_reply)
 		+ sizeof(short int);
@@ -1643,7 +1643,7 @@
 	cm = cnd_table + nlp->packet_type;
 	reply_size += cm->reply_body_size;
 
-	if ( !(cn_reply = kmalloc(reply_size,GFP_KERNEL)) ) {
+	if ( !(cn_reply = kmalloc(reply_size, GFP_KERNEL)) ) {
 		retcode=KMallocFailed;
 		goto fail;
 	}
@@ -1654,7 +1654,7 @@
 	reply->ret_code = NoError; // Might by modified by cm->function.
 	// reply->tag_list; might be modified by cm->fucntion.
 
-	rr = cm->function(mdev,nlp,reply);
+	rr = cm->function(mdev, nlp, reply);
 
 	cn_reply->id = req->id;
 	cn_reply->seq = req->seq;
@@ -1666,7 +1666,7 @@
 
 	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
 	if (rr && rr != -ESRCH) {
-		printk(KERN_INFO DEVICE_NAME " cn_netlink_send()=%d\n",rr);
+		printk(KERN_INFO DEVICE_NAME " cn_netlink_send()=%d\n", rr);
 	}
 	kfree(cn_reply);
 	module_put(THIS_MODULE);
@@ -1690,13 +1690,13 @@
 
 	// WARN("drbd_bcast_state() got called\n");
 
-	tl = get_state_to_tags(mdev,(struct get_state*)&mdev->state,tl);
+	tl = get_state_to_tags(mdev, (struct get_state*)&mdev->state, tl);
 	*tl++ = TT_END; /* Close the tag list */
 
 	cn_reply->id.idx = CN_IDX_DRBD;
 	cn_reply->id.val = CN_VAL_DRBD;
 
-	cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
+	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
 	cn_reply->ack = 0; // not used here.
 	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
 		(int)((char*)tl - (char*)reply->tag_list);
@@ -1727,14 +1727,14 @@
 	str_len = strlen(helper_name)+1;
 	*tl++ = T_helper;
 	*tl++ = str_len;
-	memcpy(tl,helper_name,str_len);
+	memcpy(tl, helper_name, str_len);
 	tl=(unsigned short*)((char*)tl + str_len);
 	*tl++ = TT_END; /* Close the tag list */
 
 	cn_reply->id.idx = CN_IDX_DRBD;
 	cn_reply->id.val = CN_VAL_DRBD;
 
-	cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
+	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
 	cn_reply->ack = 0; // not used here.
 	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
 		(int)((char*)tl - (char*)reply->tag_list);
@@ -1784,7 +1784,7 @@
 	cn_reply->id.idx = CN_IDX_DRBD;
 	cn_reply->id.val = CN_VAL_DRBD;
 
-	cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
+	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
 	cn_reply->ack = 0; // not used here.
 	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
 		(int)((char*)tl - (char*)reply->tag_list);
@@ -1814,7 +1814,7 @@
 	err = cn_init();
 	if(err) return err;
 #endif
-	err = cn_add_callback(&cn_id_drbd,"cn_drbd",&drbd_connector_callback);
+	err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
 	if (err) {
 		printk(KERN_ERR DEVICE_NAME "cn_drbd failed to register\n");
 		return err;
@@ -1857,7 +1857,7 @@
 
 	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
 	if (rr && rr != -ESRCH) {
-		printk(KERN_INFO DEVICE_NAME " cn_netlink_send()=%d\n",rr);
+		printk(KERN_INFO DEVICE_NAME " cn_netlink_send()=%d\n", rr);
 	}
 }
 

Modified: branches/drbd-8.0-for-linus/drbd/drbd_proc.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_proc.c	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_proc.c	2007-07-24 11:38:05 UTC (rev 2985)
@@ -98,14 +98,14 @@
 		seq_printf(seq, "] ");
 	}
 	res = 1000L - res;
-	seq_printf(seq,"sync'ed:%3lu.%lu%% ", res / 10, res % 10);
+	seq_printf(seq, "sync'ed:%3lu.%lu%% ", res / 10, res % 10);
 	/* if more than 1 GB display in MB */
 	if (mdev->rs_total > 0x100000L) {
-		seq_printf(seq,"(%lu/%lu)M\n\t",
+		seq_printf(seq, "(%lu/%lu)M\n\t",
 			    (unsigned long) Bit2KB(rs_left) >> 10,
 			    (unsigned long) Bit2KB(mdev->rs_total) >> 10 );
 	} else {
-		seq_printf(seq,"(%lu/%lu)K\n\t",
+		seq_printf(seq, "(%lu/%lu)K\n\t",
 			    (unsigned long) Bit2KB(rs_left),
 			    (unsigned long) Bit2KB(mdev->rs_total) );
 	}
@@ -139,7 +139,7 @@
 	dbdt = Bit2KB(db/dt);
 	if (dbdt > 1000)
 		seq_printf(seq, " speed: %ld,%03ld",
-			dbdt/1000,dbdt % 1000);
+			dbdt/1000, dbdt % 1000);
 	else
 		seq_printf(seq, " speed: %ld", dbdt);
 
@@ -151,11 +151,11 @@
 	dbdt = Bit2KB(db/dt);
 	if (dbdt > 1000)
 		seq_printf(seq, " (%ld,%03ld)",
-			dbdt/1000,dbdt % 1000);
+			dbdt/1000, dbdt % 1000);
 	else
 		seq_printf(seq, " (%ld)", dbdt);
 
-	seq_printf(seq," K/sec\n");
+	seq_printf(seq, " K/sec\n");
 }
 
 #if 0
@@ -163,7 +163,7 @@
 {
 	struct bm_extent *bme = (struct bm_extent *)e;
 
-	seq_printf(seq,"%5d %s %s\n",bme->rs_left,
+	seq_printf(seq, "%5d %s %s\n", bme->rs_left,
 		   bme->flags & BME_NO_WRITES ? "NO_WRITES" : "---------",
 		   bme->flags & BME_LOCKED ? "LOCKED" : "------"
 		   );
@@ -172,12 +172,12 @@
 
 STATIC int drbd_seq_show(struct seq_file *seq, void *v)
 {
-	int i,hole=0;
+	int i, hole=0;
 	const char *sn;
 	drbd_dev *mdev;
 
 	seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d)\n%s\n",
-		    API_VERSION,PRO_VERSION, drbd_buildtag());
+		    API_VERSION, PRO_VERSION, drbd_buildtag());
 
 	/*
 	  cs .. connection state
@@ -239,17 +239,17 @@
 		}
 		if ( mdev->state.conn == SyncSource ||
 		     mdev->state.conn == SyncTarget ) {
-			drbd_syncer_progress(mdev,seq);
+			drbd_syncer_progress(mdev, seq);
 		}
 		if (mdev->resync) {
-			lc_printf_stats(seq,mdev->resync);
+			lc_printf_stats(seq, mdev->resync);
 		}
 		if (mdev->act_log) {
-			lc_printf_stats(seq,mdev->act_log);
+			lc_printf_stats(seq, mdev->act_log);
 		}
 #if 0
 		if (mdev->resync) {
-			lc_dump(mdev->resync,seq,"rs_left",
+			lc_dump(mdev->resync, seq, "rs_left",
 				resync_dump_detail);
 		}
 #endif

Modified: branches/drbd-8.0-for-linus/drbd/drbd_receiver.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_receiver.c	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_receiver.c	2007-07-24 11:38:05 UTC (rev 2985)
@@ -67,10 +67,10 @@
 
 #if 0
 #define CHECK_LIST_LIMIT 1000
-void check_list(drbd_dev *mdev,struct list_head *list,char *t)
+void check_list(drbd_dev *mdev, struct list_head *list, char *t)
 {
-	struct list_head *le,*la;
-	int forward=0,backward=0;
+	struct list_head *le, *la;
+	int forward=0, backward=0;
 
 	le=list;
 	do {
@@ -79,13 +79,13 @@
 		if (le->prev != la) {
 			printk(KERN_ERR DEVICE_NAME
 			       "%d: %s list fucked.\n",
-			       mdev_to_minor(mdev),t);
+			       mdev_to_minor(mdev), t);
 			break;
 		}
 		if (forward++ > CHECK_LIST_LIMIT) {
 			printk(KERN_ERR DEVICE_NAME
 			       "%d: %s forward > 1000\n",
-			       mdev_to_minor(mdev),t);
+			       mdev_to_minor(mdev), t);
 			break;
 		}
 	} while(le != list);
@@ -97,20 +97,20 @@
 		if (le->next != la) {
 			printk(KERN_ERR DEVICE_NAME
 			       "%d: %s list fucked.\n",
-			       mdev_to_minor(mdev),t);
+			       mdev_to_minor(mdev), t);
 			break;
 		}
 		if (backward++ > CHECK_LIST_LIMIT) {
 			printk(KERN_ERR DEVICE_NAME
 			       "%d: %s backward > 1000\n",
-			       mdev_to_minor(mdev),t);
+			       mdev_to_minor(mdev), t);
 			break;
 		}
 	} while(le != list);
 
 	if (forward != backward) {
 		printk(KERN_ERR DEVICE_NAME "%d: forward=%d, backward=%d\n",
-		       mdev_to_minor(mdev),forward,backward);
+		       mdev_to_minor(mdev), forward, backward);
 	}
 }
 #endif
@@ -131,7 +131,7 @@
 	 * not make sense.
 	 */
 
-	spin_lock_irqsave(&drbd_pp_lock,flags);
+	spin_lock_irqsave(&drbd_pp_lock, flags);
 	/* This lock needs to lock out irq because we might call drdb_pp_free()
 	   from IRQ context.
 	   FIXME but why irq _save_ ?
@@ -141,7 +141,7 @@
 		drbd_pp_pool = (struct page*)page_private(page);
 		drbd_pp_vacant--;
 	}
-	spin_unlock_irqrestore(&drbd_pp_lock,flags);
+	spin_unlock_irqrestore(&drbd_pp_lock, flags);
 	if (page) goto got_page;
 
 	drbd_kick_lo(mdev);
@@ -150,12 +150,12 @@
 		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
 
 		/* try the pool again, maybe the drbd_kick_lo set some free */
-		spin_lock_irqsave(&drbd_pp_lock,flags);
+		spin_lock_irqsave(&drbd_pp_lock, flags);
 		if ( (page = drbd_pp_pool) ) {
 			drbd_pp_pool = (struct page*)page_private(page);
 			drbd_pp_vacant--;
 		}
-		spin_unlock_irqrestore(&drbd_pp_lock,flags);
+		spin_unlock_irqrestore(&drbd_pp_lock, flags);
 
 		if (page) break;
 
@@ -188,12 +188,12 @@
 	return page;
 }
 
-STATIC void drbd_pp_free(drbd_dev *mdev,struct page *page)
+STATIC void drbd_pp_free(drbd_dev *mdev, struct page *page)
 {
 	unsigned long flags=0;
 	int free_it;
 
-	spin_lock_irqsave(&drbd_pp_lock,flags);
+	spin_lock_irqsave(&drbd_pp_lock, flags);
 	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
 		free_it = 1;
 	} else {
@@ -202,7 +202,7 @@
 		drbd_pp_vacant++;
 		free_it = 0;
 	}
-	spin_unlock_irqrestore(&drbd_pp_lock,flags);
+	spin_unlock_irqrestore(&drbd_pp_lock, flags);
 
 	atomic_dec(&mdev->pp_in_use);
 
@@ -251,7 +251,7 @@
 		return NULL;
 	}
 
-	bio = bio_alloc(GFP_KERNEL, div_ceil(data_size,PAGE_SIZE));
+	bio = bio_alloc(GFP_KERNEL, div_ceil(data_size, PAGE_SIZE));
 	if (!bio) {
 		ERR("alloc_ee: Allocation of a bio failed\n");
 		goto fail1;
@@ -268,7 +268,7 @@
 			goto fail2;
 		}
 		if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
-			drbd_pp_free(mdev,page);
+			drbd_pp_free(mdev, page);
 			ERR("alloc_ee: bio_add_page(s=%llu,"
 			    "data_size=%u,ds=%u) failed\n",
 			    (unsigned long long)sector, data_size, ds);
@@ -307,16 +307,16 @@
 	e->barrier_nr2 = 0;
 	e->flags = 0;
 
-	MTRACE(TraceTypeEE,TraceLvlAll,
+	MTRACE(TraceTypeEE, TraceLvlAll,
 	       INFO("allocated EE sec=%llus size=%u ee=%p\n",
-		    (unsigned long long)sector,data_size,e);
+		    (unsigned long long)sector, data_size, e);
 	       );
 
 	return e;
 
  fail2:
 	__bio_for_each_segment(bvec, bio, i, 0) {
-		drbd_pp_free(mdev,bvec->bv_page);
+		drbd_pp_free(mdev, bvec->bv_page);
 	}
 	bio_put(bio);
  fail1:
@@ -331,13 +331,13 @@
 	struct bio_vec *bvec;
 	int i;
 
-	MTRACE(TraceTypeEE,TraceLvlAll,
+	MTRACE(TraceTypeEE, TraceLvlAll,
 	       INFO("Free EE sec=%llus size=%u ee=%p\n",
-		    (unsigned long long)e->sector,e->size,e);
+		    (unsigned long long)e->sector, e->size, e);
 	       );
 
 	__bio_for_each_segment(bvec, bio, i, 0) {
-		drbd_pp_free(mdev,bvec->bv_page);
+		drbd_pp_free(mdev, bvec->bv_page);
 	}
 
 	bio_put(bio);
@@ -348,7 +348,7 @@
 }
 
 /* currently on module unload only */
-int drbd_release_ee(drbd_dev *mdev,struct list_head* list)
+int drbd_release_ee(drbd_dev *mdev, struct list_head* list)
 {
 	int count=0;
 	struct Tl_epoch_entry* e;
@@ -359,7 +359,7 @@
 		le = list->next;
 		list_del(le);
 		e = list_entry(le, struct Tl_epoch_entry, w.list);
-		drbd_free_ee(mdev,e);
+		drbd_free_ee(mdev, e);
 		count++;
 	}
 	spin_unlock_irq(&mdev->req_lock);
@@ -371,7 +371,7 @@
 STATIC void reclaim_net_ee(drbd_dev *mdev)
 {
 	struct Tl_epoch_entry *e;
-	struct list_head *le,*tle;
+	struct list_head *le, *tle;
 
 	/* The EEs are always appended to the end of the list. Since
 	   they are sent in order over the wire, they have to finish
@@ -382,7 +382,7 @@
 		e = list_entry(le, struct Tl_epoch_entry, w.list);
 		if ( drbd_bio_has_active_page(e->private_bio) ) break;
 		list_del(le);
-		drbd_free_ee(mdev,e);
+		drbd_free_ee(mdev, e);
 	}
 }
 
@@ -401,11 +401,11 @@
 	LIST_HEAD(work_list);
 	struct Tl_epoch_entry *e, *t;
 	int ok=1;
-	int do_clear_bit = test_bit(WRITE_ACK_PENDING,&mdev->flags);
+	int do_clear_bit = test_bit(WRITE_ACK_PENDING, &mdev->flags);
 
 	spin_lock_irq(&mdev->req_lock);
 	reclaim_net_ee(mdev);
-	list_splice_init(&mdev->done_ee,&work_list);
+	list_splice_init(&mdev->done_ee, &work_list);
 	spin_unlock_irq(&mdev->req_lock);
 
 	/* possible callbacks here:
@@ -413,16 +413,16 @@
 	 * all ignore the last argument.
 	 */
 	list_for_each_entry_safe(e, t, &work_list, w.list) {
-		MTRACE(TraceTypeEE,TraceLvlAll,
+		MTRACE(TraceTypeEE, TraceLvlAll,
 		       INFO("Process EE on done_ee sec=%llus size=%u ee=%p\n",
-			    (unsigned long long)e->sector,e->size,e);
+			    (unsigned long long)e->sector, e->size, e);
 			);
 		// list_del not necessary, next/prev members not touched
-		if (e->w.cb(mdev,&e->w,0) == 0) ok = 0;
-		drbd_free_ee(mdev,e);
+		if (e->w.cb(mdev, &e->w, 0) == 0) ok = 0;
+		drbd_free_ee(mdev, e);
 	}
 	if (do_clear_bit)
-		clear_bit(WRITE_ACK_PENDING,&mdev->flags);
+		clear_bit(WRITE_ACK_PENDING, &mdev->flags);
 	wake_up(&mdev->ee_wait);
 
 	return ok;
@@ -450,20 +450,20 @@
 			++n;
 		}
 		if (!hlist_unhashed(&e->colision)) hlist_del_init(&e->colision);
-		drbd_free_ee(mdev,e);
+		drbd_free_ee(mdev, e);
 	}
 
 	sub_unacked(mdev, n);
 }
 
-void _drbd_wait_ee_list_empty(drbd_dev *mdev,struct list_head *head)
+void _drbd_wait_ee_list_empty(drbd_dev *mdev, struct list_head *head)
 {
 	DEFINE_WAIT(wait);
 	MUST_HOLD(&mdev->req_lock);
 
 	/* avoids spin_lock/unlock and calling prepare_to_wait in the fast path */
 	while (!list_empty(head)) {
-		prepare_to_wait(&mdev->ee_wait,&wait,TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&mdev->req_lock);
 		drbd_kick_lo(mdev);
 		schedule();
@@ -472,14 +472,14 @@
 	}
 }
 
-void drbd_wait_ee_list_empty(drbd_dev *mdev,struct list_head *head)
+void drbd_wait_ee_list_empty(drbd_dev *mdev, struct list_head *head)
 {
 	spin_lock_irq(&mdev->req_lock);
 	_drbd_wait_ee_list_empty(mdev, head);
 	spin_unlock_irq(&mdev->req_lock);
 }
 
-STATIC struct socket* drbd_accept(drbd_dev *mdev,struct socket* sock)
+STATIC struct socket* drbd_accept(drbd_dev *mdev, struct socket* sock)
 {
 	struct socket *newsock;
 	int err = 0;
@@ -536,7 +536,7 @@
 	return rv;
 }
 
-int drbd_recv(drbd_dev *mdev,void *buf, size_t size)
+int drbd_recv(drbd_dev *mdev, void *buf, size_t size)
 {
 	mm_segment_t oldfs;
 	struct iovec iov;
@@ -557,7 +557,7 @@
 	set_fs(KERNEL_DS);
 
 	for(;;) {
-		rv = sock_recvmsg(mdev->data.socket,&msg,size,msg.msg_flags);
+		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
 		if (rv == size) break;
 
 		/* Note:
@@ -569,7 +569,7 @@
 			if (rv == -ECONNRESET)
 				INFO("sock was reset by peer\n");
 			else if (rv != -ERESTARTSYS)
-				ERR("sock_recvmsg returned %d\n",rv);
+				ERR("sock_recvmsg returned %d\n", rv);
 			break;
 		} else if (rv == 0) {
 			INFO("sock was shut down by peer\n");
@@ -585,7 +585,7 @@
 
 	set_fs(oldfs);
 
-	if (rv != size) drbd_force_state(mdev,NS(conn,BrokenPipe));
+	if (rv != size) drbd_force_state(mdev, NS(conn, BrokenPipe));
 
 	return rv;
 }
@@ -644,7 +644,7 @@
 STATIC struct socket *drbd_wait_for_connect(drbd_dev *mdev)
 {
 	int err;
-	struct socket *sock,*sock2;
+	struct socket *sock, *sock2;
 
 	err = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock2);
 	if (err) {
@@ -666,11 +666,11 @@
 	if (err) {
 		ERR("Unable to bind sock2 (%d)\n", err);
 		sock_release(sock2);
-		drbd_force_state(mdev,NS(conn,Disconnecting));
+		drbd_force_state(mdev, NS(conn, Disconnecting));
 		return NULL;
 	}
 
-	sock = drbd_accept(mdev,sock2);
+	sock = drbd_accept(mdev, sock2);
 	sock_release(sock2);
 
 	return sock;
@@ -679,14 +679,14 @@
 STATIC int drbd_do_handshake(drbd_dev *mdev);
 STATIC int drbd_do_auth(drbd_dev *mdev);
 
-STATIC int drbd_send_fp(drbd_dev *mdev,struct socket *sock,Drbd_Packet_Cmd cmd)
+STATIC int drbd_send_fp(drbd_dev *mdev, struct socket *sock, Drbd_Packet_Cmd cmd)
 {
 	Drbd_Header *h = (Drbd_Header *) &mdev->data.sbuf.head;
 
-	return _drbd_send_cmd(mdev,sock,cmd,h,sizeof(*h),0);
+	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
 }
 
-STATIC Drbd_Packet_Cmd drbd_recv_fp(drbd_dev *mdev,struct socket *sock)
+STATIC Drbd_Packet_Cmd drbd_recv_fp(drbd_dev *mdev, struct socket *sock)
 {
 	Drbd_Header *h = (Drbd_Header *) &mdev->data.sbuf.head;
 	int rr;
@@ -709,13 +709,13 @@
  */
 int drbd_connect(drbd_dev *mdev)
 {
-	struct socket *s, *sock,*msock;
-	int try,h;
+	struct socket *s, *sock, *msock;
+	int try, h;
 
 	D_ASSERT(mdev->state.conn >= Unconnected);
 	D_ASSERT(!mdev->data.socket);
 
-	if (drbd_request_state(mdev,NS(conn,WFConnection)) < SS_Success ) return 0;
+	if (drbd_request_state(mdev, NS(conn, WFConnection)) < SS_Success ) return 0;
 	clear_bit(DISCARD_CONCURRENT, &mdev->flags);
 
 	sock  = NULL;
@@ -755,7 +755,7 @@
 
 		s=drbd_wait_for_connect(mdev);
 		if (s) {
-			switch(drbd_recv_fp(mdev,s)) {
+			switch(drbd_recv_fp(mdev, s)) {
 			case HandShakeS:
 				if (sock) sock_release(sock);
 				sock = s;
@@ -811,7 +811,7 @@
 	mdev->meta.socket = msock;
 	mdev->last_received = jiffies;
 
-	if (drbd_request_state(mdev,NS(conn,WFReportParams)) < SS_Success) return 0;
+	if (drbd_request_state(mdev, NS(conn, WFReportParams)) < SS_Success) return 0;
 	D_ASSERT(mdev->asender.task == NULL);
 
 	h = drbd_do_handshake(mdev);
@@ -827,17 +827,17 @@
 	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
 	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
 
-	atomic_set(&mdev->packet_seq,0);
+	atomic_set(&mdev->packet_seq, 0);
 	mdev->peer_seq=0;
 
 	drbd_thread_start(&mdev->asender);
 
 	drbd_send_protocol(mdev);
-	drbd_send_sync_param(mdev,&mdev->sync_conf);
+	drbd_send_sync_param(mdev, &mdev->sync_conf);
 	drbd_send_sizes(mdev);
 	drbd_send_uuids(mdev);
 	drbd_send_state(mdev);
-	clear_bit(USE_DEGR_WFC_T,&mdev->flags);
+	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
 
 	return 1;
 }
@@ -846,10 +846,10 @@
 {
 	int r;
 
-	r = drbd_recv(mdev,h,sizeof(*h));
+	r = drbd_recv(mdev, h, sizeof(*h));
 
 	if (unlikely( r != sizeof(*h) )) {
-		ERR("short read expecting header on sock: r=%d\n",r);
+		ERR("short read expecting header on sock: r=%d\n", r);
 		return FALSE;
 	};
 	h->command = be16_to_cpu(h->command);
@@ -882,7 +882,7 @@
 		drbd_kick_lo(mdev);
 
 	spin_lock_irq(&mdev->req_lock);
-	_drbd_wait_ee_list_empty(mdev,&mdev->active_ee);
+	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 	epoch_size = mdev->epoch_size;
 	mdev->epoch_size = 0;
 	spin_unlock_irq(&mdev->req_lock);
@@ -891,10 +891,10 @@
 	 * to make sure this BarrierAck will not be received before the asender
 	 * had a chance to send all the write acks corresponding to this epoch,
 	 * wait_for that bit to clear... */
-	set_bit(WRITE_ACK_PENDING,&mdev->flags);
+	set_bit(WRITE_ACK_PENDING, &mdev->flags);
 	wake_asender(mdev);
 	rv = wait_event_interruptible(mdev->ee_wait,
-			      !test_bit(WRITE_ACK_PENDING,&mdev->flags));
+			      !test_bit(WRITE_ACK_PENDING, &mdev->flags));
 
 	if (rv == 0 && mdev->state.conn >= Connected)
 		rv = drbd_send_b_ack(mdev, p->barrier, epoch_size);
@@ -914,20 +914,20 @@
 	struct bio_vec *bvec;
 	struct page *page;
 	struct bio *bio;
-	int ds,i,rr;
+	int ds, i, rr;
 
-	e = drbd_alloc_ee(mdev,id,sector,data_size,GFP_KERNEL);
+	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_KERNEL);
 	if (!e) return 0;
 	bio = e->private_bio;
 	ds = data_size;
 	bio_for_each_segment(bvec, bio, i) {
 		page = bvec->bv_page;
-		rr = drbd_recv(mdev,kmap(page),min_t(int,ds,PAGE_SIZE));
+		rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
 		kunmap(page);
-		if ( rr != min_t(int,ds,PAGE_SIZE) ) {
-			drbd_free_ee(mdev,e);
+		if ( rr != min_t(int, ds, PAGE_SIZE) ) {
+			drbd_free_ee(mdev, e);
 			WARN("short read receiving data: read %d expected %d\n",
-			     rr, min_t(int,ds,PAGE_SIZE));
+			     rr, min_t(int, ds, PAGE_SIZE));
 			return 0;
 		}
 		ds -= rr;
@@ -951,11 +951,11 @@
 
 	data=kmap(page);
 	while(data_size) {
-		rr = drbd_recv(mdev,data,min_t(int,data_size,PAGE_SIZE));
-		if ( rr != min_t(int,data_size,PAGE_SIZE) ) {
+		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
+		if ( rr != min_t(int, data_size, PAGE_SIZE) ) {
 			rv = 0;
 			WARN("short read receiving data: read %d expected %d\n",
-			     rr, min_t(int,data_size,PAGE_SIZE));
+			     rr, min_t(int, data_size, PAGE_SIZE));
 			goto out;
 		}
 
@@ -963,7 +963,7 @@
 	}
 	kunmap(page);
  out:
-	drbd_pp_free(mdev,page);
+	drbd_pp_free(mdev, page);
 	return rv;
 }
 
@@ -983,13 +983,13 @@
 {
 	struct bio_vec *bvec;
 	struct bio *bio;
-	int rr,i,expect;
+	int rr, i, expect;
 
 	bio = req->master_bio;
 	D_ASSERT( sector == bio->bi_sector );
 
 	bio_for_each_segment(bvec, bio, i) {
-		expect = min_t(int,data_size,bvec->bv_len);
+		expect = min_t(int, data_size, bvec->bv_len);
 		rr=drbd_recv(mdev,
 			     kmap(bvec->bv_page)+bvec->bv_offset,
 			     expect);
@@ -1019,12 +1019,12 @@
 
 	if (likely( drbd_bio_uptodate(e->private_bio) )) {
 		drbd_set_in_sync(mdev, sector, e->size);
-		ok = drbd_send_ack(mdev,RSWriteAck,e);
+		ok = drbd_send_ack(mdev, RSWriteAck, e);
 	} else {
 		// Record failure to sync
 		drbd_rs_failed_io(mdev, sector, e->size);
 
-		ok = drbd_send_ack(mdev,NegAck,e);
+		ok = drbd_send_ack(mdev, NegAck, e);
 		ok&= drbd_io_error(mdev, FALSE);
 	}
 	dec_unacked(mdev);
@@ -1032,11 +1032,11 @@
 	return ok;
 }
 
-STATIC int recv_resync_read(drbd_dev *mdev,sector_t sector,int data_size)
+STATIC int recv_resync_read(drbd_dev *mdev, sector_t sector, int data_size)
 {
 	struct Tl_epoch_entry *e;
 
-	e = read_in_block(mdev,ID_SYNCER,sector,data_size);
+	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
 	if (!e) return FALSE;
 
 	dec_rs_pending(mdev);
@@ -1049,25 +1049,25 @@
 	 * respective _drbd_clear_done_ee */
 
 	spin_lock_irq(&mdev->req_lock);
-	list_add(&e->w.list,&mdev->sync_ee);
+	list_add(&e->w.list, &mdev->sync_ee);
 	spin_unlock_irq(&mdev->req_lock);
 
-	MTRACE(TraceTypeEE,TraceLvlAll,
+	MTRACE(TraceTypeEE, TraceLvlAll,
 	       INFO("submit EE (RS)WRITE sec=%llus size=%u ee=%p\n",
-		    (unsigned long long)e->sector,e->size,e);
+		    (unsigned long long)e->sector, e->size, e);
 	       );
-	drbd_generic_make_request(mdev,WRITE,DRBD_FAULT_RS_WR,e->private_bio);
+	drbd_generic_make_request(mdev, WRITE, DRBD_FAULT_RS_WR, e->private_bio);
 	/* accounting done in endio */
 
 	maybe_kick_lo(mdev);
 	return TRUE;
 }
 
-STATIC int receive_DataReply(drbd_dev *mdev,Drbd_Header* h)
+STATIC int receive_DataReply(drbd_dev *mdev, Drbd_Header* h)
 {
 	drbd_request_t *req;
 	sector_t sector;
-	unsigned int header_size,data_size;
+	unsigned int header_size, data_size;
 	int ok;
 	Drbd_Data_Packet *p = (Drbd_Data_Packet*)h;
 
@@ -1087,7 +1087,7 @@
 	sector = be64_to_cpu(p->sector);
 
 	spin_lock_irq(&mdev->req_lock);
-	req = _ar_id_to_req(mdev,p->block_id, sector);
+	req = _ar_id_to_req(mdev, p->block_id, sector);
 	spin_unlock_irq(&mdev->req_lock);
 	if (unlikely(!req)) {
 		ERR("Got a corrupt block_id/sector pair(1).\n");
@@ -1097,7 +1097,7 @@
 	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
 	 * special casing it there for the various failure cases.
 	 * still no race with drbd_fail_pending_reads */
-	ok = recv_dless_read(mdev,req,sector,data_size);
+	ok = recv_dless_read(mdev, req, sector, data_size);
 
 	if (ok) req_mod(req, data_received, 0);
 	/* else: nothing. handled from drbd_disconnect...
@@ -1107,10 +1107,10 @@
 	return ok;
 }
 
-STATIC int receive_RSDataReply(drbd_dev *mdev,Drbd_Header* h)
+STATIC int receive_RSDataReply(drbd_dev *mdev, Drbd_Header* h)
 {
 	sector_t sector;
-	unsigned int header_size,data_size;
+	unsigned int header_size, data_size;
 	int ok;
 	Drbd_Data_Packet *p = (Drbd_Data_Packet*)h;
 
@@ -1139,15 +1139,15 @@
 		 * in case we are Primary SyncTarget,
 		 * verify there are no pending write request to that area.
 		 */
-		ok = recv_resync_read(mdev,sector,data_size);
+		ok = recv_resync_read(mdev, sector, data_size);
 		if (!ok) dec_local(mdev);
 	} else {
-		if (DRBD_ratelimit(5*HZ,5))
+		if (DRBD_ratelimit(5*HZ, 5))
 			ERR("Can not write resync data to local disk.\n");
 
-		ok = drbd_drain_block(mdev,data_size);
+		ok = drbd_drain_block(mdev, data_size);
 
-		drbd_send_ack_dp(mdev,NegAck,p);
+		drbd_send_ack_dp(mdev, NegAck, p);
 	}
 
 	return ok;
@@ -1164,7 +1164,7 @@
 	struct Tl_epoch_entry *e = (struct Tl_epoch_entry*)w;
 	sector_t sector = e->sector;
 	// unsigned int epoch_size;
-	int ok=1,pcmd;
+	int ok=1, pcmd;
 
 	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
 		if (likely(drbd_bio_uptodate(e->private_bio))) {
@@ -1172,9 +1172,9 @@
 				mdev->state.conn <= PausedSyncT &&
 				e->flags & EE_MAY_SET_IN_SYNC) ?
 				RSWriteAck : WriteAck;
-			ok &= drbd_send_ack(mdev,pcmd,e);
+			ok &= drbd_send_ack(mdev, pcmd, e);
 			if (pcmd==RSWriteAck)
-				drbd_set_in_sync(mdev,sector,e->size);
+				drbd_set_in_sync(mdev, sector, e->size);
 		} else {
 			/* FIXME I think we should send a NegAck regardless of
 			 * which protocol is in effect.
@@ -1182,7 +1182,7 @@
 			 * NegAck is sent. basically that means that drbd_process_done_ee
 			 * may not list_del() the ee before this callback did run...
 			 * maybe even move the list_del(e) in here... */
-			ok = drbd_send_ack(mdev,NegAck,e);
+			ok = drbd_send_ack(mdev, NegAck, e);
 			ok&= drbd_io_error(mdev, FALSE);
 			/* we expect it to be marked out of sync anyways...
 			 * maybe assert this?  */
@@ -1212,7 +1212,7 @@
 	int ok=1;
 
 	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
-	ok = drbd_send_ack(mdev,DiscardAck,e);
+	ok = drbd_send_ack(mdev, DiscardAck, e);
 
 	spin_lock_irq(&mdev->req_lock);
 	D_ASSERT(!hlist_unhashed(&e->colision));
@@ -1253,8 +1253,8 @@
 	int ret = 0;
 	spin_lock(&mdev->peer_seq_lock);
 	for (;;) {
-		prepare_to_wait(&mdev->seq_wait,&wait,TASK_INTERRUPTIBLE);
-		if (seq_le(packet_seq,mdev->peer_seq+1))
+		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
+		if (seq_le(packet_seq, mdev->peer_seq+1))
 			break;
 		spin_unlock(&mdev->peer_seq_lock);
 		if (signal_pending(current)) {
@@ -1272,7 +1272,7 @@
 }
 
 // mirrored write
-STATIC int receive_Data(drbd_dev *mdev,Drbd_Header* h)
+STATIC int receive_Data(drbd_dev *mdev, Drbd_Header* h)
 {
 	sector_t sector;
 	struct Tl_epoch_entry *e;
@@ -1297,20 +1297,20 @@
 		/* data is submitted to disk at the end of this function.
 		 * corresponding dec_local done either below (on error),
 		 * or in drbd_endio_write_sec. */
-		if (DRBD_ratelimit(5*HZ,5))
+		if (DRBD_ratelimit(5*HZ, 5))
 			ERR("Can not write mirrored data block to local disk.\n");
 		spin_lock(&mdev->peer_seq_lock);
 		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
 			mdev->peer_seq++;
 		spin_unlock(&mdev->peer_seq_lock);
 
-		drbd_send_ack_dp(mdev,NegAck,p);
+		drbd_send_ack_dp(mdev, NegAck, p);
 		mdev->epoch_size++; // spin lock ?
-		return drbd_drain_block(mdev,data_size);
+		return drbd_drain_block(mdev, data_size);
 	}
 
 	sector = be64_to_cpu(p->sector);
-	e = read_in_block(mdev,p->block_id,sector,data_size);
+	e = read_in_block(mdev, p->block_id, sector, data_size);
 	if (!e) {
 		dec_local(mdev);
 		return FALSE;
@@ -1338,7 +1338,7 @@
 		 * we may sleep in drbd_wait_peer_seq */
 		const sector_t sector = e->sector;
 		const int size = e->size;
-		const int discard = test_bit(DISCARD_CONCURRENT,&mdev->flags);
+		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
 		DEFINE_WAIT(wait);
 		drbd_request_t *i;
 		struct hlist_node *n;
@@ -1387,15 +1387,15 @@
 
 		spin_lock_irq(&mdev->req_lock);
 
-		hlist_add_head(&e->colision,ee_hash_slot(mdev,sector));
+		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
 
 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
-		slot = tl_hash_slot(mdev,sector);
+		slot = tl_hash_slot(mdev, sector);
 		first = 1;
 		for(;;) {
 			int have_unacked = 0;
 			int have_conflict = 0;
-			prepare_to_wait(&mdev->misc_wait,&wait,TASK_INTERRUPTIBLE);
+			prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
 			hlist_for_each_entry(i, n, slot, colision) {
 				if (OVERLAPS) {
 					if (first) {
@@ -1421,7 +1421,7 @@
 				inc_unacked(mdev);
 				mdev->epoch_size++;
 				e->w.cb = e_send_discard_ack;
-				list_add_tail(&e->w.list,&mdev->done_ee);
+				list_add_tail(&e->w.list, &mdev->done_ee);
 
 				spin_unlock_irq(&mdev->req_lock);
 
@@ -1447,7 +1447,7 @@
 			if (first) {
 				first = 0;
 				ALERT("Concurrent write! [W AFTERWARDS] "
-				     "sec=%llus\n",(unsigned long long)sector);
+				     "sec=%llus\n", (unsigned long long)sector);
 			} else if (discard) {
 				/* we had none on the first iteration.
 				 * there must be none now. */
@@ -1515,7 +1515,7 @@
 		e->private_bio->bi_rw |= BIO_RW_BARRIER;
 		mdev->next_barrier_nr = 0;
 	}
-	list_add(&e->w.list,&mdev->active_ee);
+	list_add(&e->w.list, &mdev->active_ee);
 	spin_unlock_irq(&mdev->req_lock);
 
 	if (barrier_nr) {
@@ -1544,17 +1544,17 @@
 
 	if (mdev->state.pdsk == Diskless) {
 		// In case we have the only disk of the cluster,
-		drbd_set_out_of_sync(mdev,e->sector,e->size);
+		drbd_set_out_of_sync(mdev, e->sector, e->size);
 		e->flags |= EE_CALL_AL_COMPLETE_IO;
 		drbd_al_begin_io(mdev, e->sector);
 	}
 
-	MTRACE(TraceTypeEE,TraceLvlAll,
+	MTRACE(TraceTypeEE, TraceLvlAll,
 	       INFO("submit EE (DATA)WRITE sec=%llus size=%u ee=%p\n",
-		    (unsigned long long)e->sector,e->size,e);
+		    (unsigned long long)e->sector, e->size, e);
 	       );
 	/* FIXME drbd_al_begin_io in case we have two primaries... */
-	drbd_generic_make_request(mdev,WRITE,DRBD_FAULT_DT_WR,e->private_bio);
+	drbd_generic_make_request(mdev, WRITE, DRBD_FAULT_DT_WR, e->private_bio);
 	/* accounting done in endio */
 
 	maybe_kick_lo(mdev);
@@ -1565,11 +1565,11 @@
 	 * but we drop the connection anyways, so we don't have a chance to
 	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
 	dec_local(mdev);
-	drbd_free_ee(mdev,e);
+	drbd_free_ee(mdev, e);
 	return FALSE;
 }
 
-STATIC int receive_DataRequest(drbd_dev *mdev,Drbd_Header *h)
+STATIC int receive_DataRequest(drbd_dev *mdev, Drbd_Header *h)
 {
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
@@ -1588,24 +1588,24 @@
 
 	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
 		ERR("%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
-				(unsigned long long)sector,size);
+				(unsigned long long)sector, size);
 		return FALSE;
 	}
 	if ( sector + (size>>9) > capacity) {
 		ERR("%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
-				(unsigned long long)sector,size);
+				(unsigned long long)sector, size);
 		return FALSE;
 	}
 
 	if (!inc_local_if_state(mdev, UpToDate)) {
-		if (DRBD_ratelimit(5*HZ,5))
+		if (DRBD_ratelimit(5*HZ, 5))
 			ERR("Can not satisfy peer's read request, no local data.\n");
-		drbd_send_ack_rp(mdev,h->command == DataRequest ? NegDReply :
-				 NegRSDReply ,p);
+		drbd_send_ack_rp(mdev, h->command == DataRequest ? NegDReply :
+				 NegRSDReply , p);
 		return TRUE;
 	}
 
-	e = drbd_alloc_ee(mdev,p->block_id,sector,size,GFP_KERNEL);
+	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_KERNEL);
 	if (!e) {
 		dec_local(mdev);
 		return FALSE;
@@ -1626,12 +1626,12 @@
 		 * resync data block.
 		 * the drbd_work_queue mechanism is made for this...
 		 */
-		if (!drbd_rs_begin_io(mdev,sector)) {
+		if (!drbd_rs_begin_io(mdev, sector)) {
 			/* we have been interrupted,
 			 * probably connection lost! */
 			D_ASSERT(signal_pending(current));
 			dec_local(mdev);
-			drbd_free_ee(mdev,e);
+			drbd_free_ee(mdev, e);
 			return 0;
 		}
 		break;
@@ -1640,17 +1640,17 @@
 	}
 
 	spin_lock_irq(&mdev->req_lock);
-	list_add(&e->w.list,&mdev->read_ee);
+	list_add(&e->w.list, &mdev->read_ee);
 	spin_unlock_irq(&mdev->req_lock);
 
 	inc_unacked(mdev);
 
-	MTRACE(TraceTypeEE,TraceLvlAll,
+	MTRACE(TraceTypeEE, TraceLvlAll,
 	       INFO("submit EE READ sec=%llus size=%u ee=%p\n",
-		    (unsigned long long)e->sector,e->size,e);
+		    (unsigned long long)e->sector, e->size, e);
 	       );
 	/* FIXME actually, it could be a READA originating from the peer ... */
-	drbd_generic_make_request(mdev,READ,fault_type,e->private_bio);
+	drbd_generic_make_request(mdev, READ, fault_type, e->private_bio);
 	maybe_kick_lo(mdev);
 
 	return TRUE;
@@ -1687,7 +1687,7 @@
 		     "Using discard-least-changes instead\n");
 	case DiscardZeroChg:
 		if (ch_peer == 0 && ch_self == 0) {
-			rv=test_bit(DISCARD_CONCURRENT,&mdev->flags) ? -1 : 1;
+			rv=test_bit(DISCARD_CONCURRENT, &mdev->flags) ? -1 : 1;
 			break;
 		} else {
 			if (ch_peer == 0) { rv =  1; break; }
@@ -1699,7 +1699,7 @@
 		else if (ch_self > ch_peer) rv =  1;
 		else /* ( ch_self == ch_peer ) */ {
 			// Well, then use something else.
-			rv=test_bit(DISCARD_CONCURRENT,&mdev->flags) ? -1 : 1;
+			rv=test_bit(DISCARD_CONCURRENT, &mdev->flags) ? -1 : 1;
 		}
 		break;
 	case DiscardLocal:
@@ -1742,9 +1742,9 @@
 	case CallHelper:
 		hg = drbd_asb_recover_0p(mdev);
 		if (hg == -1 && mdev->state.role==Primary) {
-			self = drbd_set_role(mdev,Secondary,0);
+			self = drbd_set_role(mdev, Secondary, 0);
 			if (self != SS_Success) {
-				drbd_khelper(mdev,"pri-lost-after-sb");
+				drbd_khelper(mdev, "pri-lost-after-sb");
 			} else {
 				WARN("Sucessfully gave up primary role.\n");
 				rv = hg;
@@ -1780,9 +1780,9 @@
 	case CallHelper:
 		hg = drbd_asb_recover_0p(mdev);
 		if (hg == -1) {
-			self = drbd_set_role(mdev,Secondary,0);
+			self = drbd_set_role(mdev, Secondary, 0);
 			if (self != SS_Success) {
-				drbd_khelper(mdev,"pri-lost-after-sb");
+				drbd_khelper(mdev, "pri-lost-after-sb");
 			} else {
 				WARN("Sucessfully gave up primary role.\n");
 				rv = hg;
@@ -1793,7 +1793,7 @@
 	return rv;
 }
 
-STATIC void drbd_uuid_dump(drbd_dev *mdev,char* text,u64* uuid)
+STATIC void drbd_uuid_dump(drbd_dev *mdev, char* text, u64* uuid)
 {
 	INFO("%s %016llX:%016llX:%016llX:%016llX\n",
 	     text,
@@ -1816,7 +1816,7 @@
 STATIC int drbd_uuid_compare(drbd_dev *mdev, int *rule_nr)
 {
 	u64 self, peer;
-	int i,j;
+	int i, j;
 
 	self = mdev->bc->md.uuid[Current] & ~((u64)1);
 	peer = mdev->p_uuid[Current] & ~((u64)1);
@@ -1835,22 +1835,22 @@
 
 	*rule_nr = 4;
 	if (self == peer) { // Common power [off|failure]
-		int rct,dc; // roles at crash time
+		int rct, dc; // roles at crash time
 
 		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
 			( mdev->p_uuid[UUID_FLAGS] & 2 );
 		// lowest bit is set when we were primary
 		// next bit (weight 2) is set when peer was primary
 
-		MTRACE(TraceTypeUuid,TraceLvlMetrics, DUMPI(rct); );
+		MTRACE(TraceTypeUuid, TraceLvlMetrics, DUMPI(rct); );
 
 		switch(rct) {
 		case 0: /* !self_pri && !peer_pri */ return 0;
 		case 1: /*  self_pri && !peer_pri */ return 1;
 		case 2: /* !self_pri &&  peer_pri */ return -1;
 		case 3: /*  self_pri &&  peer_pri */
-			dc = test_bit(DISCARD_CONCURRENT,&mdev->flags);
-			MTRACE(TraceTypeUuid,TraceLvlMetrics, DUMPI(dc); );
+			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+			MTRACE(TraceTypeUuid, TraceLvlMetrics, DUMPI(dc); );
 			return dc ? -1 : 1;
 		}
 	}
@@ -1899,25 +1899,25 @@
 STATIC drbd_conns_t drbd_sync_handshake(drbd_dev *mdev, drbd_role_t peer_role,
 					drbd_disks_t peer_disk)
 {
-	int hg,rule_nr;
+	int hg, rule_nr;
 	drbd_conns_t rv = conn_mask;
 	drbd_disks_t mydisk;
 
 	mydisk = mdev->state.disk;
 	if (mydisk == Negotiating) mydisk = mdev->new_state_tmp.disk;
 
-	hg = drbd_uuid_compare(mdev,&rule_nr);
+	hg = drbd_uuid_compare(mdev, &rule_nr);
 
-	MTRACE(TraceTypeUuid,TraceLvlSummary,
+	MTRACE(TraceTypeUuid, TraceLvlSummary,
 	       INFO("drbd_sync_handshake:\n");
-	       drbd_uuid_dump(mdev,"self",mdev->bc->md.uuid);
-	       drbd_uuid_dump(mdev,"peer",mdev->p_uuid);
-	       INFO("uuid_compare()=%d by rule %d\n",hg,rule_nr);
+	       drbd_uuid_dump(mdev, "self", mdev->bc->md.uuid);
+	       drbd_uuid_dump(mdev, "peer", mdev->p_uuid);
+	       INFO("uuid_compare()=%d by rule %d\n", hg, rule_nr);
 	    );
 
 	if (hg == -1000) {
 		ALERT("Unrelated data, dropping connection!\n");
-		drbd_force_state(mdev,NS(conn,Disconnecting));
+		drbd_force_state(mdev, NS(conn, Disconnecting));
 		return conn_mask;
 	}
 
@@ -1951,8 +1951,8 @@
 			if (forced) {
 				WARN("Doing a full sync, since"
 				     " UUIDs where ambiguous.\n");
-				drbd_uuid_dump(mdev,"self",mdev->bc->md.uuid);
-				drbd_uuid_dump(mdev,"peer",mdev->p_uuid);
+				drbd_uuid_dump(mdev, "self", mdev->bc->md.uuid);
+				drbd_uuid_dump(mdev, "peer", mdev->p_uuid);
 				hg=hg*2;
 			}
 		}
@@ -1974,15 +1974,15 @@
 
 	if (hg == -100) {
 		ALERT("Split-Brain detected, dropping connection!\n");
-		drbd_uuid_dump(mdev,"self",mdev->bc->md.uuid);
-		drbd_uuid_dump(mdev,"peer",mdev->p_uuid);
-		drbd_force_state(mdev,NS(conn,Disconnecting));
+		drbd_uuid_dump(mdev, "self", mdev->bc->md.uuid);
+		drbd_uuid_dump(mdev, "peer", mdev->p_uuid);
+		drbd_force_state(mdev, NS(conn, Disconnecting));
 		return conn_mask;
 	}
 
 	if (hg > 0 && mydisk <= Inconsistent) {
 		ERR("I shall become SyncSource, but I am inconsistent!\n");
-		drbd_force_state(mdev,NS(conn,Disconnecting));
+		drbd_force_state(mdev, NS(conn, Disconnecting));
 		return conn_mask;
 	}
 
@@ -1990,11 +1990,11 @@
 	    mdev->state.role == Primary && mdev->state.disk >= Consistent ) {
 		switch(mdev->net_conf->rr_conflict) {
 		case CallHelper:
-			drbd_khelper(mdev,"pri-lost");
+			drbd_khelper(mdev, "pri-lost");
 			// fall through
 		case Disconnect:
 			ERR("I shall become SyncTarget, but I am primary!\n");
-			drbd_force_state(mdev,NS(conn,Disconnecting));
+			drbd_force_state(mdev, NS(conn, Disconnecting));
 			return conn_mask;
 		case Violently:
 			WARN("Becoming SyncTarget, violating the stable-data"
@@ -2003,7 +2003,7 @@
 	}
 
 	if (abs(hg) >= 2) {
-		drbd_md_set_flag(mdev,MDF_FullSync);
+		drbd_md_set_flag(mdev, MDF_FullSync);
 		drbd_md_sync(mdev);
 
 		drbd_bm_set_all(mdev);
@@ -2012,7 +2012,7 @@
 			return conn_mask;
 		}
 
-		drbd_md_clear_flag(mdev,MDF_FullSync);
+		drbd_md_clear_flag(mdev, MDF_FullSync);
 		drbd_md_sync(mdev);
 	}
 
@@ -2102,11 +2102,11 @@
 	return TRUE;
 
  disconnect:
-	drbd_force_state(mdev,NS(conn,Disconnecting));
+	drbd_force_state(mdev, NS(conn, Disconnecting));
 	return FALSE;
 }
 
-STATIC int receive_SyncParam(drbd_dev *mdev,Drbd_Header *h)
+STATIC int receive_SyncParam(drbd_dev *mdev, Drbd_Header *h)
 {
 	int ok = TRUE;
 	Drbd_SyncParam_Packet *p = (Drbd_SyncParam_Packet*)h;
@@ -2155,7 +2155,7 @@
 
 	if (p_size == 0 && mdev->state.disk == Diskless) {
 		ERR("some backing storage is needed\n");
-		drbd_force_state(mdev,NS(conn,Disconnecting));
+		drbd_force_state(mdev, NS(conn, Disconnecting));
 		return FALSE;
 	}
 
@@ -2182,12 +2182,12 @@
 		}
 
 		// Never shrink a device with usable data.
-		if (drbd_new_dev_size(mdev,mdev->bc) <
+		if (drbd_new_dev_size(mdev, mdev->bc) <
 		   drbd_get_capacity(mdev->this_bdev) &&
 		   mdev->state.disk >= Outdated ) {
 			dec_local(mdev);
 			ERR("The peer's disk size is too small!\n");
-			drbd_force_state(mdev,NS(conn,Disconnecting));
+			drbd_force_state(mdev, NS(conn, Disconnecting));
 			mdev->bc->dc.disk_size = my_usize;
 			return FALSE;
 		}
@@ -2209,17 +2209,17 @@
 		dec_local(mdev);
 	} else {
 		// I am diskless, need to accept the peer's size.
-		drbd_set_my_capacity(mdev,p_size);
+		drbd_set_my_capacity(mdev, p_size);
 	}
 
 	if (mdev->p_uuid && mdev->state.conn <= Connected && inc_local(mdev)) {
-		nconn=drbd_sync_handshake(mdev,mdev->state.peer,mdev->state.pdsk);
+		nconn=drbd_sync_handshake(mdev, mdev->state.peer, mdev->state.pdsk);
 		dec_local(mdev);
 
 		if (nconn == conn_mask) return FALSE;
 
-		if (drbd_request_state(mdev,NS(conn,nconn)) < SS_Success) {
-			drbd_force_state(mdev,NS(conn,Disconnecting));
+		if (drbd_request_state(mdev, NS(conn, nconn)) < SS_Success) {
+			drbd_force_state(mdev, NS(conn, Disconnecting));
 			return FALSE;
 		}
 	}
@@ -2230,7 +2230,7 @@
 			drbd_setup_queue_param(mdev, max_seg_s);
 		}
 
-		drbd_setup_order_type(mdev,be32_to_cpu(p->queue_order_type));
+		drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
 		dec_local(mdev);
 	}
 
@@ -2301,7 +2301,7 @@
 STATIC int receive_req_state(drbd_dev *mdev, Drbd_Header *h)
 {
 	Drbd_Req_State_Packet *p = (Drbd_Req_State_Packet*)h;
-	drbd_state_t mask,val;
+	drbd_state_t mask, val;
 	int rv;
 
 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
@@ -2311,16 +2311,16 @@
 	mask.i = be32_to_cpu(p->mask);
 	val.i = be32_to_cpu(p->val);
 
-	if (test_bit(DISCARD_CONCURRENT,&mdev->flags)) drbd_state_lock(mdev);
+	if (test_bit(DISCARD_CONCURRENT, &mdev->flags)) drbd_state_lock(mdev);
 
 	mask = convert_state(mask);
 	val = convert_state(val);
 
-	rv = drbd_change_state(mdev,ChgStateVerbose,mask,val);
+	rv = drbd_change_state(mdev, ChgStateVerbose, mask, val);
 
-	if (test_bit(DISCARD_CONCURRENT,&mdev->flags)) drbd_state_unlock(mdev);
+	if (test_bit(DISCARD_CONCURRENT, &mdev->flags)) drbd_state_unlock(mdev);
 
-	drbd_send_sr_reply(mdev,rv);
+	drbd_send_sr_reply(mdev, rv);
 	drbd_md_sync(mdev);
 
 	return TRUE;
@@ -2329,8 +2329,8 @@
 STATIC int receive_state(drbd_dev *mdev, Drbd_Header *h)
 {
 	Drbd_State_Packet *p = (Drbd_State_Packet*)h;
-	drbd_conns_t nconn,oconn;
-	drbd_state_t os,ns,peer_state;
+	drbd_conns_t nconn, oconn;
+	drbd_state_t os, ns, peer_state;
 	int rv;
 
 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
@@ -2348,8 +2348,8 @@
 
 	if (mdev->p_uuid && oconn <= Connected &&
 	    peer_state.disk >= Negotiating &&
-	    inc_local_if_state(mdev,Negotiating) ) {
-		nconn=drbd_sync_handshake(mdev,peer_state.role,peer_state.disk);
+	    inc_local_if_state(mdev, Negotiating) ) {
+		nconn=drbd_sync_handshake(mdev, peer_state.role, peer_state.disk);
 		dec_local(mdev);
 
 		if (nconn == conn_mask) return FALSE;
@@ -2367,11 +2367,11 @@
 	   ns.disk == Negotiating ) ns.disk = UpToDate;
 	if ((nconn == Connected || nconn == WFBitMapT) &&
 	   ns.pdsk == Negotiating ) ns.pdsk = UpToDate;
-	rv = _drbd_set_state(mdev,ns,ChgStateVerbose | ChgStateHard);
+	rv = _drbd_set_state(mdev, ns, ChgStateVerbose | ChgStateHard);
 	spin_unlock_irq(&mdev->req_lock);
 
 	if (rv < SS_Success) {
-		drbd_force_state(mdev,NS(conn,Disconnecting));
+		drbd_force_state(mdev, NS(conn, Disconnecting));
 		return FALSE;
 	}
 
@@ -2388,7 +2388,7 @@
 	}
 
 	if (rv==SS_Success) {
-		after_state_ch(mdev,os,ns,ChgStateVerbose | ChgStateHard);
+		after_state_ch(mdev, os, ns, ChgStateVerbose | ChgStateHard);
 	}
 
 	mdev->net_conf->want_lose = 0;
@@ -2414,10 +2414,10 @@
 
 	/* Here the _drbd_uuid_ functions are right, current should
 	   _not_ be rotated into the history */
-	_drbd_uuid_set(mdev,Current,be64_to_cpu(p->uuid));
-	_drbd_uuid_set(mdev,Bitmap,0UL);
+	_drbd_uuid_set(mdev, Current, be64_to_cpu(p->uuid));
+	_drbd_uuid_set(mdev, Bitmap, 0UL);
 
-	drbd_start_resync(mdev,SyncTarget);
+	drbd_start_resync(mdev, SyncTarget);
 
 	return TRUE;
 }
@@ -2453,17 +2453,17 @@
 		drbd_bm_merge_lel(mdev, bm_i, num_words, buffer);
 		bm_i += num_words;
 
-		if (!drbd_recv_header(mdev,h))
+		if (!drbd_recv_header(mdev, h))
 			goto out;
 		D_ASSERT(h->command == ReportBitMap);
 	}
 
 	if (mdev->state.conn == WFBitMapS) {
-		drbd_start_resync(mdev,SyncSource);
+		drbd_start_resync(mdev, SyncSource);
 	} else if (mdev->state.conn == WFBitMapT) {
 		ok = drbd_send_bitmap(mdev);
 		if (!ok) goto out;
-		ok = drbd_request_state(mdev,NS(conn,WFSyncUUID));
+		ok = drbd_request_state(mdev, NS(conn, WFSyncUUID));
 		D_ASSERT( ok == SS_Success );
 	} else {
 		ERR("unexpected cstate (%s) in receive_bitmap\n",
@@ -2477,19 +2477,19 @@
 	return ok;
 }
 
-STATIC int receive_skip(drbd_dev *mdev,Drbd_Header *h)
+STATIC int receive_skip(drbd_dev *mdev, Drbd_Header *h)
 {
 	// TODO zero copy sink :)
 	static char sink[128];
-	int size,want,r;
+	int size, want, r;
 
 	WARN("skipping unknown optional packet type %d, l: %d!\n",
 	     h->command, h->length );
 
 	size = h->length;
 	while (size > 0) {
-		want = min_t(int,size,sizeof(sink));
-		r = drbd_recv(mdev,sink,want);
+		want = min_t(int, size, sizeof(sink));
+		r = drbd_recv(mdev, sink, want);
 		ERR_IF(r < 0) break;
 		size -= r;
 	}
@@ -2502,7 +2502,7 @@
 	return TRUE; // cannot fail.
 }
 
-typedef int (*drbd_cmd_handler_f)(drbd_dev*,Drbd_Header*);
+typedef int (*drbd_cmd_handler_f)(drbd_dev*, Drbd_Header*);
 
 static drbd_cmd_handler_f drbd_default_handler[] = {
 	[Data]             = receive_Data,
@@ -2536,7 +2536,7 @@
 	Drbd_Header *header = &mdev->data.rbuf.head;
 
 	while (get_t_state(&mdev->receiver) == Running) {
-		if (!drbd_recv_header(mdev,header))
+		if (!drbd_recv_header(mdev, header))
 			break;
 
 		if (header->command < MAX_CMD)
@@ -2551,17 +2551,17 @@
 		if (unlikely(!handler)) {
 			ERR("unknown packet type %d, l: %d!\n",
 			    header->command, header->length);
-			drbd_force_state(mdev,NS(conn,ProtocolError));
+			drbd_force_state(mdev, NS(conn, ProtocolError));
 			break;
 		}
-		if (unlikely(!handler(mdev,header))) {
+		if (unlikely(!handler(mdev, header))) {
 			ERR("error receiving %s, l: %d!\n",
 			    cmdname(header->command), header->length);
-			drbd_force_state(mdev,NS(conn,ProtocolError));
+			drbd_force_state(mdev, NS(conn, ProtocolError));
 			break;
 		}
 
-		dump_packet(mdev,mdev->data.socket,2,&mdev->data.rbuf, __FILE__, __LINE__);
+		dump_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf, __FILE__, __LINE__);
 	}
 }
 
@@ -2585,7 +2585,7 @@
 			list_add(&req->w.list, &workset);
 		}
 	}
-	memset(mdev->app_reads_hash,0,APP_R_HSIZE*sizeof(void*));
+	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void*));
 
 	while(!list_empty(&workset)) {
 		le = workset.next;
@@ -2601,7 +2601,7 @@
 {
 	struct drbd_work prev_work_done;
 	enum fencing_policy fp;
-	drbd_state_t os,ns;
+	drbd_state_t os, ns;
 	int rv=SS_UnknownError;
 
 	D_ASSERT(mdev->state.conn < Connected);
@@ -2624,10 +2624,10 @@
 	up(&mdev->data.mutex);
 
 	spin_lock_irq(&mdev->req_lock);
-	_drbd_wait_ee_list_empty(mdev,&mdev->active_ee);
-	_drbd_wait_ee_list_empty(mdev,&mdev->sync_ee);
+	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
 	_drbd_clear_done_ee(mdev);
-	_drbd_wait_ee_list_empty(mdev,&mdev->read_ee);
+	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
 	reclaim_net_ee(mdev);
 	spin_unlock_irq(&mdev->req_lock);
 
@@ -2649,21 +2649,21 @@
 	drbd_rs_cancel_all(mdev);
 	mdev->rs_total=0;
 	mdev->rs_failed=0;
-	atomic_set(&mdev->rs_pending_cnt,0);
+	atomic_set(&mdev->rs_pending_cnt, 0);
 	wake_up(&mdev->misc_wait);
 
 	/* make sure syncer is stopped and w_resume_next_sg queued */
 	del_timer_sync(&mdev->resync_timer);
-	set_bit(STOP_SYNC_TIMER,&mdev->flags);
+	set_bit(STOP_SYNC_TIMER, &mdev->flags);
 	resync_timer_fn((unsigned long)mdev);
 
 	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
 	 * w_make_resync_request etc. which may still be on the worker queue
 	 * to be "canceled" */
-	set_bit(WORK_PENDING,&mdev->flags);
+	set_bit(WORK_PENDING, &mdev->flags);
 	prev_work_done.cb = w_prev_work_done;
-	drbd_queue_work(&mdev->data.work,&prev_work_done);
-	wait_event(mdev->misc_wait, !test_bit(WORK_PENDING,&mdev->flags));
+	drbd_queue_work(&mdev->data.work, &prev_work_done);
+	wait_event(mdev->misc_wait, !test_bit(WORK_PENDING, &mdev->flags));
 
 	if (mdev->p_uuid) {
 		kfree(mdev->p_uuid);
@@ -2683,7 +2683,7 @@
 		if ( fp >= Resource &&
 		    mdev->state.pdsk >= DUnknown ) {
 			drbd_disks_t nps = drbd_try_outdate_peer(mdev);
-			drbd_request_state(mdev,NS(pdsk,nps));
+			drbd_request_state(mdev, NS(pdsk, nps));
 		}
 	}
 
@@ -2693,15 +2693,15 @@
 		// Do not restart in case we are Disconnecting
 		ns = os;
 		ns.conn = Unconnected;
-		rv=_drbd_set_state(mdev,ns,ChgStateVerbose);
+		rv=_drbd_set_state(mdev, ns, ChgStateVerbose);
 	}
 	spin_unlock_irq(&mdev->req_lock);
 	if (rv == SS_Success) {
-		after_state_ch(mdev,os,ns,ChgStateVerbose);
+		after_state_ch(mdev, os, ns, ChgStateVerbose);
 	}
 
 	if (os.conn == Disconnecting) {
-		wait_event( mdev->misc_wait,atomic_read(&mdev->net_cnt) == 0 );
+		wait_event( mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0 );
 		if (mdev->ee_hash) {
 			kfree(mdev->ee_hash);
 			mdev->ee_hash = NULL;
@@ -2719,7 +2719,7 @@
 		}
 		kfree(mdev->net_conf);
 		mdev->net_conf=NULL;
-		drbd_request_state(mdev, NS(conn,StandAlone));
+		drbd_request_state(mdev, NS(conn, StandAlone));
 	}
 
 	/* they do trigger all the time.
@@ -2762,7 +2762,7 @@
 		return 0;
 	}
 
-	memset(p,0,sizeof(*p));
+	memset(p, 0, sizeof(*p));
 	p->protocol_version = cpu_to_be32(PRO_VERSION);
 	ok = _drbd_send_cmd( mdev, mdev->data.socket, HandShake,
 	                     (Drbd_Header *)p, sizeof(*p), 0 );
@@ -2787,7 +2787,7 @@
 	rv = drbd_send_handshake(mdev);
 	if (!rv) goto break_c_loop;
 
-	rv = drbd_recv_header(mdev,&p->head);
+	rv = drbd_recv_header(mdev, &p->head);
 	if (!rv) goto break_c_loop;
 
 	if (p->head.command != HandShake) {
@@ -2809,7 +2809,7 @@
 		return 0;
 	}
 
-	dump_packet(mdev,mdev->data.socket,2,&mdev->data.rbuf, __FILE__, __LINE__);
+	dump_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf, __FILE__, __LINE__);
 
 	p->protocol_version = be32_to_cpu(p->protocol_version);
 
@@ -2882,17 +2882,17 @@
 	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
 				(u8*)mdev->net_conf->shared_secret, key_len);
 	if (rv) {
-		ERR("crypto_hash_setkey() failed with %d\n",rv);
+		ERR("crypto_hash_setkey() failed with %d\n", rv);
 		rv = 0;
 		goto fail;
 	}
 
 	get_random_bytes(my_challenge, CHALLENGE_LEN);
 
-	rv = drbd_send_cmd2(mdev,AuthChallenge,my_challenge,CHALLENGE_LEN);
+	rv = drbd_send_cmd2(mdev, AuthChallenge, my_challenge, CHALLENGE_LEN);
 	if (!rv) goto fail;
 
-	rv = drbd_recv_header(mdev,&p);
+	rv = drbd_recv_header(mdev, &p);
 	if (!rv) goto fail;
 
 	if (p.command != AuthChallenge) {
@@ -2908,7 +2908,7 @@
 		goto fail;
 	}
 
-	peers_ch = kmalloc(p.length,GFP_KERNEL);
+	peers_ch = kmalloc(p.length, GFP_KERNEL);
 	if (peers_ch == NULL) {
 		ERR("kmalloc of peers_ch failed\n");
 		rv = 0;
@@ -2924,7 +2924,7 @@
 	}
 
 	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
-	response = kmalloc(resp_size,GFP_KERNEL);
+	response = kmalloc(resp_size, GFP_KERNEL);
 	if (response == NULL) {
 		ERR("kmalloc of response failed\n");
 		rv = 0;
@@ -2937,15 +2937,15 @@
 
 	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
 	if (rv) {
-		ERR( "crypto_hash_digest() failed with %d\n",rv);
+		ERR( "crypto_hash_digest() failed with %d\n", rv);
 		rv = 0;
 		goto fail;
 	}
 
-	rv = drbd_send_cmd2(mdev,AuthResponse,response,resp_size);
+	rv = drbd_send_cmd2(mdev, AuthResponse, response, resp_size);
 	if (!rv) goto fail;
 
-	rv = drbd_recv_header(mdev,&p);
+	rv = drbd_recv_header(mdev, &p);
 	if (!rv) goto fail;
 
 	if (p.command != AuthResponse) {
@@ -2969,7 +2969,7 @@
 		goto fail;
 	}
 
-	right_response = kmalloc(resp_size,GFP_KERNEL);
+	right_response = kmalloc(resp_size, GFP_KERNEL);
 	if (response == NULL) {
 		ERR("kmalloc of right_response failed\n");
 		rv = 0;
@@ -2982,16 +2982,16 @@
 
 	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
 	if (rv) {
-		ERR( "crypto_hash_digest() failed with %d\n",rv);
+		ERR( "crypto_hash_digest() failed with %d\n", rv);
 		rv = 0;
 		goto fail;
 	}
 
-	rv = ! memcmp(response,right_response,resp_size);
+	rv = ! memcmp(response, right_response, resp_size);
 
 	if (rv) {
 		INFO("Peer authenticated using %d bytes of '%s' HMAC\n",
-		     resp_size,mdev->net_conf->cram_hmac_alg);
+		     resp_size, mdev->net_conf->cram_hmac_alg);
 	}
 
  fail:
@@ -3021,7 +3021,7 @@
 		}
 		if (h < 0) {
 			WARN("Discarding network configuration.\n");
-			drbd_force_state(mdev,NS(conn,Disconnecting));
+			drbd_force_state(mdev, NS(conn, Disconnecting));
 		}
 	} while ( h == 0 );
 
@@ -3056,9 +3056,9 @@
 	int retcode = be32_to_cpu(p->retcode);
 
 	if (retcode >= SS_Success) {
-		set_bit(CL_ST_CHG_SUCCESS,&mdev->flags);
+		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
 	} else {
-		set_bit(CL_ST_CHG_FAIL,&mdev->flags);
+		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
 		ERR("Requested state change failed by peer: %s\n",
 		    set_st_err_name(retcode));
 	}
@@ -3088,10 +3088,10 @@
 	sector_t sector = be64_to_cpu(p->sector);
 	int blksize = be32_to_cpu(p->blksize);
 
-	update_peer_seq(mdev,be32_to_cpu(p->seq_num));
+	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 
 	if ( is_syncer_block_id(p->block_id)) {
-		drbd_set_in_sync(mdev,sector,blksize);
+		drbd_set_in_sync(mdev, sector, blksize);
 		dec_rs_pending(mdev);
 	} else {
 		spin_lock_irq(&mdev->req_lock);
@@ -3106,15 +3106,15 @@
 		switch (be16_to_cpu(h->command)) {
 		case RSWriteAck:
 			D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
-			_req_mod(req,write_acked_by_peer_and_sis,0);
+			_req_mod(req, write_acked_by_peer_and_sis, 0);
 			break;
 		case WriteAck:
 			D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
-			_req_mod(req,write_acked_by_peer,0);
+			_req_mod(req, write_acked_by_peer, 0);
 			break;
 		case RecvAck:
 			D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
-			_req_mod(req,recv_acked_by_peer,0);
+			_req_mod(req, recv_acked_by_peer, 0);
 			break;
 		case DiscardAck:
 			D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
@@ -3139,10 +3139,10 @@
 	sector_t sector = be64_to_cpu(p->sector);
 	drbd_request_t *req;
 
-	if (DRBD_ratelimit(5*HZ,5))
+	if (DRBD_ratelimit(5*HZ, 5))
 		WARN("Got NegAck packet. Peer is in troubles?\n");
 
-	update_peer_seq(mdev,be32_to_cpu(p->seq_num));
+	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 
 	if (is_syncer_block_id(p->block_id)) {
 		sector_t sector = be64_to_cpu(p->sector);
@@ -3173,7 +3173,7 @@
 	sector_t sector = be64_to_cpu(p->sector);
 
 	spin_lock_irq(&mdev->req_lock);
-	req = _ar_id_to_req(mdev,p->block_id, sector);
+	req = _ar_id_to_req(mdev, p->block_id, sector);
 	if (unlikely(!req)) {
 		spin_unlock_irq(&mdev->req_lock);
 		ERR("Got a corrupt block_id/sector pair(3).\n");
@@ -3183,13 +3183,13 @@
 	/* FIXME explicitly warn if protocol != C */
 
 	ERR("Got NegDReply; Sector %llus, len %u; Fail original request.\n",
-	    (unsigned long long)sector,be32_to_cpu(p->blksize));
+	    (unsigned long long)sector, be32_to_cpu(p->blksize));
 
 	_req_mod(req, neg_acked, 0);
 	spin_unlock_irq(&mdev->req_lock);
 
 // warning LGE "ugly and wrong"
-	drbd_khelper(mdev,"pri-on-incon-degr");
+	drbd_khelper(mdev, "pri-on-incon-degr");
 
 	return TRUE;
 }
@@ -3206,8 +3206,8 @@
 
 	dec_rs_pending(mdev);
 
-	if (inc_local_if_state(mdev,Failed)) {
-		drbd_rs_complete_io(mdev,sector);
+	if (inc_local_if_state(mdev, Failed)) {
+		drbd_rs_complete_io(mdev, sector);
 		drbd_rs_failed_io(mdev, sector, size);
 		dec_local(mdev);
 	}
@@ -3219,7 +3219,7 @@
 {
 	Drbd_BarrierAck_Packet *p = (Drbd_BarrierAck_Packet*)h;
 
-	tl_release(mdev,p->barrier,be32_to_cpu(p->set_size));
+	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
 	dec_ap_pending(mdev);
 
 	return TRUE;
@@ -3235,7 +3235,7 @@
 	drbd_dev *mdev = thi->mdev;
 	Drbd_Header *h = &mdev->meta.rbuf.head;
 
-	int rv,len;
+	int rv, len;
 	void *buf    = h;
 	int received = 0;
 	int expect   = sizeof(Drbd_Header);
@@ -3252,8 +3252,8 @@
 		[NegAck]    ={ sizeof(Drbd_BlockAck_Packet),  got_NegAck },
 		[NegDReply] ={ sizeof(Drbd_BlockAck_Packet),  got_NegDReply },
 		[NegRSDReply]={sizeof(Drbd_BlockAck_Packet),  got_NegRSDReply},
-		[BarrierAck]={ sizeof(Drbd_BarrierAck_Packet),got_BarrierAck },
-		[StateChgReply]={sizeof(Drbd_RqS_Reply_Packet),got_RqSReply },
+		[BarrierAck]={ sizeof(Drbd_BarrierAck_Packet), got_BarrierAck },
+		[StateChgReply]={sizeof(Drbd_RqS_Reply_Packet), got_RqSReply },
 	};
 
 	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
@@ -3284,7 +3284,7 @@
 		drbd_tcp_flush(mdev->meta.socket);
 
 		rv = drbd_recv_short(mdev, mdev->meta.socket,
-				     buf,expect-received);
+				     buf, expect-received);
 		clear_bit(SIGNAL_ASENDER, &mdev->flags);
 
 		flush_signals(current);
@@ -3313,7 +3313,7 @@
 				ERR("PingAck did not arrive in time.\n");
 				goto err;
 			}
-			set_bit(SEND_PING,&mdev->flags);
+			set_bit(SEND_PING, &mdev->flags);
 			continue;
 		} else if (rv == -EINTR) {
 			continue;
@@ -3333,14 +3333,14 @@
 			}
 			expect = asender_tbl[cmd].pkt_size;
 			ERR_IF(len != expect-sizeof(Drbd_Header)) {
-				dump_packet(mdev,mdev->meta.socket,1,(void*)h, __FILE__, __LINE__);
+				dump_packet(mdev, mdev->meta.socket, 1, (void*)h, __FILE__, __LINE__);
 				DUMPI(expect);
 			}
 		}
 		if (received == expect) {
 			D_ASSERT(cmd != -1);
-			dump_packet(mdev,mdev->meta.socket,1,(void*)h, __FILE__, __LINE__);
-			if (!asender_tbl[cmd].process(mdev,h)) goto err;
+			dump_packet(mdev, mdev->meta.socket, 1, (void*)h, __FILE__, __LINE__);
+			if (!asender_tbl[cmd].process(mdev, h)) goto err;
 
 			buf      = h;
 			received = 0;
@@ -3353,7 +3353,7 @@
 	err:
 		clear_bit(SIGNAL_ASENDER, &mdev->flags);
 		if (mdev->state.conn >= Connected)
-			drbd_force_state(mdev,NS(conn,NetworkFailure));
+			drbd_force_state(mdev, NS(conn, NetworkFailure));
 	}
 
 	D_ASSERT(mdev->state.conn < Connected);

Modified: branches/drbd-8.0-for-linus/drbd/drbd_req.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_req.c	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_req.c	2007-07-24 11:38:05 UTC (rev 2985)
@@ -61,7 +61,7 @@
 
 //#define VERBOSE_REQUEST_CODE
 #if defined(VERBOSE_REQUEST_CODE) || defined(ENABLE_DYNAMIC_TRACE)
-void _print_req_mod(drbd_request_t *req,drbd_req_event_t what)
+void _print_req_mod(drbd_request_t *req, drbd_req_event_t what)
 {
 	drbd_dev *mdev = req->mdev;
 	const int rw = (req->master_bio == NULL ||
@@ -93,16 +93,16 @@
 }
 
 # ifdef ENABLE_DYNAMIC_TRACE
-#  define print_rq_state(R,T) MTRACE(TraceTypeRq,TraceLvlMetrics,_print_rq_state(R,T);)
-#  define print_req_mod(T,W)  MTRACE(TraceTypeRq,TraceLvlMetrics,_print_req_mod(T,W);)
+#  define print_rq_state(R, T) MTRACE(TraceTypeRq, TraceLvlMetrics, _print_rq_state(R, T);)
+#  define print_req_mod(T, W)  MTRACE(TraceTypeRq, TraceLvlMetrics, _print_req_mod(T, W);)
 # else
-#  define print_rq_state(R,T) _print_rq_state(R,T)
-#  define print_req_mod(T,W)  _print_req_mod(T,W)
+#  define print_rq_state(R, T) _print_rq_state(R, T)
+#  define print_req_mod(T, W)  _print_req_mod(T, W)
 # endif
 
 #else
-#define print_rq_state(R,T)
-#define print_req_mod(T,W)
+#define print_rq_state(R, T)
+#define print_req_mod(T, W)
 #endif
 
 static void _req_is_done(drbd_dev *mdev, drbd_request_t *req, const int rw)
@@ -124,12 +124,12 @@
 		 * Other places where we set out-of-sync:
 		 * READ with local io-error */
 		if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK)) {
-			drbd_set_out_of_sync(mdev,req->sector,req->size);
+			drbd_set_out_of_sync(mdev, req->sector, req->size);
 		}
 
 		if ( (s & RQ_NET_OK) && (s & RQ_LOCAL_OK) &&
 		    (s & RQ_NET_SIS) ) {
-			drbd_set_in_sync(mdev,req->sector,req->size);
+			drbd_set_in_sync(mdev, req->sector, req->size);
 		}
 
 		/* one might be tempted to move the drbd_al_complete_io
@@ -143,7 +143,7 @@
 		 * we would forget to resync the corresponding extent.
 		 */
 		if (s & RQ_LOCAL_MASK) {
-			if (inc_local_if_state(mdev,Failed)) {
+			if (inc_local_if_state(mdev, Failed)) {
 				drbd_al_complete_io(mdev, req->sector);
 				dec_local(mdev);
 			} else {
@@ -193,7 +193,7 @@
 			/* before we can signal completion to the upper layers,
 			 * we may need to close the current epoch */
 			if (req->epoch == mdev->newest_barrier->br_number)
-				set_bit(ISSUE_BARRIER,&mdev->flags);
+				set_bit(ISSUE_BARRIER, &mdev->flags);
 
 			/* we need to do the conflict detection stuff,
 			 * if we have the ee_hash (two_primaries) and
@@ -206,7 +206,7 @@
 				 * there must be no conflicting requests, since
 				 * they must have been failed on the spot */
 #define OVERLAPS overlaps(sector, size, i->sector, i->size)
-				slot = tl_hash_slot(mdev,sector);
+				slot = tl_hash_slot(mdev, sector);
 				hlist_for_each_entry(i, n, slot, colision) {
 					if (OVERLAPS) {
 						ALERT("LOGIC BUG: completed: %p %llus +%u; other: %p %llus +%u\n",
@@ -229,7 +229,7 @@
 				 * we just have to do a wake_up.  */
 #undef OVERLAPS
 #define OVERLAPS overlaps(sector, size, e->sector, e->size)
-				slot = ee_hash_slot(mdev,req->sector);
+				slot = ee_hash_slot(mdev, req->sector);
 				hlist_for_each_entry(e, n, slot, colision) {
 					if (OVERLAPS) {
 						wake_up(&mdev->misc_wait);
@@ -242,7 +242,7 @@
 
 static void _complete_master_bio(drbd_dev *mdev, drbd_request_t *req, int error)
 {
-	dump_bio(mdev,req->master_bio,1);
+	dump_bio(mdev, req->master_bio, 1);
 	bio_endio(req->master_bio, req->master_bio->bi_size, error);
 	req->master_bio = NULL;
 	dec_ap_bio(mdev);
@@ -299,7 +299,7 @@
 
 		if (rw == WRITE) {
 			/* for writes we need to do some extra housekeeping */
-			_about_to_complete_local_write(mdev,req);
+			_about_to_complete_local_write(mdev, req);
 		}
 
 		/* FIXME not yet implemented...
@@ -311,7 +311,7 @@
 		 * then again, if it is a READ, it is not in the TL at all.
 		 * is it still leagal to complete a READ during freeze? */
 
-		_complete_master_bio(mdev,req,
+		_complete_master_bio(mdev, req,
 			  ok ? 0 : ( error ? error : -EIO ) );
 	} else {
 		/* only WRITE requests can end up here without a master_bio */
@@ -323,7 +323,7 @@
 		 * or protocol C WriteAck,
 		 * or protocol A or B BarrierAck,
 		 * or killed from the transfer log due to connection loss. */
-		_req_is_done(mdev,req,rw);
+		_req_is_done(mdev, req, rw);
 	}
 	/* else: network part and not DONE yet. that is
 	 * protocol A or B, barrier ack still pending... */
@@ -376,7 +376,7 @@
 	BUG_ON(mdev->tl_hash == NULL);
 
 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
-	slot = tl_hash_slot(mdev,sector);
+	slot = tl_hash_slot(mdev, sector);
 	hlist_for_each_entry(i, n, slot, colision) {
 		if (OVERLAPS) {
 			ALERT("%s[%u] Concurrent local write detected!"
@@ -393,7 +393,7 @@
 		BUG_ON(mdev->ee_hash == NULL);
 #undef OVERLAPS
 #define OVERLAPS overlaps(e->sector, e->size, sector, size)
-		slot = ee_hash_slot(mdev,sector);
+		slot = ee_hash_slot(mdev, sector);
 		hlist_for_each_entry(e, n, slot, colision) {
 			if (OVERLAPS) {
 				ALERT("%s[%u] Concurrent remote write detected!"
@@ -439,10 +439,10 @@
 	MUST_HOLD(&mdev->req_lock);
 
 	if (error && ( bio_rw(req->master_bio) != READA ) ) {
-		ERR("got an _req_mod() errno of %d\n",error);
+		ERR("got an _req_mod() errno of %d\n", error);
 	}
 
-	print_req_mod(req,what);
+	print_req_mod(req, what);
 
 	switch(what) {
 	default:
@@ -484,7 +484,7 @@
 		req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
 		req->rq_state &= ~RQ_LOCAL_PENDING;
 
-		_req_may_be_done(req,error);
+		_req_may_be_done(req, error);
 		dec_local(mdev);
 		break;
 
@@ -498,14 +498,14 @@
 		      (unsigned long long)req->sector, req->size);
 		/* and now: check how to handle local io error.
 		 * FIXME see comment below in read_completed_with_error */
-		__drbd_chk_io_error(mdev,FALSE);
-		_req_may_be_done(req,error);
+		__drbd_chk_io_error(mdev, FALSE);
+		_req_may_be_done(req, error);
 		dec_local(mdev);
 		break;
 
 	case read_completed_with_error:
 		if (bio_rw(req->master_bio) != READA) {
-			drbd_set_out_of_sync(mdev,req->sector,req->size);
+			drbd_set_out_of_sync(mdev, req->sector, req->size);
 		}
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;
@@ -515,7 +515,7 @@
 		dec_local(mdev);
 		if (bio_rw(req->master_bio) == READA) {
 			/* it is legal to fail READA */
-			_req_may_be_done(req,error);
+			_req_may_be_done(req, error);
 			break;
 		}
 		/* else */
@@ -535,7 +535,7 @@
 		 * private bio then, and round the offset and size so
 		 * we get back enough data to be able to clear the bits again.
 		 */
-		__drbd_chk_io_error(mdev,FALSE);
+		__drbd_chk_io_error(mdev, FALSE);
 		/* fall through: _req_mod(req,queue_for_net_read); */
 
 	case queue_for_net_read:
@@ -548,9 +548,9 @@
 
 		/* so we can verify the handle in the answer packet
 		 * corresponding hlist_del is in _req_may_be_done() */
-		hlist_add_head(&req->colision, ar_hash_slot(mdev,req->sector));
+		hlist_add_head(&req->colision, ar_hash_slot(mdev, req->sector));
 
-		set_bit(UNPLUG_REMOTE,&mdev->flags); /* why? */
+		set_bit(UNPLUG_REMOTE, &mdev->flags); /* why? */
 
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_QUEUED;
@@ -564,7 +564,7 @@
 		/* assert something? */
 		/* from drbd_make_request_common only */
 
-		hlist_add_head(&req->colision,tl_hash_slot(mdev,req->sector));
+		hlist_add_head(&req->colision, tl_hash_slot(mdev, req->sector));
 		/* corresponding hlist_del is in _req_may_be_done() */
 
 		/* NOTE
@@ -585,12 +585,12 @@
 		D_ASSERT(test_bit(ISSUE_BARRIER, &mdev->flags) == 0);
 
 		req->epoch = mdev->newest_barrier->br_number;
-		list_add_tail(&req->tl_requests,&mdev->newest_barrier->requests);
+		list_add_tail(&req->tl_requests, &mdev->newest_barrier->requests);
 
 		/* mark the current epoch as closed,
 		 * in case it outgrew the limit */
 		if (++mdev->newest_barrier->n_req >= mdev->net_conf->max_epoch_size)
-			set_bit(ISSUE_BARRIER,&mdev->flags);
+			set_bit(ISSUE_BARRIER, &mdev->flags);
 
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_QUEUED;
@@ -610,7 +610,7 @@
 		req->rq_state &= ~RQ_NET_QUEUED;
 		/* if we did it right, tl_clear should be scheduled only after this,
 		 * so this should not be necessary! */
-		_req_may_be_done(req,error);
+		_req_may_be_done(req, error);
 		break;
 
 	case handed_over_to_network:
@@ -641,7 +641,7 @@
 		 * "completed_ok" events came in, once we return from
 		 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
 		 * whether it is done already, and end it.  */
-		_req_may_be_done(req,error);
+		_req_may_be_done(req, error);
 		break;
 
 	case connection_lost_while_pending:
@@ -654,7 +654,7 @@
 		 * it will be canceled soon.
 		 * FIXME we should change the code so this can not happen. */
 		if (!(req->rq_state & RQ_NET_QUEUED))
-			_req_may_be_done(req,error);
+			_req_may_be_done(req, error);
 		break;
 
 	case write_acked_by_peer_and_sis:
@@ -674,7 +674,7 @@
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		dec_ap_pending(mdev);
 		req->rq_state &= ~RQ_NET_PENDING;
-		_req_may_be_done(req,error);
+		_req_may_be_done(req, error);
 		break;
 
 	case neg_acked:
@@ -683,7 +683,7 @@
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 		/* FIXME THINK! is it DONE now, or is it not? */
 		req->rq_state |= RQ_NET_DONE;
-		_req_may_be_done(req,error);
+		_req_may_be_done(req, error);
 		/* else: done by handed_over_to_network */
 		break;
 
@@ -699,7 +699,7 @@
 		}
 		D_ASSERT(req->rq_state & RQ_NET_SENT);
 		req->rq_state |= RQ_NET_DONE;
-		_req_may_be_done(req,error);
+		_req_may_be_done(req, error);
 		break;
 
 	case data_received:
@@ -707,7 +707,7 @@
 		dec_ap_pending(mdev);
 		req->rq_state &= ~RQ_NET_PENDING;
 		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
-		_req_may_be_done(req,error);
+		_req_may_be_done(req, error);
 		break;
 	};
 }
@@ -721,7 +721,7 @@
  */
 STATIC int drbd_may_do_local_read(drbd_dev *mdev, sector_t sector, int size)
 {
-	unsigned long sbnr,ebnr,bnr;
+	unsigned long sbnr, ebnr, bnr;
 	sector_t esector, nr_sectors;
 
 	if (mdev->state.disk == UpToDate) return 1;
@@ -738,7 +738,7 @@
 	ebnr = BM_SECT_TO_BIT(esector);
 
 	for (bnr = sbnr; bnr <= ebnr; bnr++) {
-		if (drbd_bm_test_bit(mdev,bnr)) return 0;
+		if (drbd_bm_test_bit(mdev, bnr)) return 0;
 	}
 	return 1;
 }
@@ -773,7 +773,7 @@
 	/* allocate outside of all locks; get a "reference count" (ap_bio_cnt)
 	 * to avoid races with the disconnect/reconnect code.  */
 	inc_ap_bio(mdev);
-	req = drbd_req_new(mdev,bio);
+	req = drbd_req_new(mdev, bio);
 	if (!req) {
 		dec_ap_bio(mdev);
 		/* only pass the error to the upper layers.
@@ -783,7 +783,7 @@
 		return 0;
 	}
 
-	dump_bio(mdev,bio,0);
+	dump_bio(mdev, bio, 0);
 
 	local = inc_local(mdev);
 	if (!local) {
@@ -795,7 +795,7 @@
 	} else {
 		/* READ || READA */
 		if (local) {
-			if (!drbd_may_do_local_read(mdev,sector,size)) {
+			if (!drbd_may_do_local_read(mdev, sector, size)) {
 				/* we could kick the syncer to
 				 * sync this extent asap, wait for
 				 * it, then continue locally.
@@ -867,10 +867,10 @@
 	 * if we lost that race, we retry.  */
 	if (rw == WRITE && remote &&
 	    mdev->unused_spare_barrier == NULL &&
-	    test_bit(ISSUE_BARRIER,&mdev->flags))
+	    test_bit(ISSUE_BARRIER, &mdev->flags))
 	{
   allocate_barrier:
-		b = kmalloc(sizeof(struct drbd_barrier),GFP_NOIO);
+		b = kmalloc(sizeof(struct drbd_barrier), GFP_NOIO);
 		if (!b) {
 			ERR("Failed to alloc barrier.");
 			err = -ENOMEM;
@@ -902,7 +902,7 @@
 	}
 	if (rw == WRITE && remote &&
 	    mdev->unused_spare_barrier == NULL &&
-	    test_bit(ISSUE_BARRIER,&mdev->flags)) {
+	    test_bit(ISSUE_BARRIER, &mdev->flags)) {
 		/* someone closed the current epoch
 		 * while we were grabbing the spinlock */
 		spin_unlock_irq(&mdev->req_lock);
@@ -921,9 +921,9 @@
 	 * make sure that, if this is a write request and it triggered a
 	 * barrier packet, this request is queued within the same spinlock. */
 	if (remote && mdev->unused_spare_barrier &&
-            test_and_clear_bit(ISSUE_BARRIER,&mdev->flags)) {
+            test_and_clear_bit(ISSUE_BARRIER, &mdev->flags)) {
 		struct drbd_barrier *b = mdev->unused_spare_barrier;
-		b = _tl_add_barrier(mdev,b);
+		b = _tl_add_barrier(mdev, b);
 		mdev->unused_spare_barrier = NULL;
 		b->w.cb =  w_send_barrier;
 		/* inc_ap_pending done here, so we won't
@@ -934,7 +934,7 @@
 		drbd_queue_work(&mdev->data.work, &b->w);
 	} else {
 		D_ASSERT(!(remote && rw == WRITE &&
-			   test_bit(ISSUE_BARRIER,&mdev->flags)));
+			   test_bit(ISSUE_BARRIER, &mdev->flags)));
 	}
 
 	/* NOTE
@@ -973,7 +973,7 @@
 			local = 0;
 		}
 		if (remote) dec_ap_pending(mdev);
-		dump_bio(mdev,req->master_bio,1);
+		dump_bio(mdev, req->master_bio, 1);
 		/* THINK: do we want to fail it (-EIO), or pretend success? */
 		bio_endio(req->master_bio, req->master_bio->bi_size, 0);
 		req->master_bio = NULL;
@@ -989,8 +989,8 @@
 		 * or READ, and no local disk,
 		 * or READ, but not in sync.
 		 */
-		if (rw == WRITE) _req_mod(req,queue_for_net_write, 0);
-		else		 _req_mod(req,queue_for_net_read, 0);
+		if (rw == WRITE) _req_mod(req, queue_for_net_write, 0);
+		else		 _req_mod(req, queue_for_net_read, 0);
 	}
 	spin_unlock_irq(&mdev->req_lock);
 	if (b) kfree(b); /* if someone else has beaten us to it... */
@@ -1035,7 +1035,7 @@
 
 	if (mdev->state.role != Primary &&
 		( !allow_oos || is_write) ) {
-		if (DRBD_ratelimit(5*HZ,5)) {
+		if (DRBD_ratelimit(5*HZ, 5)) {
 			ERR("Process %s[%u] tried to %s; since we are not in Primary state, we cannot allow this\n",
 			    current->comm, current->pid, is_write ? "WRITE" : "READ");
 		}
@@ -1053,7 +1053,7 @@
 	 */
 	if ( mdev->state.disk < UpToDate &&
 	     mdev->state.conn < Connected) {
-		if (DRBD_ratelimit(5*HZ,5)) {
+		if (DRBD_ratelimit(5*HZ, 5)) {
 			ERR("Sorry, I have no access to good data anymore.\n");
 		}
 		/*
@@ -1067,7 +1067,7 @@
 
 int drbd_make_request_26(request_queue_t *q, struct bio *bio)
 {
-	unsigned int s_enr,e_enr;
+	unsigned int s_enr, e_enr;
 	struct Drbd_Conf* mdev = (drbd_dev*) q->queuedata;
 
 	if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
@@ -1118,14 +1118,14 @@
 		const int mask = sps -1;
 		const sector_t first_sectors = sps - (sect & mask);
 		bp = bio_split(bio, bio_split_pool, first_sectors);
-		drbd_make_request_26(q,&bp->bio1);
-		drbd_make_request_26(q,&bp->bio2);
+		drbd_make_request_26(q, &bp->bio1);
+		drbd_make_request_26(q, &bp->bio2);
 		bio_pair_release(bp);
 		return 0;
 	}}
 
-	return drbd_make_request_common(mdev,bio_rw(bio),bio->bi_size,
-					bio->bi_sector,bio);
+	return drbd_make_request_common(mdev, bio_rw(bio), bio->bi_size,
+					bio->bi_sector, bio);
 }
 
 /* This is called by bio_add_page().  With this function we reduce
@@ -1160,8 +1160,8 @@
 	} else if (limit && inc_local(mdev)) {
 		request_queue_t * const b = mdev->bc->backing_bdev->bd_disk->queue;
 		if (b->merge_bvec_fn && mdev->bc->dc.use_bmbv) {
-			backing_limit = b->merge_bvec_fn(b,bio,bvec);
-			limit = min(limit,backing_limit);
+			backing_limit = b->merge_bvec_fn(b, bio, bvec);
+			limit = min(limit, backing_limit);
 		}
 		dec_local(mdev);
 	}

Modified: branches/drbd-8.0-for-linus/drbd/drbd_req.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_req.h	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_req.h	2007-07-24 11:38:05 UTC (rev 2985)
@@ -223,9 +223,9 @@
 
 /* when we receive the answer for a read request,
  * verify that we actually know about it */
-static inline drbd_request_t* _ack_id_to_req(drbd_dev *mdev,u64 id, sector_t sector)
+static inline drbd_request_t* _ack_id_to_req(drbd_dev *mdev, u64 id, sector_t sector)
 {
-	struct hlist_head *slot = tl_hash_slot(mdev,sector);
+	struct hlist_head *slot = tl_hash_slot(mdev, sector);
 	struct hlist_node *n;
 	drbd_request_t * req;
 
@@ -255,9 +255,9 @@
 
 /* when we receive the answer for a read request,
  * verify that we actually know about it */
-static inline drbd_request_t* _ar_id_to_req(drbd_dev *mdev,u64 id, sector_t sector)
+static inline drbd_request_t* _ar_id_to_req(drbd_dev *mdev, u64 id, sector_t sector)
 {
-	struct hlist_head *slot = ar_hash_slot(mdev,sector);
+	struct hlist_head *slot = ar_hash_slot(mdev, sector);
 	struct hlist_node *n;
 	drbd_request_t * req;
 
@@ -296,7 +296,7 @@
 
 static inline void drbd_req_free(drbd_request_t *req)
 {
-	mempool_free(req,drbd_request_mempool);
+	mempool_free(req, drbd_request_mempool);
 }
 
 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
@@ -314,7 +314,7 @@
 {
 	drbd_dev *mdev = req->mdev;
 	spin_lock_irq(&mdev->req_lock);
-	_req_mod(req,what,error);
+	_req_mod(req, what, error);
 	spin_unlock_irq(&mdev->req_lock);
 }
 #endif

Modified: branches/drbd-8.0-for-linus/drbd/drbd_worker.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_worker.c	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_worker.c	2007-07-24 11:38:05 UTC (rev 2985)
@@ -99,19 +99,19 @@
 
 	D_ASSERT(e->block_id != ID_VACANT);
 
-	spin_lock_irqsave(&mdev->req_lock,flags);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	mdev->read_cnt += e->size >> 9;
 	list_del(&e->w.list);
 	if (list_empty(&mdev->read_ee)) wake_up(&mdev->ee_wait);
-	spin_unlock_irqrestore(&mdev->req_lock,flags);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
-	drbd_chk_io_error(mdev,error,FALSE);
-	drbd_queue_work(&mdev->data.work,&e->w);
+	drbd_chk_io_error(mdev, error, FALSE);
+	drbd_queue_work(&mdev->data.work, &e->w);
 	dec_local(mdev);
 
-	MTRACE(TraceTypeEE,TraceLvlAll,
+	MTRACE(TraceTypeEE, TraceLvlAll,
 	       INFO("Moved EE (READ) to worker sec=%llus size=%u ee=%p\n",
-		    (unsigned long long)e->sector,e->size,e);
+		    (unsigned long long)e->sector, e->size, e);
 	       );
 	return 0;
 }
@@ -145,7 +145,7 @@
 
 	D_ASSERT(e->block_id != ID_VACANT);
 
-	spin_lock_irqsave(&mdev->req_lock,flags);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	mdev->writ_cnt += e->size >> 9;
 	is_syncer_req = is_syncer_block_id(e->block_id);
 
@@ -157,11 +157,11 @@
 	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
 
 	list_del(&e->w.list); /* has been on active_ee or sync_ee */
-	list_add_tail(&e->w.list,&mdev->done_ee);
+	list_add_tail(&e->w.list, &mdev->done_ee);
 
-	MTRACE(TraceTypeEE,TraceLvlAll,
+	MTRACE(TraceTypeEE, TraceLvlAll,
 	       INFO("Moved EE (WRITE) to done_ee sec=%llus size=%u ee=%p\n",
-		    (unsigned long long)e->sector,e->size,e);
+		    (unsigned long long)e->sector, e->size, e);
 	       );
 
 	/* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
@@ -175,14 +175,14 @@
 		? list_empty(&mdev->sync_ee)
 		: list_empty(&mdev->active_ee);
 
-	if (error) __drbd_chk_io_error(mdev,FALSE);
-	spin_unlock_irqrestore(&mdev->req_lock,flags);
+	if (error) __drbd_chk_io_error(mdev, FALSE);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
-	if (is_syncer_req) drbd_rs_complete_io(mdev,e_sector);
+	if (is_syncer_req) drbd_rs_complete_io(mdev, e_sector);
 
 	if (do_wake) wake_up(&mdev->ee_wait);
 
-	if (do_al_complete_io) drbd_al_complete_io(mdev,e_sector);
+	if (do_al_complete_io) drbd_al_complete_io(mdev, e_sector);
 
 	wake_asender(mdev);
 	dec_local(mdev);
@@ -216,13 +216,13 @@
 	         ? write_completed_with_error
 	         : read_completed_with_error
 	       : completed_ok;
-	spin_lock_irqsave(&mdev->req_lock,flags);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	_req_mod(req, what, error);
-	spin_unlock_irqrestore(&mdev->req_lock,flags);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 	return 0;
 }
 
-int w_io_error(drbd_dev* mdev, struct drbd_work* w,int cancel)
+int w_io_error(drbd_dev* mdev, struct drbd_work* w, int cancel)
 {
 	drbd_request_t *req = (drbd_request_t*)w;
 	int ok;
@@ -247,7 +247,7 @@
 	return ok;
 }
 
-int w_read_retry_remote(drbd_dev* mdev, struct drbd_work* w,int cancel)
+int w_read_retry_remote(drbd_dev* mdev, struct drbd_work* w, int cancel)
 {
 	drbd_request_t *req = (drbd_request_t*)w;
 
@@ -257,7 +257,7 @@
 	     mdev->state.pdsk <= Inconsistent ) {
 		_req_mod(req, send_canceled, 0); /* FIXME freeze? ... */
 		spin_unlock_irq(&mdev->req_lock);
-		drbd_khelper(mdev,"pri-on-incon-degr"); /* FIXME REALLY? */
+		drbd_khelper(mdev, "pri-on-incon-degr"); /* FIXME REALLY? */
 		ALERT("WE ARE LOST. Local IO failure, no peer.\n");
 		return 1;
 	}
@@ -266,8 +266,8 @@
 	/* FIXME this is ugly. we should not detach for read io-error,
 	 * but try to WRITE the DataReply to the failed location,
 	 * to give the disk the chance to relocate that block */
-	drbd_io_error(mdev,FALSE); /* tries to schedule a detach and notifies peer */
-	return w_send_read_req(mdev,w,0);
+	drbd_io_error(mdev, FALSE); /* tries to schedule a detach and notifies peer */
+	return w_send_read_req(mdev, w, 0);
 }
 
 int w_resync_inactive(drbd_dev *mdev, struct drbd_work *w, int cancel)
@@ -283,9 +283,9 @@
 	drbd_dev* mdev = (drbd_dev*) data;
 	int queue;
 
-	spin_lock_irqsave(&mdev->req_lock,flags);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 
-	if (likely(!test_and_clear_bit(STOP_SYNC_TIMER,&mdev->flags))) {
+	if (likely(!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))) {
 		queue=1;
 		mdev->resync_work.cb = w_make_resync_request;
 	} else {
@@ -293,23 +293,23 @@
 		mdev->resync_work.cb = w_resync_inactive;
 	}
 
-	spin_unlock_irqrestore(&mdev->req_lock,flags);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 	/* harmless race: list_empty outside data.work.q_lock */
 	if (list_empty(&mdev->resync_work.list) && queue) {
-		drbd_queue_work(&mdev->data.work,&mdev->resync_work);
+		drbd_queue_work(&mdev->data.work, &mdev->resync_work);
 	}
 }
 
 #define SLEEP_TIME (HZ/10)
 
-int w_make_resync_request(drbd_dev* mdev, struct drbd_work* w,int cancel)
+int w_make_resync_request(drbd_dev* mdev, struct drbd_work* w, int cancel)
 {
 	unsigned long bit;
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
 	int max_segment_size = mdev->rq_queue->max_segment_size;
-	int number,i,size;
+	int number, i, size;
 	int align;
 
 	PARANOIA_BUG_ON(w != &mdev->resync_work);
@@ -361,13 +361,13 @@
 		sector = BM_BIT_TO_SECT(bit);
 
 		if (drbd_try_rs_begin_io(mdev, sector)) {
-			drbd_bm_set_find(mdev,bit);
+			drbd_bm_set_find(mdev, bit);
 			goto requeue;
 		}
 
-		if (unlikely(drbd_bm_test_bit(mdev,bit) == 0 )) {
+		if (unlikely(drbd_bm_test_bit(mdev, bit) == 0 )) {
 		      //INFO("Block got synced while in drbd_rs_begin_io()\n");
-			drbd_rs_complete_io(mdev,sector);
+			drbd_rs_complete_io(mdev, sector);
 			goto next_sector;
 		}
 
@@ -401,7 +401,7 @@
 			 * obscure reason; ( b == 0 ) would get the out-of-band
 			 * only accidentally right because of the "oddly sized"
 			 * adjustment below */
-			if ( drbd_bm_test_bit(mdev,bit+1) != 1 )
+			if ( drbd_bm_test_bit(mdev, bit+1) != 1 )
 				break;
 			bit++;
 			size += BM_BLOCK_SIZE;
@@ -411,14 +411,14 @@
 		/* if we merged some,
 		 * reset the offset to start the next drbd_bm_find_next from */
 		if (size > BM_BLOCK_SIZE)
-			drbd_bm_set_find(mdev,bit+1);
+			drbd_bm_set_find(mdev, bit+1);
 #endif
 
 		/* adjust very last sectors, in case we are oddly sized */
 		if (sector + (size>>9) > capacity) size = (capacity-sector)<<9;
 		inc_rs_pending(mdev);
-		if (!drbd_send_drequest(mdev,RSDataRequest,
-				       sector,size,ID_SYNCER)) {
+		if (!drbd_send_drequest(mdev, RSDataRequest,
+				       sector, size, ID_SYNCER)) {
 			ERR("drbd_send_drequest() failed, aborting...\n");
 			dec_rs_pending(mdev);
 			dec_local(mdev);
@@ -457,7 +457,7 @@
 
 int drbd_resync_finished(drbd_dev* mdev)
 {
-	unsigned long db,dt,dbdt;
+	unsigned long db, dt, dbdt;
 	int dstate, pdstate;
 	struct drbd_work *w;
 
@@ -476,7 +476,7 @@
 		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
 		if (w) {
 			w->cb = w_resync_finished;
-			drbd_queue_work(&mdev->data.work,w);
+			drbd_queue_work(&mdev->data.work, w);
 			return 1;
 		}
 		ERR("Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -493,7 +493,7 @@
 	D_ASSERT((drbd_bm_total_weight(mdev)-mdev->rs_failed) == 0);
 
 	if (mdev->rs_failed) {
-		INFO("            %lu failed blocks\n",mdev->rs_failed);
+		INFO("            %lu failed blocks\n", mdev->rs_failed);
 
 		if (mdev->state.conn == SyncTarget ||
 		    mdev->state.conn == PausedSyncT) {
@@ -511,16 +511,16 @@
 			if (mdev->p_uuid) {
 				int i;
 				for ( i=Bitmap ; i<=History_end ; i++ ) {
-					_drbd_uuid_set(mdev,i,mdev->p_uuid[i]);
+					_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
 				}
-				drbd_uuid_set(mdev,Bitmap,mdev->bc->md.uuid[Current]);
-				_drbd_uuid_set(mdev,Current,mdev->p_uuid[Current]);
+				drbd_uuid_set(mdev, Bitmap, mdev->bc->md.uuid[Current]);
+				_drbd_uuid_set(mdev, Current, mdev->p_uuid[Current]);
 			} else {
 				ERR("mdev->p_uuid is NULL! BUG\n");
 			}
 		}
 
-		drbd_uuid_set_bm(mdev,0UL);
+		drbd_uuid_set_bm(mdev, 0UL);
 
 		if (mdev->p_uuid) {
 			// Now the two UUID sets are equal, update what we
@@ -536,16 +536,16 @@
 	mdev->rs_failed = 0;
 	mdev->rs_paused = 0;
 
-	if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC,&mdev->flags)) {
+	if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
 		WARN("Writing the whole bitmap, due to failed kmalloc\n");
 		drbd_bm_write(mdev);
 	}
 
 	drbd_bm_recount_bits(mdev);
 
-	drbd_request_state(mdev,NS3(conn,Connected,
-				    disk,dstate,
-				    pdsk,pdstate));
+	drbd_request_state(mdev, NS3(conn, Connected,
+				    disk, dstate,
+				    pdsk, pdstate));
 
 	drbd_md_sync(mdev);
 
@@ -561,7 +561,7 @@
 	int ok;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev,e);
+		drbd_free_ee(mdev, e);
 		dec_unacked(mdev);
 		return 1;
 	}
@@ -569,16 +569,16 @@
 	if (likely(drbd_bio_uptodate(e->private_bio))) {
 		ok=drbd_send_block(mdev, DataReply, e);
 	} else {
-		if (DRBD_ratelimit(5*HZ,5))
+		if (DRBD_ratelimit(5*HZ, 5))
 			ERR("Sending NegDReply. sector=%llus.\n",
 			    (unsigned long long)e->sector);
 
-		ok=drbd_send_ack(mdev,NegDReply,e);
+		ok=drbd_send_ack(mdev, NegDReply, e);
 
 		/* FIXME we should not detach for read io-errors, in particular
 		 * not now: when the peer asked us for our data, we are likely
 		 * the only remaining disk... */
-		drbd_io_error(mdev,FALSE);
+		drbd_io_error(mdev, FALSE);
 	}
 
 	dec_unacked(mdev);
@@ -586,9 +586,9 @@
 	spin_lock_irq(&mdev->req_lock);
 	if ( drbd_bio_has_active_page(e->private_bio) ) {
 		/* This might happen if sendpage() has not finished */
-		list_add_tail(&e->w.list,&mdev->net_ee);
+		list_add_tail(&e->w.list, &mdev->net_ee);
 	} else {
-		drbd_free_ee(mdev,e);
+		drbd_free_ee(mdev, e);
 	}
 	spin_unlock_irq(&mdev->req_lock);
 
@@ -605,13 +605,13 @@
 	int ok;
 
 	if (unlikely(cancel)) {
-		drbd_free_ee(mdev,e);
+		drbd_free_ee(mdev, e);
 		dec_unacked(mdev);
 		return 1;
 	}
 
-	if (inc_local_if_state(mdev,Failed)) {
-		drbd_rs_complete_io(mdev,e->sector);
+	if (inc_local_if_state(mdev, Failed)) {
+		drbd_rs_complete_io(mdev, e->sector);
 		dec_local(mdev);
 	}
 
@@ -620,16 +620,16 @@
 			inc_rs_pending(mdev);
 			ok=drbd_send_block(mdev, RSDataReply, e);
 		} else {
-			if (DRBD_ratelimit(5*HZ,5))
+			if (DRBD_ratelimit(5*HZ, 5))
 				ERR("Not sending RSDataReply, partner DISKLESS!\n");
 			ok=1;
 		}
 	} else {
-		if (DRBD_ratelimit(5*HZ,5))
+		if (DRBD_ratelimit(5*HZ, 5))
 			ERR("Sending NegRSDReply. sector %llus.\n",
 			    (unsigned long long)e->sector);
 
-		ok=drbd_send_ack(mdev,NegRSDReply,e);
+		ok=drbd_send_ack(mdev, NegRSDReply, e);
 
 		drbd_io_error(mdev, FALSE);
 
@@ -642,9 +642,9 @@
 	spin_lock_irq(&mdev->req_lock);
 	if ( drbd_bio_has_active_page(e->private_bio) ) {
 		/* This might happen if sendpage() has not finished */
-		list_add_tail(&e->w.list,&mdev->net_ee);
+		list_add_tail(&e->w.list, &mdev->net_ee);
 	} else {
-		drbd_free_ee(mdev,e);
+		drbd_free_ee(mdev, e);
 	}
 	spin_unlock_irq(&mdev->req_lock);
 
@@ -654,7 +654,7 @@
 
 int w_prev_work_done(drbd_dev *mdev, struct drbd_work *w, int cancel)
 {
-	clear_bit(WORK_PENDING,&mdev->flags);
+	clear_bit(WORK_PENDING, &mdev->flags);
 	wake_up(&mdev->misc_wait);
 	return 1;
 }
@@ -683,7 +683,7 @@
 	/* inc_ap_pending was done where this was queued.
 	 * dec_ap_pending will be done in got_BarrierAck
 	 * or (on connection loss) in w_clear_epoch.  */
-	ok = _drbd_send_cmd(mdev,mdev->data.socket,Barrier,(Drbd_Header*)p,sizeof(*p),0);
+	ok = _drbd_send_cmd(mdev, mdev->data.socket, Barrier, (Drbd_Header*)p, sizeof(*p), 0);
 	drbd_put_data_sock(mdev);
 
 	return ok;
@@ -692,7 +692,7 @@
 int w_send_write_hint(drbd_dev *mdev, struct drbd_work *w, int cancel)
 {
 	if (cancel) return 1;
-	return drbd_send_short_cmd(mdev,UnplugRemote);
+	return drbd_send_short_cmd(mdev, UnplugRemote);
 }
 
 /**
@@ -708,8 +708,8 @@
 		return 1;
 	}
 
-	ok = drbd_send_dblock(mdev,req);
-	req_mod(req,ok ? handed_over_to_network : send_failed, 0);
+	ok = drbd_send_dblock(mdev, req);
+	req_mod(req, ok ? handed_over_to_network : send_failed, 0);
 
 	return ok;
 }
@@ -735,7 +735,7 @@
 	} else {
 		/* ?? we set Timeout or BrokenPipe in drbd_send() */
 		if (mdev->state.conn >= Connected)
-			drbd_force_state(mdev,NS(conn,NetworkFailure));
+			drbd_force_state(mdev, NS(conn, NetworkFailure));
 		/* req_mod(req, send_failed); we should not fail it here,
 		 * we might have to "freeze" on disconnect.
 		 * handled by req_mod(req, connection_lost_while_pending);
@@ -798,7 +798,7 @@
 	for (i=0; i < minor_count; i++) {
 		if ( !(odev = minor_to_mdev(i)) ) continue;
 		if (! _drbd_may_sync_now(odev)) {
-			rv |= ( _drbd_set_state(_NS(odev,aftr_isp,1),
+			rv |= ( _drbd_set_state(_NS(odev, aftr_isp, 1),
 						ChgStateHard|ScheduleAfter)
 				!= SS_NothingToDo ) ;
 		}
@@ -822,7 +822,7 @@
 		if ( !(odev = minor_to_mdev(i)) ) continue;
 		if (odev->state.aftr_isp) {
 			if (_drbd_may_sync_now(odev)) {
-				rv |= ( _drbd_set_state(_NS(odev,aftr_isp,0),
+				rv |= ( _drbd_set_state(_NS(odev, aftr_isp, 0),
 							ChgStateHard|ScheduleAfter)
 					!= SS_NothingToDo ) ;
 			}
@@ -870,7 +870,7 @@
  */
 void drbd_start_resync(drbd_dev *mdev, drbd_conns_t side)
 {
-	drbd_state_t os,ns;
+	drbd_state_t os, ns;
 	int r=0;
 
 	MTRACE(TraceTypeResync, TraceLvlSummary,
@@ -890,7 +890,7 @@
 
 		get_random_bytes(&uuid, sizeof(u64));
 		drbd_uuid_set(mdev, Bitmap, uuid);
-		drbd_send_sync_uuid(mdev,uuid);
+		drbd_send_sync_uuid(mdev, uuid);
 
 		D_ASSERT(mdev->state.disk == UpToDate);
 	}
@@ -908,7 +908,7 @@
 		ns.pdsk = Inconsistent;
 	}
 
-	r = _drbd_set_state(mdev,ns,ChgStateVerbose);
+	r = _drbd_set_state(mdev, ns, ChgStateVerbose);
 	ns = mdev->state;
 
 	if (r == SS_Success) {
@@ -923,7 +923,7 @@
 	drbd_global_unlock();
 
 	if (r == SS_Success) {
-		after_state_ch(mdev,os,ns,ChgStateVerbose);
+		after_state_ch(mdev, os, ns, ChgStateVerbose);
 
 		INFO("Began resync as %s (will sync %lu KB [%lu bits set]).\n",
 		     conns_to_name(ns.conn),
@@ -936,8 +936,8 @@
 		}
 
 		if (ns.conn == SyncTarget) {
-			D_ASSERT(!test_bit(STOP_SYNC_TIMER,&mdev->flags));
-			mod_timer(&mdev->resync_timer,jiffies);
+			D_ASSERT(!test_bit(STOP_SYNC_TIMER, &mdev->flags));
+			mod_timer(&mdev->resync_timer, jiffies);
 		}
 
 		drbd_md_sync(mdev);
@@ -949,7 +949,7 @@
 	drbd_dev *mdev = thi->mdev;
 	struct drbd_work *w = 0;
 	LIST_HEAD(work_list);
-	int intr=0,i;
+	int intr=0, i;
 
 	sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));
 
@@ -997,33 +997,33 @@
 			spin_unlock_irq(&mdev->data.work.q_lock);
 			continue;
 		}
-		w = list_entry(mdev->data.work.q.next,struct drbd_work,list);
+		w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
 		list_del_init(&w->list);
 		spin_unlock_irq(&mdev->data.work.q_lock);
 
-		if (!w->cb(mdev,w, mdev->state.conn < Connected )) {
+		if (!w->cb(mdev, w, mdev->state.conn < Connected )) {
 			//WARN("worker: a callback failed! \n");
 			if (mdev->state.conn >= Connected)
-				drbd_force_state(mdev,NS(conn,NetworkFailure));
+				drbd_force_state(mdev, NS(conn, NetworkFailure));
 		}
 	}
 
 	spin_lock_irq(&mdev->data.work.q_lock);
 	i = 0;
 	while (!list_empty(&mdev->data.work.q)) {
-		list_splice_init(&mdev->data.work.q,&work_list);
+		list_splice_init(&mdev->data.work.q, &work_list);
 		spin_unlock_irq(&mdev->data.work.q_lock);
 
 		while(!list_empty(&work_list)) {
-			w = list_entry(work_list.next, struct drbd_work,list);
+			w = list_entry(work_list.next, struct drbd_work, list);
 			list_del_init(&w->list);
-			w->cb(mdev,w,1);
+			w->cb(mdev, w, 1);
 			i++; /* dead debugging code */
 		}
 
 		spin_lock_irq(&mdev->data.work.q_lock);
 	}
-	sema_init(&mdev->data.work.s,0);
+	sema_init(&mdev->data.work.s, 0);
 	/* DANGEROUS race: if someone did queue his work within the spinlock,
 	 * but up() ed outside the spinlock, we could get an up() on the
 	 * semaphore without corresponding list entry.

Modified: branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h	2007-07-24 11:38:05 UTC (rev 2985)
@@ -41,11 +41,11 @@
 					sector_t size)
 {
 	/* set_capacity(mdev->this_bdev->bd_disk, size); */
-	set_capacity(mdev->vdisk,size);
+	set_capacity(mdev->vdisk, size);
 	mdev->this_bdev->bd_inode->i_size = (loff_t)size << 9;
 }
 
-#define drbd_bio_uptodate(bio) bio_flagged(bio,BIO_UPTODATE)
+#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
 
 #ifdef CONFIG_HIGHMEM
 /*
@@ -122,7 +122,7 @@
 	}
 
 	if (FAULT_ACTIVE(mdev, fault_type))
-		bio_endio(bio,bio->bi_size,-EIO);
+		bio_endio(bio, bio->bi_size, -EIO);
 	else
 		generic_make_request(bio);
 }

Modified: branches/drbd-8.0-for-linus/drbd/linux/drbd_nl.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/linux/drbd_nl.h	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/linux/drbd_nl.h	2007-07-24 11:38:05 UTC (rev 2985)
@@ -27,7 +27,7 @@
 	BIT(		37,	T_MAY_IGNORE,	use_bmbv)
 )
 
-PACKET(detach, 4,)
+PACKET(detach, 4, )
 
 PACKET(net_conf, 5,
 	STRING(		8,	T_MANDATORY,	my_addr,	128)

Modified: branches/drbd-8.0-for-linus/drbd/linux/drbd_tag_magic.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/linux/drbd_tag_magic.h	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/linux/drbd_tag_magic.h	2007-07-24 11:38:05 UTC (rev 2985)
@@ -7,30 +7,35 @@
 // declare packet_type enums
 enum packet_types {
 #define PACKET(name, number, fields) P_ ## name = number,
-#define INTEGER(pn,pr,member)
-#define INT64(pn,pr,member)
-#define BIT(pn,pr,member)
-#define STRING(pn,pr,member,len)
+#define INTEGER(pn, pr, member)
+#define INT64(pn, pr, member)
+#define BIT(pn, pr, member)
+#define STRING(pn, pr, member, len)
 #include "drbd_nl.h"
 	P_nl_after_last_packet,
 };
 
 // These struct are used to deduce the size of the tag lists:
-#define PACKET(name, number ,fields) struct name ## _tag_len_struct { fields };
-#define INTEGER(pn,pr,member) int member; int tag_and_len ## member;
-#define INT64(pn,pr,member) __u64 member; int tag_and_len ## member;
-#define BIT(pn,pr,member)   unsigned char member : 1; int tag_and_len ## member;
-#define STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len; \
-				 int tag_and_len ## member;
+#define PACKET(name, number, fields)	\
+	struct name ## _tag_len_struct { fields };
+#define INTEGER(pn, pr, member)		\
+	int member; int tag_and_len ## member;
+#define INT64(pn, pr, member)		\
+	__u64 member; int tag_and_len ## member;
+#define BIT(pn, pr, member)		\
+	unsigned char member : 1; int tag_and_len ## member;
+#define STRING(pn, pr, member, len)	\
+	unsigned char member[len]; int member ## _len; \
+	int tag_and_len ## member;
 #include "linux/drbd_nl.h"
 
 // declare tag-list-sizes
 const int tag_list_sizes[] = {
-#define PACKET(name,number,fields) 2 fields ,
-#define INTEGER(pn,pr,member)     +4+4
-#define INT64(pn,pr,member)       +4+8
-#define BIT(pn,pr,member)         +4+1
-#define STRING(pn,pr,member,len)  +4+len
+#define PACKET(name, number, fields) 2 fields ,
+#define INTEGER(pn, pr, member)      +4+4
+#define INT64(pn, pr, member)        +4+8
+#define BIT(pn, pr, member)          +4+1
+#define STRING(pn, pr, member, len)  +4+len
 #include "drbd_nl.h"
 };
 
@@ -52,10 +57,10 @@
 // declare tag enums
 #define PACKET(name, number, fields) fields
 enum drbd_tags {
-#define INTEGER(pn,pr,member)    T_ ## member = pn | TT_INTEGER | pr ,
-#define INT64(pn,pr,member)      T_ ## member = pn | TT_INT64   | pr ,
-#define BIT(pn,pr,member)        T_ ## member = pn | TT_BIT     | pr ,
-#define STRING(pn,pr,member,len) T_ ## member = pn | TT_STRING  | pr ,
+#define INTEGER(pn, pr, member)     T_ ## member = pn | TT_INTEGER | pr ,
+#define INT64(pn, pr, member)       T_ ## member = pn | TT_INT64   | pr ,
+#define BIT(pn, pr, member)         T_ ## member = pn | TT_BIT     | pr ,
+#define STRING(pn, pr, member, len) T_ ## member = pn | TT_STRING  | pr ,
 #include "drbd_nl.h"
 };
 
@@ -68,10 +73,10 @@
 // declare tag names
 #define PACKET(name, number, fields) fields
 const struct tag tag_descriptions[] = {
-#define INTEGER(pn,pr,member)    [ pn ] = { #member, TT_INTEGER | pr, sizeof(int)   },
-#define INT64(pn,pr,member)      [ pn ] = { #member, TT_INT64   | pr, sizeof(__u64) },
-#define BIT(pn,pr,member)        [ pn ] = { #member, TT_BIT     | pr, sizeof(int)   },
-#define STRING(pn,pr,member,len) [ pn ] = { #member, TT_STRING  | pr, len           },
+#define INTEGER(pn, pr, member)     [ pn ] = { #member, TT_INTEGER | pr, sizeof(int)   },
+#define INT64(pn, pr, member)       [ pn ] = { #member, TT_INT64   | pr, sizeof(__u64) },
+#define BIT(pn, pr, member)         [ pn ] = { #member, TT_BIT     | pr, sizeof(int)   },
+#define STRING(pn, pr, member, len) [ pn ] = { #member, TT_STRING  | pr, len           },
 #include "drbd_nl.h"
 };
 

Modified: branches/drbd-8.0-for-linus/drbd/lru_cache.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/lru_cache.c	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/lru_cache.c	2007-07-24 11:38:05 UTC (rev 2985)
@@ -34,8 +34,8 @@
 #define STATIC static
 
 // this is developers aid only!
-#define PARANOIA_ENTRY() BUG_ON(test_and_set_bit(__LC_PARANOIA,&lc->flags))
-#define PARANOIA_LEAVE() do { clear_bit(__LC_PARANOIA,&lc->flags); smp_mb__after_clear_bit(); } while (0)
+#define PARANOIA_ENTRY() BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags))
+#define PARANOIA_LEAVE() do { clear_bit(__LC_PARANOIA, &lc->flags); smp_mb__after_clear_bit(); } while (0)
 #define RETURN(x...)     do { PARANOIA_LEAVE(); return x ; } while (0)
 
 /**
@@ -52,7 +52,7 @@
 	int i;
 
 	BUG_ON(!e_count);
-	e_size = max(sizeof(struct lc_element),e_size);
+	e_size = max(sizeof(struct lc_element), e_size);
 	bytes  = e_size+sizeof(struct hlist_head);
 	bytes *= e_count;
 	bytes += sizeof(struct lru_cache);
@@ -68,9 +68,9 @@
 		lc->lc_private       = private_p;
 		lc->name             = name;
 		for(i=0;i<e_count;i++) {
-			e = lc_entry(lc,i);
+			e = lc_entry(lc, i);
 			e->lc_number = LC_FREE;
-			list_add(&e->list,&lc->free);
+			list_add(&e->list, &lc->free);
 			// memset(,0,) did the rest of init for us
 		}
 	}
@@ -94,7 +94,7 @@
 	 * misses include "dirty" count (update from another thread in progress)
 	 * and "changed", when this in fact leads to a successful update of the cache.
 	 */
-	return seq_printf(seq,"\t%s: used:%u/%u "
+	return seq_printf(seq, "\t%s: used:%u/%u "
 		"hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n",
 		lc->name, lc->used, lc->nr_elements,
 		lc->hits, lc->misses, lc->starving, lc->dirty, lc->changed);
@@ -133,7 +133,7 @@
 	if (list_empty(&lc->lru)) return 0;
 
 	n=lc->lru.prev;
-	e=list_entry(n, struct lc_element,list);
+	e=list_entry(n, struct lc_element, list);
 
 	list_del(&e->list);
 	hlist_del(&e->colision);
@@ -156,7 +156,7 @@
 	hlist_del_init(&e->colision);
 	e->lc_number = LC_FREE;
 	e->refcnt = 0;
-	list_add(&e->list,&lc->free);
+	list_add(&e->list, &lc->free);
 	RETURN();
 }
 
@@ -168,7 +168,7 @@
 
 	n=lc->free.next;
 	list_del(n);
-	return list_entry(n, struct lc_element,list);
+	return list_entry(n, struct lc_element, list);
 }
 
 STATIC int lc_unused_element_available(struct lru_cache* lc)
@@ -224,7 +224,7 @@
 	if (e) {
 		++lc->hits;
 		if (e->refcnt++ == 0) lc->used++;
-		list_move(&e->list,&lc->in_use); // Not evictable...
+		list_move(&e->list, &lc->in_use); // Not evictable...
 		RETURN(e);
 	}
 
@@ -234,7 +234,7 @@
 	 * the LRU element, we have to wait ...
 	 */
 	if (!lc_unused_element_available(lc)) {
-		__set_bit(__LC_STARVING,&lc->flags);
+		__set_bit(__LC_STARVING, &lc->flags);
 		RETURN(NULL);
 	}
 
@@ -242,7 +242,7 @@
 	 * which then is replaced.
 	 * we need to update the cache; serialize on lc->flags & LC_DIRTY
 	 */
-	if (test_and_set_bit(__LC_DIRTY,&lc->flags)) {
+	if (test_and_set_bit(__LC_DIRTY, &lc->flags)) {
 		++lc->dirty;
 		RETURN(NULL);
 	}
@@ -250,7 +250,7 @@
 	e = lc_get_unused_element(lc);
 	BUG_ON(!e);
 
-	clear_bit(__LC_STARVING,&lc->flags);
+	clear_bit(__LC_STARVING, &lc->flags);
 	BUG_ON(++e->refcnt != 1);
 	lc->used++;
 
@@ -281,7 +281,7 @@
 	if (e) {
 		++lc->hits;
 		if (e->refcnt++ == 0) lc->used++;
-		list_move(&e->list,&lc->in_use); // Not evictable...
+		list_move(&e->list, &lc->in_use); // Not evictable...
 	}
 	RETURN(e);
 }
@@ -292,11 +292,11 @@
 	BUG_ON(e != lc->changing_element);
 	++lc->changed;
 	e->lc_number = lc->new_number;
-	list_add(&e->list,&lc->in_use);
+	list_add(&e->list, &lc->in_use);
 	hlist_add_head( &e->colision, lc->slot + lc_hash_fn(lc, lc->new_number) );
 	lc->changing_element = NULL;
 	lc->new_number = -1;
-	clear_bit(__LC_DIRTY,&lc->flags);
+	clear_bit(__LC_DIRTY, &lc->flags);
 	smp_mb__after_clear_bit();
 	PARANOIA_LEAVE();
 }
@@ -312,9 +312,9 @@
 	BUG_ON(e->refcnt == 0);
 	BUG_ON(e == lc->changing_element);
 	if (--e->refcnt == 0) {
-		list_move(&e->list,&lc->lru); // move it to the front of LRU.
+		list_move(&e->list, &lc->lru); // move it to the front of LRU.
 		lc->used--;
-		clear_bit(__LC_STARVING,&lc->flags);
+		clear_bit(__LC_STARVING, &lc->flags);
 		smp_mb__after_clear_bit();
 	}
 	RETURN(e->refcnt);
@@ -334,11 +334,11 @@
 
 	if (index < 0 || index >= lc->nr_elements) return;
 
-	e = lc_entry(lc,index);
+	e = lc_entry(lc, index);
 	e->lc_number = enr;
 
 	hlist_del_init(&e->colision);
-	hlist_add_head( &e->colision, lc->slot + lc_hash_fn(lc,enr) );
+	hlist_add_head( &e->colision, lc->slot + lc_hash_fn(lc, enr) );
 	list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru);
 }
 
@@ -353,16 +353,16 @@
 	struct lc_element *e;
 	int i;
 
-	seq_printf(seq,"\tnn: lc_number refcnt %s\n ",utext);
+	seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext);
 	for(i=0;i<nr_elements;i++) {
-		e = lc_entry(lc,i);
+		e = lc_entry(lc, i);
 		if (e->lc_number == LC_FREE) {
-			seq_printf(seq,"\t%2d: FREE\n",i );
+			seq_printf(seq, "\t%2d: FREE\n", i );
 		} else {
-			seq_printf(seq,"\t%2d: %4u %4u    ", i,
+			seq_printf(seq, "\t%2d: %4u %4u    ", i,
 				   e->lc_number,
 				   e->refcnt );
-			detail(seq,e);
+			detail(seq, e);
 		}
 	}
 }

Modified: branches/drbd-8.0-for-linus/drbd/lru_cache.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/lru_cache.h	2007-07-24 09:44:10 UTC (rev 2984)
+++ branches/drbd-8.0-for-linus/drbd/lru_cache.h	2007-07-24 11:38:05 UTC (rev 2985)
@@ -122,26 +122,26 @@
  */
 static inline int lc_try_lock(struct lru_cache* lc)
 {
-	return !test_and_set_bit(__LC_DIRTY,&lc->flags);
+	return !test_and_set_bit(__LC_DIRTY, &lc->flags);
 }
 
 static inline void lc_unlock(struct lru_cache* lc)
 {
-	clear_bit(__LC_DIRTY,&lc->flags);
+	clear_bit(__LC_DIRTY, &lc->flags);
 	smp_mb__after_clear_bit();
 }
 
 static inline int lc_is_used(struct lru_cache* lc, unsigned int enr)
 {
-	struct lc_element* e = lc_find(lc,enr);
+	struct lc_element* e = lc_find(lc, enr);
 	return (e && e->refcnt);
 }
 
 #define LC_FREE (-1U)
 
 #define lc_e_base(lc)  ((char*) ( (lc)->slot + (lc)->nr_elements ) )
-#define lc_entry(lc,i) ((struct lc_element*) \
+#define lc_entry(lc, i) ((struct lc_element*) \
                        (lc_e_base(lc) + (i)*(lc)->element_size))
-#define lc_index_of(lc,e) (((char*)(e) - lc_e_base(lc))/(lc)->element_size)
+#define lc_index_of(lc, e) (((char*)(e) - lc_e_base(lc))/(lc)->element_size)
 
 #endif



More information about the drbd-cvs mailing list