[DRBD-cvs] svn commit by lars - r2984 - branches/drbd-8.0-for-linus/drbd - a97d57e6fadb7027f8b1ebbb266078c73db58046 if( => if ( dc

drbd-cvs at lists.linbit.com drbd-cvs at lists.linbit.com
Tue Jul 24 11:44:12 CEST 2007


Author: lars
Date: 2007-07-24 11:44:10 +0200 (Tue, 24 Jul 2007)
New Revision: 2984

Modified:
   branches/drbd-8.0-for-linus/drbd/drbd_actlog.c
   branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c
   branches/drbd-8.0-for-linus/drbd/drbd_int.h
   branches/drbd-8.0-for-linus/drbd/drbd_main.c
   branches/drbd-8.0-for-linus/drbd/drbd_nl.c
   branches/drbd-8.0-for-linus/drbd/drbd_proc.c
   branches/drbd-8.0-for-linus/drbd/drbd_receiver.c
   branches/drbd-8.0-for-linus/drbd/drbd_req.c
   branches/drbd-8.0-for-linus/drbd/drbd_worker.c
   branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h
   branches/drbd-8.0-for-linus/drbd/lru_cache.c
Log:
a97d57e6fadb7027f8b1ebbb266078c73db58046 if( => if (
dc3d193957478fb03ad8a2a3ec7e08097c88d114 if ( ... ) => if (...)

(last commit got a wrong log message first,
 I "repaired" that with svnadmin setlog, though...)



Modified: branches/drbd-8.0-for-linus/drbd/drbd_actlog.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_actlog.c	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_actlog.c	2007-07-24 09:44:10 UTC (rev 2984)
@@ -46,7 +46,7 @@
 	bio->bi_bdev = bdev->md_bdev;
 	bio->bi_sector = sector;
 	ok = (bio_add_page(bio, page, size, 0) == size);
-	if(!ok) goto out;
+	if (!ok) goto out;
 	init_completion(&event);
 	bio->bi_private = &event;
 	bio->bi_end_io = drbd_md_io_complete;
@@ -87,13 +87,13 @@
 	}
 
 	hardsect = drbd_get_hardsect(bdev->md_bdev);
-	if(hardsect == 0) hardsect = MD_HARDSECT;
+	if (hardsect == 0) hardsect = MD_HARDSECT;
 
 	// in case hardsect != 512 [ s390 only? ]
-	if( hardsect != MD_HARDSECT ) {
-		if(!mdev->md_io_tmpp) {
+	if (hardsect != MD_HARDSECT) {
+		if (!mdev->md_io_tmpp) {
 			struct page *page = alloc_page(GFP_NOIO);
-			if(!page) return 0;
+			if (!page) return 0;
 
 			WARN("Meta data's bdev hardsect = %d != %d\n",
 			     hardsect, MD_HARDSECT);
@@ -145,7 +145,7 @@
 		return 0;
 	}
 
-	if( hardsect != MD_HARDSECT && rw == READ ) {
+	if (hardsect != MD_HARDSECT && rw == READ) {
 		void *p = page_address(mdev->md_io_page);
 		void *hp = page_address(mdev->md_io_tmpp);
 
@@ -194,7 +194,7 @@
 	spin_lock_irq(&mdev->al_lock);
 	bm_ext = (struct bm_extent*) lc_find(mdev->resync,enr/AL_EXT_PER_BM_SECT);
 	if (unlikely(bm_ext!=NULL)) {
-		if(test_bit(BME_NO_WRITES,&bm_ext->flags)) {
+		if (test_bit(BME_NO_WRITES,&bm_ext->flags)) {
 			spin_unlock_irq(&mdev->al_lock);
 			//INFO("Delaying app write until sync read is done\n");
 			return 0;
@@ -241,7 +241,7 @@
 
 		evicted = al_ext->lc_number;
 
-		if(mdev->state.conn < Connected && evicted != LC_FREE ) {
+		if (mdev->state.conn < Connected && evicted != LC_FREE) {
 			drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT );
 		}
 
@@ -286,13 +286,13 @@
 
 	extent = lc_find(mdev->act_log,enr);
 
-	if(!extent) {
+	if (!extent) {
 		spin_unlock_irqrestore(&mdev->al_lock,flags);
 		ERR("al_complete_io() called on inactive extent %u\n",enr);
 		return;
 	}
 
-	if( lc_put(mdev->act_log,extent) == 0 ) {
+	if ( lc_put(mdev->act_log,extent) == 0 ) {
 		wake_up(&mdev->al_wait);
 	}
 
@@ -343,7 +343,7 @@
 		xor_sum ^= LC_FREE;
 	}
 	mdev->al_tr_cycle += AL_EXTENTS_PT;
-	if(mdev->al_tr_cycle >= mdev->act_log->nr_elements) mdev->al_tr_cycle=0;
+	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements) mdev->al_tr_cycle=0;
 
 	buffer->xor_sum = cpu_to_be32(xor_sum);
 
@@ -351,12 +351,12 @@
 // warning LGE "FIXME code missing"
 	sector = mdev->bc->md.md_offset + mdev->bc->md.al_offset + mdev->al_tr_pos;
 
-	if(!drbd_md_sync_page_io(mdev,mdev->bc,sector,WRITE)) {
+	if (!drbd_md_sync_page_io(mdev,mdev->bc,sector,WRITE)) {
 		drbd_chk_io_error(mdev, 1, TRUE);
 		drbd_io_error(mdev, TRUE);
 	}
 
-	if( ++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements,AL_EXTENTS_PT) ) {
+	if ( ++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements,AL_EXTENTS_PT) ) {
 		mdev->al_tr_pos=0;
 	}
 	D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
@@ -386,7 +386,7 @@
 
 	sector = bdev->md.md_offset + bdev->md.al_offset + index;
 
-	if(!drbd_md_sync_page_io(mdev,bdev,sector,READ)) {
+	if (!drbd_md_sync_page_io(mdev,bdev,sector,READ)) {
 		// Don't process error normally as this is done before
 		// disk is attached!
 		return -1;
@@ -427,27 +427,27 @@
 	// Find the valid transaction in the log
 	for(i=0;i<=mx;i++) {
 		rv = drbd_al_read_tr(mdev,bdev,buffer,i);
-		if(rv == 0) continue;
-		if(rv == -1) {
+		if (rv == 0) continue;
+		if (rv == -1) {
 			up(&mdev->md_io_mutex);
 			return 0;
 		}
 		cnr = be32_to_cpu(buffer->tr_number);
 		// INFO("index %d valid tnr=%d\n",i,cnr);
 
-		if(cnr == -1) overflow=1;
+		if (cnr == -1) overflow=1;
 
-		if(cnr < from_tnr && !overflow) {
+		if (cnr < from_tnr && !overflow) {
 			from = i;
 			from_tnr = cnr;
 		}
-		if(cnr > to_tnr) {
+		if (cnr > to_tnr) {
 			to = i;
 			to_tnr = cnr;
 		}
 	}
 
-	if(from == -1 || to == -1) {
+	if (from == -1 || to == -1) {
 		WARN("No usable activity log found.\n");
 
 		up(&mdev->md_io_mutex);
@@ -467,7 +467,7 @@
 
 		rv = drbd_al_read_tr(mdev,bdev,buffer,i);
 		ERR_IF(rv == 0) goto cancel;
-		if(rv == -1) {
+		if (rv == -1) {
 			up(&mdev->md_io_mutex);
 			return 0;
 		}
@@ -484,9 +484,9 @@
 			pos = be32_to_cpu(buffer->updates[j].pos);
 			extent_nr = be32_to_cpu(buffer->updates[j].extent);
 
-			if(extent_nr == LC_FREE) continue;
+			if (extent_nr == LC_FREE) continue;
 
-		       //if(j<3) INFO("T%03d S%03d=E%06d\n",trn,pos,extent_nr);
+		       //if (j<3) INFO("T%03d S%03d=E%06d\n",trn,pos,extent_nr);
 			lc_set(mdev->act_log,extent_nr,pos);
 			active_extents++;
 		}
@@ -495,14 +495,14 @@
 		transactions++;
 
 	cancel:
-		if( i == to) break;
+		if (i == to) break;
 		i++;
-		if( i > mx ) i=0;
+		if (i > mx) i=0;
 	}
 
 	mdev->al_tr_number = to_tnr+1;
 	mdev->al_tr_pos = to;
-	if( ++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements,AL_EXTENTS_PT) ) {
+	if ( ++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements,AL_EXTENTS_PT) ) {
 		mdev->al_tr_pos=0;
 	}
 
@@ -539,14 +539,14 @@
 	}
 
 	drbd_chk_io_error(mdev,error,TRUE);
-	if(error && wc->error == 0) wc->error=error;
+	if (error && wc->error == 0) wc->error=error;
 
 	if (atomic_dec_and_test(&wc->count)) {
 		complete(&wc->io_done);
 	}
 
 	page = bio->bi_io_vec[0].bv_page;
-	if(page) put_page(page);
+	if (page) put_page(page);
 	bio_put(bio);
 	mdev->bm_writ_cnt++;
 	dec_local(mdev);
@@ -572,22 +572,22 @@
 
 	// check if that enr is already covered by an already created bio.
 	while( (bio=bios[i]) ) {
-		if(bio->bi_sector == on_disk_sector) return 0;
+		if (bio->bi_sector == on_disk_sector) return 0;
 		i++;
 	}
 
 	bio = bio_alloc(GFP_KERNEL, 1);
-	if(bio==NULL) return -ENOMEM;
+	if (bio==NULL) return -ENOMEM;
 
 	bio->bi_bdev = mdev->bc->md_bdev;
 	bio->bi_sector = on_disk_sector;
 
 	bios[i] = bio;
 
-	if(*page_offset == PAGE_SIZE) {
+	if (*page_offset == PAGE_SIZE) {
 		np = alloc_page(__GFP_HIGHMEM);
 		/* no memory leak, bio gets cleaned up by caller */
-		if(np == NULL) return -ENOMEM;
+		if (np == NULL) return -ENOMEM;
 		*page = np;
 		*page_offset = 0;
 		allocated_page=1;
@@ -599,12 +599,12 @@
 			 kmap(*page) + *page_offset );
 	kunmap(*page);
 
-	if(bio_add_page(bio, *page, MD_HARDSECT, *page_offset)!=MD_HARDSECT) {
+	if (bio_add_page(bio, *page, MD_HARDSECT, *page_offset)!=MD_HARDSECT) {
 		/* no memory leak, page gets cleaned up by caller */
 		return -EINVAL;
 	}
 
-	if(!allocated_page) get_page(*page);
+	if (!allocated_page) get_page(*page);
 
 	*page_offset += MD_HARDSECT;
 
@@ -644,7 +644,7 @@
 	nr_elements = mdev->act_log->nr_elements;
 
 	bios = kzalloc(sizeof(struct bio*) * nr_elements, GFP_KERNEL);
-	if(!bios) goto submit_one_by_one;
+	if (!bios) goto submit_one_by_one;
 
 	atomic_set(&wc.count,0);
 	init_completion(&wc.io_done);
@@ -653,9 +653,9 @@
 
 	for(i=0;i<nr_elements;i++) {
 		enr = lc_entry(mdev->act_log,i)->lc_number;
-		if(enr == LC_FREE) continue;
+		if (enr == LC_FREE) continue;
 		/* next statement also does atomic_inc wc.count */
-		if(atodb_prepare_unless_covered(mdev,bios,&page,
+		if (atodb_prepare_unless_covered(mdev,bios,&page,
 						&page_offset,
 						enr/AL_EXT_PER_BM_SECT,
 						&wc))
@@ -685,19 +685,19 @@
 	//
 	// In case we had IOs and they are already complete, there
 	// is no point in waiting anyway.
-	// Therefore this if() ...
-	if(atomic_read(&wc.count)) wait_for_completion(&wc.io_done);
+	// Therefore this if () ...
+	if (atomic_read(&wc.count)) wait_for_completion(&wc.io_done);
 
 	dec_local(mdev);
 
-	if(wc.error) drbd_io_error(mdev, TRUE);
+	if (wc.error) drbd_io_error(mdev, TRUE);
 	kfree(bios);
 	return;
 
  free_bios_submit_one_by_one:
 	// free everything by calling the endio callback directly.
 	for(i=0;i<nr_elements;i++) {
-		if(bios[i]==NULL) break;
+		if (bios[i]==NULL) break;
 		bios[i]->bi_size=0;
 		atodb_endio(bios[i], MD_HARDSECT, 0);
 	}
@@ -708,7 +708,7 @@
 
 	for(i=0;i<mdev->act_log->nr_elements;i++) {
 		enr = lc_entry(mdev->act_log,i)->lc_number;
-		if(enr == LC_FREE) continue;
+		if (enr == LC_FREE) continue;
 		/* Really slow: if we have al-extents 16..19 active,
 		 * sector 4 will be written four times! Synchronous! */
 		drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT );
@@ -734,7 +734,7 @@
 
 	for(i=0;i<mdev->act_log->nr_elements;i++) {
 		enr = lc_entry(mdev->act_log,i)->lc_number;
-		if(enr == LC_FREE) continue;
+		if (enr == LC_FREE) continue;
 		add += drbd_bm_ALe_set_all(mdev, enr);
 	}
 
@@ -751,10 +751,10 @@
 
 	spin_lock_irq(&mdev->al_lock);
 	rv = (al_ext->refcnt == 0);
-	if(likely(rv)) lc_del(mdev->act_log,al_ext);
+	if (likely(rv)) lc_del(mdev->act_log,al_ext);
 	spin_unlock_irq(&mdev->al_lock);
 
-	if(unlikely(!rv)) INFO("Waiting for extent in drbd_al_shrink()\n");
+	if (unlikely(!rv)) INFO("Waiting for extent in drbd_al_shrink()\n");
 
 	return rv;
 }
@@ -773,7 +773,7 @@
 
 	for(i=0;i<mdev->act_log->nr_elements;i++) {
 		al_ext = lc_entry(mdev->act_log,i);
-		if(al_ext->lc_number == LC_FREE) continue;
+		if (al_ext->lc_number == LC_FREE) continue;
 		wait_event(mdev->al_wait, _try_lc_del(mdev,al_ext));
 	}
 
@@ -784,7 +784,7 @@
 {
 	struct update_odbm_work *udw = (struct update_odbm_work*)w;
 
-	if( !inc_local_if_state(mdev,Attaching) ) {
+	if ( !inc_local_if_state(mdev,Attaching) ) {
 		if (DRBD_ratelimit(5*HZ,5))
 			WARN("Can not update on disk bitmap, local IO disabled.\n");
 		return 1;
@@ -795,7 +795,7 @@
 
 	kfree(udw);
 
-	if(drbd_bm_total_weight(mdev) <= mdev->rs_failed &&
+	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed &&
 	   ( mdev->state.conn == SyncSource || mdev->state.conn == SyncTarget ||
 	     mdev->state.conn == PausedSyncS || mdev->state.conn == PausedSyncT ) ) {
 		drbd_bm_lock(mdev);
@@ -831,7 +831,7 @@
 
 	ext = (struct bm_extent *) lc_get(mdev->resync,enr);
 	if (ext) {
-		if( ext->lce.lc_number == enr) {
+		if (ext->lce.lc_number == enr) {
 			if (success)
 				ext->rs_left -= count;
 			else
@@ -860,7 +860,7 @@
 				     ext->flags, enr, rs_left);
 				ext->flags = 0;
 			}
-			if( ext->rs_failed ) {
+			if (ext->rs_failed) {
 				WARN("Kicking resync_lru element enr=%u "
 				     "out with rs_failed=%d\n",
 				     ext->lce.lc_number, ext->rs_failed);
@@ -877,7 +877,7 @@
 			ext->rs_failed = 0;
 
 			udw=kmalloc(sizeof(*udw),GFP_ATOMIC);
-			if(udw) {
+			if (udw) {
 				udw->enr = ext->lce.lc_number;
 				udw->w.cb = w_update_odbm;
 				drbd_queue_work_front(&mdev->data.work,&udw->w);
@@ -952,16 +952,16 @@
 	}
 	if (count) {
 		// we need the lock for drbd_try_clear_on_disk_bm
-		if(jiffies - mdev->rs_mark_time > HZ*10) {
+		if (jiffies - mdev->rs_mark_time > HZ*10) {
 			/* should be rolling marks, but we estimate only anyway. */
-			if( mdev->rs_mark_left != drbd_bm_total_weight(mdev) &&
+			if ( mdev->rs_mark_left != drbd_bm_total_weight(mdev) &&
 			    mdev->state.conn != PausedSyncT &&
 			    mdev->state.conn != PausedSyncS ) {
 				mdev->rs_mark_time =jiffies;
 				mdev->rs_mark_left =drbd_bm_total_weight(mdev);
 			}
 		}
-		if( inc_local_if_state(mdev,Attaching) ) {
+		if ( inc_local_if_state(mdev,Attaching) ) {
 			drbd_try_clear_on_disk_bm(mdev,sector,count,TRUE);
 			dec_local(mdev);
 		}
@@ -970,7 +970,7 @@
 		wake_up=1;
 	}
 	spin_unlock_irqrestore(&mdev->al_lock,flags);
-	if(wake_up) wake_up(&mdev->al_wait);
+	if (wake_up) wake_up(&mdev->al_wait);
 }
 
 /*
@@ -990,7 +990,7 @@
 	unsigned int enr;
 	struct bm_extent* ext;
 
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		enr = BM_SECT_TO_EXT(sector);
 		spin_lock_irqsave(&mdev->al_lock,flags);
 		ext = (struct bm_extent *) lc_find(mdev->resync,enr);
@@ -1078,17 +1078,17 @@
 	int rv=0;
 
 	spin_lock_irq(&mdev->al_lock);
-	if(unlikely(enr == mdev->act_log->new_number)) rv=1;
+	if (unlikely(enr == mdev->act_log->new_number)) rv=1;
 	else {
 		al_ext = lc_find(mdev->act_log,enr);
-		if(al_ext) {
+		if (al_ext) {
 			if (al_ext->refcnt) rv=1;
 		}
 	}
 	spin_unlock_irq(&mdev->al_lock);
 
 	/*
-	if(unlikely(rv)) {
+	if (unlikely(rv)) {
 		INFO("Delaying sync read until app's write is done\n");
 	}
 	*/
@@ -1120,14 +1120,14 @@
 			(bm_ext = _bme_get(mdev,enr)) );
 	if (sig) return 0;
 
-	if(test_bit(BME_LOCKED,&bm_ext->flags)) return 1;
+	if (test_bit(BME_LOCKED,&bm_ext->flags)) return 1;
 
 	for(i=0;i<AL_EXT_PER_BM_SECT;i++) {
 		sig = wait_event_interruptible( mdev->al_wait,
 				!_is_in_al(mdev,enr*AL_EXT_PER_BM_SECT+i) );
 		if (sig) {
 			spin_lock_irq(&mdev->al_lock);
-			if( lc_put(mdev->resync,&bm_ext->lce) == 0 ) {
+			if ( lc_put(mdev->resync,&bm_ext->lce) == 0 ) {
 				clear_bit(BME_NO_WRITES,&bm_ext->flags);
 				mdev->resync_locked--;
 				wake_up(&mdev->al_wait);
@@ -1279,20 +1279,20 @@
 
 	spin_lock_irqsave(&mdev->al_lock,flags);
 	bm_ext = (struct bm_extent*) lc_find(mdev->resync,enr);
-	if(!bm_ext) {
+	if (!bm_ext) {
 		spin_unlock_irqrestore(&mdev->al_lock,flags);
 		ERR("drbd_rs_complete_io() called, but extent not found\n");
 		return;
 	}
 
-	if(bm_ext->lce.refcnt == 0) {
+	if (bm_ext->lce.refcnt == 0) {
 		spin_unlock_irqrestore(&mdev->al_lock,flags);
 		ERR("drbd_rs_complete_io(,%llu [=%u]) called, but refcnt is 0!?\n",
 		    (unsigned long long)sector, enr);
 		return;
 	}
 
-	if( lc_put(mdev->resync,(struct lc_element *)bm_ext) == 0 ) {
+	if ( lc_put(mdev->resync,(struct lc_element *)bm_ext) == 0 ) {
 		clear_bit(BME_LOCKED,&bm_ext->flags);
 		clear_bit(BME_NO_WRITES,&bm_ext->flags);
 		mdev->resync_locked--;
@@ -1317,10 +1317,10 @@
 
 	spin_lock_irq(&mdev->al_lock);
 
-	if(inc_local_if_state(mdev,Failed)) { // Makes sure ->resync is there.
+	if (inc_local_if_state(mdev,Failed)) { // Makes sure ->resync is there.
 		for(i=0;i<mdev->resync->nr_elements;i++) {
 			bm_ext = (struct bm_extent*) lc_entry(mdev->resync,i);
-			if(bm_ext->lce.lc_number == LC_FREE) continue;
+			if (bm_ext->lce.lc_number == LC_FREE) continue;
 			bm_ext->lce.refcnt = 0; // Rude but ok.
 			bm_ext->rs_left = 0;
 			clear_bit(BME_LOCKED,&bm_ext->flags);
@@ -1353,10 +1353,10 @@
 
 	spin_lock_irq(&mdev->al_lock);
 
-	if(inc_local_if_state(mdev,Failed)) { // Makes sure ->resync is there.
+	if (inc_local_if_state(mdev,Failed)) { // Makes sure ->resync is there.
 		for(i=0;i<mdev->resync->nr_elements;i++) {
 			bm_ext = (struct bm_extent*) lc_entry(mdev->resync,i);
-			if(bm_ext->lce.lc_number == LC_FREE) continue;
+			if (bm_ext->lce.lc_number == LC_FREE) continue;
 			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
 				INFO("dropping %u in drbd_rs_del_all, "
 				     "aparently got 'synced' by application io\n",
@@ -1367,7 +1367,7 @@
 				mdev->resync_wenr = LC_FREE;
 				lc_put(mdev->resync,&bm_ext->lce);
 			}
-			if(bm_ext->lce.refcnt != 0) {
+			if (bm_ext->lce.refcnt != 0) {
 				INFO("Retrying drbd_rs_del_all() later. "
 				     "refcnt=%d\n",bm_ext->lce.refcnt);
 				dec_local(mdev);
@@ -1443,7 +1443,7 @@
 	if (count) {
 		mdev->rs_failed += count;
 
-		if( inc_local_if_state(mdev,Attaching) ) {
+		if ( inc_local_if_state(mdev,Attaching) ) {
 			drbd_try_clear_on_disk_bm(mdev,sector,count,FALSE);
 			dec_local(mdev);
 		}
@@ -1453,5 +1453,5 @@
 		wake_up=1;
 	}
 	spin_unlock_irq(&mdev->al_lock);
-	if(wake_up) wake_up(&mdev->al_wait);
+	if (wake_up) wake_up(&mdev->al_wait);
 }

Modified: branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c	2007-07-24 09:44:10 UTC (rev 2984)
@@ -212,7 +212,7 @@
 	INFO("bm[%d]=0x%lX\n", w, b->bm[w]);
 	w++;
 
-	if ( w < b->bm_words ) {
+	if (w < b->bm_words) {
 		D_ASSERT(w == b->bm_words -1);
 		INFO("bm[%d]=0x%lX\n",w,b->bm[w]);
 	}
@@ -288,12 +288,12 @@
 	size_t w = b->bm_bits >> LN2_BPL;
 	int cleared=0;
 
-	if ( w < b->bm_words ) {
+	if (w < b->bm_words) {
 		cleared = hweight_long(b->bm[w] & ~mask);
 		b->bm[w++] &= mask;
 	}
 
-	if ( w < b->bm_words ) {
+	if (w < b->bm_words) {
 		cleared += hweight_long(b->bm[w]);
 		b->bm[w++]=0;
 	}
@@ -306,11 +306,11 @@
 	const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) -1;
 	size_t w = b->bm_bits >> LN2_BPL;
 
-	if ( w < b->bm_words ) {
+	if (w < b->bm_words) {
 		b->bm[w++] |= ~mask;
 	}
 
-	if ( w < b->bm_words ) {
+	if (w < b->bm_words) {
 		b->bm[w++] = ~(0UL);
 	}
 }
@@ -340,7 +340,7 @@
 
 	spin_lock_irqsave(&b->bm_lock,flags);
 	bits = bm_count_bits(b,0);
-	if(bits != b->bm_set) {
+	if (bits != b->bm_set) {
 		ERR("bm_set was %lu, corrected to %lu. %s:%d\n",
 		    b->bm_set,bits,file,line);
 		b->bm_set = bits;
@@ -400,7 +400,7 @@
 
 		D_ASSERT((u64)bits <= (((u64)mdev->bc->md.md_size_sect-MD_BM_OFFSET) << 12));
 
-		if ( words == b->bm_words ) {
+		if (words == b->bm_words) {
 			/* optimize: capacity has changed,
 			 * but only within one long word worth of bits.
 			 * just update the bm_dev_capacity and bm_bits members.
@@ -444,7 +444,7 @@
 		b->bm_words = words;
 		b->bm_dev_capacity = capacity;
 		bm_clear_surplus(b);
-		if( !growing ) b->bm_set = bm_count_bits(b,0);
+		if (!growing) b->bm_set = bm_count_bits(b,0);
 		bm_end_info(mdev, __FUNCTION__ );
 		spin_unlock_irq(&b->bm_lock);
 		INFO("resync bitmap: bits=%lu words=%lu\n",bits,words);
@@ -699,7 +699,7 @@
 	// MUST_BE_LOCKED(); not necessarily global ...
 
 	down(&mdev->md_io_mutex);
-	if(drbd_md_sync_page_io(mdev,mdev->bc,on_disk_sector,READ)) {
+	if (drbd_md_sync_page_io(mdev,mdev->bc,on_disk_sector,READ)) {
 		bm_words  = drbd_bm_words(mdev);
 		offset    = S2W(enr);	// word offset into bitmap
 		num_words = min(S2W(1), bm_words - offset);
@@ -783,7 +783,7 @@
 	 */
 	mdev->bitmap = NULL;
 
-	if(rw == WRITE)	bm_cpu_to_lel(b);
+	if (rw == WRITE)	bm_cpu_to_lel(b);
 
 	now = jiffies;
 	atomic_set(&b->bm_async_io, num_pages);
@@ -807,7 +807,7 @@
 	}
 
 	now = jiffies;
-	if(rw == WRITE) {
+	if (rw == WRITE) {
 		bm_lel_to_cpu(b);
 	} else /* rw == READ */ {
 		/* just read, if necessary adjust endianness */

Modified: branches/drbd-8.0-for-linus/drbd/drbd_int.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_int.h	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_int.h	2007-07-24 09:44:10 UTC (rev 2984)
@@ -236,7 +236,7 @@
 #define RECALC_SIGPENDING()        recalc_sigpending();
 
 #if defined(DBG_SPINLOCKS) && defined(__SMP__)
-# define MUST_HOLD(lock) if(!spin_is_locked(lock)) { ERR("Not holding lock! in %s\n", __FUNCTION__ ); }
+# define MUST_HOLD(lock) if (!spin_is_locked(lock)) { ERR("Not holding lock! in %s\n", __FUNCTION__ ); }
 #else
 # define MUST_HOLD(lock)
 #endif
@@ -1501,7 +1501,7 @@
 
 static inline int semaphore_is_locked(struct semaphore* s)
 {
-	if(!down_trylock(s)) {
+	if (!down_trylock(s)) {
 		up(s);
 		return 0;
 	}
@@ -1609,7 +1609,7 @@
 }
 
 static inline void wake_asender(drbd_dev *mdev) {
-	if(test_bit(SIGNAL_ASENDER, &mdev->flags)) {
+	if (test_bit(SIGNAL_ASENDER, &mdev->flags)) {
 		force_sig(DRBD_SIG, mdev->asender.task);
 	}
 }
@@ -1681,14 +1681,14 @@
 }
 
 #define ERR_IF_CNT_IS_NEGATIVE(which)				\
-	if(atomic_read(&mdev->which)<0)				\
+	if (atomic_read(&mdev->which)<0)				\
 		ERR("in %s:%d: " #which " = %d < 0 !\n",	\
 		    __func__ , __LINE__ ,			\
 		    atomic_read(&mdev->which))
 
 #define dec_ap_pending(mdev)	do {				\
 	typecheck(drbd_dev*,mdev);				\
-	if(atomic_dec_and_test(&mdev->ap_pending_cnt))		\
+	if (atomic_dec_and_test(&mdev->ap_pending_cnt))		\
 		wake_up(&mdev->misc_wait);			\
 	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)
 
@@ -1735,7 +1735,7 @@
 
 static inline void dec_net(drbd_dev* mdev)
 {
-	if(atomic_dec_and_test(&mdev->net_cnt)) {
+	if (atomic_dec_and_test(&mdev->net_cnt)) {
 		wake_up(&mdev->misc_wait);
 	}
 }
@@ -1750,7 +1750,7 @@
 
 	atomic_inc(&mdev->net_cnt);
 	have_net_conf = mdev->state.conn >= Unconnected;
-	if(!have_net_conf) dec_net(mdev);
+	if (!have_net_conf) dec_net(mdev);
 	return have_net_conf;
 }
 
@@ -1762,7 +1762,7 @@
 
 static inline void dec_local(drbd_dev* mdev)
 {
-	if(atomic_dec_and_test(&mdev->local_cnt)) {
+	if (atomic_dec_and_test(&mdev->local_cnt)) {
 		wake_up(&mdev->misc_wait);
 	}
 	D_ASSERT(atomic_read(&mdev->local_cnt)>=0);
@@ -1777,7 +1777,7 @@
 
 	atomic_inc(&mdev->local_cnt);
 	io_allowed = (mdev->state.disk >= mins );
-	if( !io_allowed ) {
+	if (!io_allowed) {
 		dec_local(mdev);
 	}
 	return io_allowed;
@@ -1793,7 +1793,7 @@
 static inline int drbd_get_max_buffers(drbd_dev* mdev)
 {
 	int mxb = 1000000; /* arbitrary limit on open requests */
-	if(inc_net(mdev)) {
+	if (inc_net(mdev)) {
 		mxb = mdev->net_conf->max_buffers;
 		dec_net(mdev);
 	}

Modified: branches/drbd-8.0-for-linus/drbd/drbd_main.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_main.c	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_main.c	2007-07-24 09:44:10 UTC (rev 2984)
@@ -159,7 +159,7 @@
 	struct drbd_barrier *b;
 
 	b=kmalloc(sizeof(struct drbd_barrier),GFP_KERNEL);
-	if(!b) return 0;
+	if (!b) return 0;
 	INIT_LIST_HEAD(&b->requests);
 	INIT_LIST_HEAD(&b->w.list);
 	b->next=0;
@@ -179,7 +179,7 @@
 {
 	D_ASSERT(mdev->oldest_barrier == mdev->newest_barrier);
 	kfree(mdev->oldest_barrier);
-	if(mdev->tl_hash) {
+	if (mdev->tl_hash) {
 		kfree(mdev->tl_hash);
 		mdev->tl_hash_s = 0;
 	}
@@ -241,11 +241,11 @@
 	D_ASSERT(b->n_req == set_size);
 
 #if 1
-	if(b->br_number != barrier_nr) {
+	if (b->br_number != barrier_nr) {
 		DUMPI(b->br_number);
 		DUMPI(barrier_nr);
 	}
-	if(b->n_req != set_size) {
+	if (b->n_req != set_size) {
 		DUMPI(b->n_req);
 		DUMPI(set_size);
 	}
@@ -319,22 +319,22 @@
 	int send,ok=1;
 
 	eh = PassOn;
-	if(inc_local_if_state(mdev,Failed)) {
+	if (inc_local_if_state(mdev,Failed)) {
 		eh = mdev->bc->dc.on_io_error;
 		dec_local(mdev);
 	}
 
-	if(!forcedetach && eh == PassOn)
+	if (!forcedetach && eh == PassOn)
 		return 1;
 
 	spin_lock_irqsave(&mdev->req_lock,flags);
-	if( (send = (mdev->state.disk == Failed)) ) {
+	if ( (send = (mdev->state.disk == Failed)) ) {
 		_drbd_set_state(_NS(mdev,disk,Diskless),
 				ChgStateHard|ScheduleAfter);
 	}
 	spin_unlock_irqrestore(&mdev->req_lock,flags);
 
-	if(!send) return ok;
+	if (!send) return ok;
 
 	ok = drbd_send_state(mdev);
 	if (ok) WARN("Notified peer that my disk is broken.\n");
@@ -349,7 +349,7 @@
 
 	/* Releasing the backing device is done in after_state_ch() */
 
-	if(eh == CallIOEHelper) {
+	if (eh == CallIOEHelper) {
 		drbd_khelper(mdev,"local-io-error");
 	}
 
@@ -404,22 +404,22 @@
 	unsigned long flags;
 	int rv;
 
-	if(test_and_clear_bit(CL_ST_CHG_SUCCESS,&mdev->flags))
+	if (test_and_clear_bit(CL_ST_CHG_SUCCESS,&mdev->flags))
 		return SS_CW_Success;
 
-	if(test_and_clear_bit(CL_ST_CHG_FAIL,&mdev->flags))
+	if (test_and_clear_bit(CL_ST_CHG_FAIL,&mdev->flags))
 		return SS_CW_FailedByPeer;
 
 	rv=0;
 	spin_lock_irqsave(&mdev->req_lock,flags);
 	os = mdev->state;
 	ns.i = (os.i & ~mask.i) | val.i;
-	if( !cl_wide_st_chg(mdev,os,ns) ) rv = SS_CW_NoNeed;
-	if( !rv ) {
+	if ( !cl_wide_st_chg(mdev,os,ns) ) rv = SS_CW_NoNeed;
+	if (!rv) {
 		rv = is_valid_state(mdev,ns);
-		if(rv==SS_Success) {
+		if (rv==SS_Success) {
 			rv = is_valid_state_transition(mdev,ns,os);
-			if(rv==SS_Success) rv = 0; // cont waiting, otherwise fail.
+			if (rv==SS_Success) rv = 0; // cont waiting, otherwise fail.
 		}
 	}
 	spin_unlock_irqrestore(&mdev->req_lock,flags);
@@ -444,30 +444,30 @@
 	os = mdev->state;
 	ns.i = (os.i & ~mask.i) | val.i;
 
-	if(cl_wide_st_chg(mdev,os,ns)) {
+	if (cl_wide_st_chg(mdev,os,ns)) {
 		rv = is_valid_state(mdev,ns);
-		if(rv == SS_Success ) rv = is_valid_state_transition(mdev,ns,os);
+		if (rv == SS_Success) rv = is_valid_state_transition(mdev,ns,os);
 		spin_unlock_irqrestore(&mdev->req_lock,flags);
 
-		if( rv < SS_Success ) {
-			if( f & ChgStateVerbose ) print_st_err(mdev,os,ns,rv);
+		if (rv < SS_Success) {
+			if (f & ChgStateVerbose) print_st_err(mdev,os,ns,rv);
 			return rv;
 		}
 
 		drbd_state_lock(mdev);
-		if( !drbd_send_state_req(mdev,mask,val) ) {
+		if ( !drbd_send_state_req(mdev,mask,val) ) {
 			drbd_state_unlock(mdev);
 			rv = SS_CW_FailedByPeer;
-			if( f & ChgStateVerbose ) print_st_err(mdev,os,ns,rv);
+			if (f & ChgStateVerbose) print_st_err(mdev,os,ns,rv);
 			return rv;
 		}
 
 		wait_event(mdev->state_wait,(rv=_req_st_cond(mdev,mask,val)));
 
-		if( rv < SS_Success ) {
+		if (rv < SS_Success) {
 			// nearly dead code.
 			drbd_state_unlock(mdev);
-			if( f & ChgStateVerbose ) print_st_err(mdev,os,ns,rv);
+			if (f & ChgStateVerbose) print_st_err(mdev,os,ns,rv);
 			return rv;
 		}
 		spin_lock_irqsave(&mdev->req_lock,flags);
@@ -519,7 +519,7 @@
 #define user_isps_to_name(A) ( (A) ? "1" : "0" )
 
 #define PSC(A) \
-	({ if( ns.A != os.A ) { \
+	({ if (ns.A != os.A) { \
 		pbp += sprintf(pbp, #A "( %s -> %s ) ", \
 		              A##s_to_name(os.A), \
 		              A##s_to_name(ns.A)); \
@@ -533,41 +533,41 @@
 	int rv=SS_Success;
 
 	fp = DontCare;
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		fp = mdev->bc->dc.fencing;
 		dec_local(mdev);
 	}
 
-	if(inc_net(mdev)) {
-		if( !mdev->net_conf->two_primaries &&
+	if (inc_net(mdev)) {
+		if ( !mdev->net_conf->two_primaries &&
 		    ns.role == Primary && ns.peer == Primary )
 			rv=SS_TwoPrimaries;
 		dec_net(mdev);
 	}
 
-	if( rv <= 0 ) /* already found a reason to abort */;
-	else if( ns.role == Secondary && mdev->open_cnt )
+	if (rv <= 0) /* already found a reason to abort */;
+	else if (ns.role == Secondary && mdev->open_cnt)
 		rv=SS_DeviceInUse;
 
-	else if( ns.role == Primary && ns.conn < Connected &&
+	else if ( ns.role == Primary && ns.conn < Connected &&
 		 ns.disk < UpToDate ) rv=SS_NoUpToDateDisk;
 
-	else if( fp >= Resource &&
+	else if ( fp >= Resource &&
 		 ns.role == Primary && ns.conn < Connected &&
 		 ns.pdsk >= DUnknown ) rv=SS_PrimaryNOP;
 
-	else if( ns.role == Primary && ns.disk <= Inconsistent &&
+	else if ( ns.role == Primary && ns.disk <= Inconsistent &&
 		 ns.pdsk <= Inconsistent ) rv=SS_NoUpToDateDisk;
 
-	else if( ns.conn > Connected &&
+	else if ( ns.conn > Connected &&
 		 ns.disk < UpToDate && ns.pdsk < UpToDate )
 		rv=SS_BothInconsistent;
 
-	else if( ns.conn > Connected &&
+	else if ( ns.conn > Connected &&
 		 (ns.disk == Diskless || ns.pdsk == Diskless ) )
 		rv=SS_SyncingDiskless;
 
-	else if( (ns.conn == Connected ||
+	else if ( (ns.conn == Connected ||
 		  ns.conn == WFBitMapS ||
 		  ns.conn == SyncSource ||
 		  ns.conn == PausedSyncS) &&
@@ -580,13 +580,13 @@
 {
 	int rv=SS_Success;
 
-	if( (ns.conn == StartingSyncT || ns.conn == StartingSyncS ) &&
+	if ( (ns.conn == StartingSyncT || ns.conn == StartingSyncS ) &&
 	    os.conn > Connected) rv=SS_ResyncRunning;
 
-	if( ns.conn == Disconnecting && os.conn == StandAlone)
+	if (ns.conn == Disconnecting && os.conn == StandAlone)
 		rv=SS_AlreadyStandAlone;
 
-	if( ns.disk > Attaching && os.disk == Diskless)
+	if (ns.disk > Attaching && os.disk == Diskless)
 		rv=SS_IsDiskLess;
 
 	return rv;
@@ -603,49 +603,49 @@
 	os = mdev->state;
 
 	fp = DontCare;
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		fp = mdev->bc->dc.fencing;
 		dec_local(mdev);
 	}
 
 	/* Early state sanitising. Disallow the invalidate ioctl to connect  */
-	if( (ns.conn == StartingSyncS || ns.conn == StartingSyncT) &&
+	if ( (ns.conn == StartingSyncS || ns.conn == StartingSyncT) &&
 		os.conn < Connected ) {
 		ns.conn = os.conn;
 		ns.pdsk = os.pdsk;
 	}
 
 	/* Disallow network errors to configure a device's network part */
-	if( (ns.conn >= Timeout && ns.conn <= TearDown ) &&
+	if ( (ns.conn >= Timeout && ns.conn <= TearDown ) &&
 	    os.conn <= Disconnecting ) {
 		ns.conn = os.conn;
 	}
 
 	/* Disallow network errors (+TearDown) to overwrite each other.
 	   Disallow network errors to overwrite the Disconnecting state. */
-	if( ( (os.conn >= Timeout && os.conn <= TearDown)
+	if ( ( (os.conn >= Timeout && os.conn <= TearDown)
 	      || os.conn == Disconnecting ) &&
 	    ns.conn >= Timeout && ns.conn <= TearDown ) {
 		ns.conn = os.conn;
 	}
 
-	if( ns.conn < Connected ) {
+	if (ns.conn < Connected) {
 		ns.peer_isp = 0;
 		ns.peer = Unknown;
 		if ( ns.pdsk > DUnknown ||
 		     ns.pdsk < Inconsistent ) ns.pdsk = DUnknown;
 	}
 
-	if( ns.conn <= Disconnecting && ns.disk == Diskless ) {
+	if (ns.conn <= Disconnecting && ns.disk == Diskless) {
 		ns.pdsk = DUnknown;
 	}
 
-	if( ns.conn > Connected && (ns.disk <= Failed || ns.pdsk <= Failed )) {
+	if ( ns.conn > Connected && (ns.disk <= Failed || ns.pdsk <= Failed )) {
 		warn_sync_abort=1;
 		ns.conn = Connected;
 	}
 
-	if( ns.conn >= Connected &&
+	if ( ns.conn >= Connected &&
 	    ( ns.disk == Consistent || ns.disk == Outdated ) ) {
 		switch(ns.conn) {
 		case WFBitMapT:
@@ -663,12 +663,12 @@
 			WARN("Implicit set disk state Inconsistent!\n");
 			break;
 		}
-		if( os.disk == Outdated && ns.disk == UpToDate ) {
+		if (os.disk == Outdated && ns.disk == UpToDate) {
 			WARN("Implicit set disk from Outdate to UpToDate\n");
 		}
 	}
 
-	if( ns.conn >= Connected &&
+	if ( ns.conn >= Connected &&
 	    ( ns.pdsk == Consistent || ns.pdsk == Outdated ) ) {
 		switch(ns.conn) {
 		case Connected:
@@ -686,45 +686,45 @@
 			WARN("Implicit set pdsk Inconsistent!\n");
 			break;
 		}
-		if( os.pdsk == Outdated && ns.pdsk == UpToDate ) {
+		if (os.pdsk == Outdated && ns.pdsk == UpToDate) {
 			WARN("Implicit set pdsk from Outdate to UpToDate\n");
 		}
 	}
 
 	/* Connection breaks down before we finished "Negotiating" */
-	if (ns.conn < Connected && ns.disk == Negotiating ) {
+	if (ns.conn < Connected && ns.disk == Negotiating) {
 		ns.disk = mdev->new_state_tmp.disk;
 		ns.pdsk = mdev->new_state_tmp.pdsk;
 	}
 
-	if( fp == Stonith ) {
-		if(ns.role == Primary &&
+	if (fp == Stonith) {
+		if (ns.role == Primary &&
 		   ns.conn < Connected &&
 		   ns.pdsk > Outdated ) {
 			ns.susp = 1;
 		}
 	}
 
-	if( ns.aftr_isp || ns.peer_isp || ns.user_isp ) {
-		if(ns.conn == SyncSource) ns.conn=PausedSyncS;
-		if(ns.conn == SyncTarget) ns.conn=PausedSyncT;
+	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
+		if (ns.conn == SyncSource) ns.conn=PausedSyncS;
+		if (ns.conn == SyncTarget) ns.conn=PausedSyncT;
 	} else {
-		if(ns.conn == PausedSyncS) ns.conn=SyncSource;
-		if(ns.conn == PausedSyncT) ns.conn=SyncTarget;
+		if (ns.conn == PausedSyncS) ns.conn=SyncSource;
+		if (ns.conn == PausedSyncT) ns.conn=SyncTarget;
 	}
 
-	if( ns.i == os.i ) return SS_NothingToDo;
+	if (ns.i == os.i) return SS_NothingToDo;
 
-	if( !(flags & ChgStateHard) ) {
+	if ( !(flags & ChgStateHard) ) {
 		/*  pre-state-change checks ; only look at ns  */
 		/* See drbd_state_sw_errors in drbd_strings.c */
 
 		rv = is_valid_state(mdev,ns);
-		if(rv < SS_Success) {
+		if (rv < SS_Success) {
 			/* If the old state was illegal as well, then let
 			   this happen...*/
 
-			if( is_valid_state(mdev,os) == rv ) {
+			if ( is_valid_state(mdev,os) == rv ) {
 				ERR("Forcing state change from bad state. "
 				    "Error would be: '%s'\n",
 				    set_st_err_name(rv));
@@ -735,12 +735,12 @@
 		} else rv = is_valid_state_transition(mdev,ns,os);
 	}
 
-	if(rv < SS_Success) {
-		if( flags & ChgStateVerbose ) print_st_err(mdev,os,ns,rv);
+	if (rv < SS_Success) {
+		if (flags & ChgStateVerbose) print_st_err(mdev,os,ns,rv);
 		return rv;
 	}
 
-	if(warn_sync_abort) {
+	if (warn_sync_abort) {
 		WARN("Resync aborted.\n");
 	}
 
@@ -767,27 +767,27 @@
 	wake_up(&mdev->state_wait);
 
 	/**   post-state-change actions   **/
-	if ( os.conn >= SyncSource   && ns.conn <= Connected ) {
+	if (os.conn >= SyncSource   && ns.conn <= Connected) {
 		set_bit(STOP_SYNC_TIMER,&mdev->flags);
 		mod_timer(&mdev->resync_timer,jiffies);
 	}
 
-	if( (os.conn == PausedSyncT || os.conn == PausedSyncS) &&
+	if ( (os.conn == PausedSyncT || os.conn == PausedSyncS) &&
 	    (ns.conn == SyncTarget  || ns.conn == SyncSource) ) {
 		INFO("Syncer continues.\n");
 		mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time;
-		if( ns.conn == SyncTarget ) {
+		if (ns.conn == SyncTarget) {
 			D_ASSERT(!test_bit(STOP_SYNC_TIMER,&mdev->flags));
 			clear_bit(STOP_SYNC_TIMER,&mdev->flags);
 			mod_timer(&mdev->resync_timer,jiffies);
 		}
 	}
 
-	if( (os.conn == SyncTarget  || os.conn == SyncSource) &&
+	if ( (os.conn == SyncTarget  || os.conn == SyncSource) &&
 	    (ns.conn == PausedSyncT || ns.conn == PausedSyncS) ) {
 		INFO("Resync suspended\n");
 		mdev->rs_mark_time = jiffies;
-		if( ns.conn == PausedSyncT ) {
+		if (ns.conn == PausedSyncT) {
 			set_bit(STOP_SYNC_TIMER,&mdev->flags);
 		}
 	}
@@ -799,11 +799,11 @@
 		D_ASSERT(i);
 	}
 
-	if( flags & ScheduleAfter ) {
+	if (flags & ScheduleAfter) {
 		struct after_state_chg_work* ascw;
 
 		ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
-		if(ascw) {
+		if (ascw) {
 			ascw->os = os;
 			ascw->ns = ns;
 			ascw->flags = flags;
@@ -836,13 +836,13 @@
 
 	if ( (os.conn != Connected && ns.conn == Connected) ) {
 		clear_bit(CRASHED_PRIMARY, &mdev->flags);
-		if( mdev->p_uuid ) {
+		if (mdev->p_uuid) {
 			mdev->p_uuid[UUID_FLAGS] &= ~((u64)2);
 		}
 	}
 
 	fp = DontCare;
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		fp = mdev->bc->dc.fencing;
 
 		mdf = mdev->bc->md.flags & ~(MDF_Consistent|MDF_PrimaryInd|
@@ -858,7 +858,7 @@
 		if (mdev->state.disk > Outdated)       mdf |= MDF_WasUpToDate;
 		if (mdev->state.pdsk <= Outdated &&
 		    mdev->state.pdsk >= Inconsistent)  mdf |= MDF_PeerOutDated;
-		if( mdf != mdev->bc->md.flags) {
+		if (mdf != mdev->bc->md.flags) {
 			mdev->bc->md.flags = mdf;
 			drbd_md_mark_dirty(mdev);
 		}
@@ -871,7 +871,7 @@
 	/* Here we have the actions that are performed after a
 	   state change. This function might sleep */
 
-	if( fp == Stonith && ns.susp ) {
+	if (fp == Stonith && ns.susp) {
 		// case1: The outdate peer handler is successful:
 		// case2: The connection was established again:
 		if ( (os.pdsk > Outdated  && ns.pdsk <= Outdated) || // case1
@@ -900,16 +900,16 @@
 	/* Lost contact to peer's copy of the data */
 	if ( (os.pdsk>=Inconsistent && os.pdsk!=DUnknown && os.pdsk!=Outdated) &&
 	     (ns.pdsk<Inconsistent || ns.pdsk==DUnknown || ns.pdsk==Outdated) ) {
-		if ( mdev->p_uuid ) {
+		if (mdev->p_uuid) {
 			kfree(mdev->p_uuid);
 			mdev->p_uuid = NULL;
 		}
 		if (inc_local(mdev)) {
-			if (ns.role == Primary && mdev->bc->md.uuid[Bitmap] == 0 ) {
+			if (ns.role == Primary && mdev->bc->md.uuid[Bitmap] == 0) {
 				/* Only do it if we have not yet done it... */
 				drbd_uuid_new_current(mdev);
 			}
-			if (ns.peer == Primary ) {
+			if (ns.peer == Primary) {
 				/* Note: The condition ns.peer == Primary implies
 				   that we are connected. Otherwise it would
 				   be ns.peer == Unknown. */
@@ -925,13 +925,13 @@
 		}
 	}
 
-	if( ns.pdsk < Inconsistent ) {
+	if (ns.pdsk < Inconsistent) {
 		/* Diskless Peer becomes primary */
-		if (os.peer == Secondary && ns.peer == Primary ) {
+		if (os.peer == Secondary && ns.peer == Primary) {
 			drbd_uuid_new_current(mdev);
 		}
 		/* Diskless Peer becomes secondary */
-		if (os.peer == Primary && ns.peer == Secondary ) {
+		if (os.peer == Primary && ns.peer == Secondary) {
 			drbd_al_to_on_disk_bm(mdev);
 		}
 	}
@@ -1001,7 +1001,7 @@
 		drbd_bm_unlock(mdev);
 	}
 
-	if ( os.disk > Diskless && ns.disk == Diskless ) {
+	if (os.disk > Diskless && ns.disk == Diskless) {
 		/* since inc_local() only works as long as disk>=Inconsistent,
 		   and it is Diskless here, local_cnt can only go down, it can
 		   not increase... It will reach zero */
@@ -1020,12 +1020,12 @@
 	}
 
 	// Receiver should clean up itself
-	if ( os.conn != Disconnecting && ns.conn == Disconnecting ) {
+	if (os.conn != Disconnecting && ns.conn == Disconnecting) {
 		drbd_thread_signal(&mdev->receiver);
 	}
 
 	// Now the receiver finished cleaning up itself, it should die now
-	if ( os.conn != StandAlone && ns.conn == StandAlone ) {
+	if (os.conn != StandAlone && ns.conn == StandAlone) {
 		drbd_thread_stop_nowait(&mdev->receiver);
 	}
 
@@ -1035,13 +1035,13 @@
 		drbd_thread_restart_nowait(&mdev->receiver);
 	}
 
-	if ( os.conn == StandAlone && ns.conn == Unconnected) {
+	if (os.conn == StandAlone && ns.conn == Unconnected) {
 		drbd_thread_start(&mdev->receiver);
 	}
 
 	if ( os.disk == Diskless && os.conn <= Disconnecting &&
 	     (ns.disk > Diskless || ns.conn >= Unconnected) ) {
-		if(!drbd_thread_start(&mdev->worker)) {
+		if (!drbd_thread_start(&mdev->worker)) {
 			module_put(THIS_MODULE);
 		}
 	}
@@ -1076,7 +1076,7 @@
 
 	while(1) {
 		retval = thi->function(thi);
-		if(get_t_state(thi) != Restarting) break;
+		if (get_t_state(thi) != Restarting) break;
 		thi->t_state = Running;
 	}
 
@@ -1151,7 +1151,7 @@
 
 	if (thi->t_state == None) {
 		spin_unlock(&thi->t_lock);
-		if(restart) drbd_thread_start(thi);
+		if (restart) drbd_thread_start(thi);
 		return;
 	}
 
@@ -1164,7 +1164,7 @@
 		thi->t_state = ns;
 		smp_mb();
 		if (thi->task != current) {
-			if(wait) init_completion(&thi->startstop);
+			if (wait) init_completion(&thi->startstop);
 			force_sig(DRBD_SIGKILL,thi->task);
 		} else D_ASSERT(!wait);
 	}
@@ -1214,7 +1214,7 @@
 	sent = drbd_send(mdev,sock,h,size,msg_flags);
 
 	ok = ( sent == size );
-	if(!ok) {
+	if (!ok) {
 		ERR("short sent %s size=%d sent=%d\n",
 		    cmdname(cmd), (int)size, sent);
 	}
@@ -1304,7 +1304,7 @@
 	int i;
 	u64 uuid_flags = 0;
 
-	if(!inc_local_if_state(mdev,Negotiating)) return 1; // ok.
+	if (!inc_local_if_state(mdev,Negotiating)) return 1; // ok.
 
 	for (i = Current; i < UUID_SIZE; i++) {
 		/* FIXME howto handle diskless ? */
@@ -1342,7 +1342,7 @@
 	int q_order_type;
 	int ok;
 
-	if(inc_local_if_state(mdev,Negotiating)) {
+	if (inc_local_if_state(mdev,Negotiating)) {
 		D_ASSERT(mdev->bc->backing_bdev);
 		d_size = drbd_get_max_capacity(mdev->bc);
 		u_size = mdev->bc->dc.disk_size;
@@ -1552,7 +1552,7 @@
 		return TRUE;
 
 	drop_it = !--mdev->ko_count;
-	if ( !drop_it ) {
+	if (!drop_it) {
 		ERR("[%s/%d] sock_sendmsg time expired, ko = %u\n",
 		       current->comm, current->pid, mdev->ko_count);
 		request_ping(mdev);
@@ -1697,13 +1697,13 @@
 	p.seq_num  = cpu_to_be32( req->seq_num =
 				  atomic_add_return(1,&mdev->packet_seq) );
 	dp_flags = 0;
-	if(req->master_bio->bi_rw & BIO_RW_BARRIER) {
+	if (req->master_bio->bi_rw & BIO_RW_BARRIER) {
 		dp_flags |= DP_HARDBARRIER;
 	}
-	if(req->master_bio->bi_rw & BIO_RW_SYNC) {
+	if (req->master_bio->bi_rw & BIO_RW_SYNC) {
 		dp_flags |= DP_RW_SYNC;
 	}
-	if(mdev->state.conn >= SyncSource &&
+	if (mdev->state.conn >= SyncSource &&
 	   mdev->state.conn <= PausedSyncT) {
 		dp_flags |= DP_MAY_SET_IN_SYNC;
 	}
@@ -1712,8 +1712,8 @@
 	dump_packet(mdev,mdev->data.socket,0,(void*)&p, __FILE__, __LINE__);
 	set_bit(UNPLUG_REMOTE,&mdev->flags);
 	ok = sizeof(p) == drbd_send(mdev,mdev->data.socket,&p,sizeof(p),MSG_MORE);
-	if(ok) {
-		if(mdev->net_conf->wire_protocol == DRBD_PROT_A) {
+	if (ok) {
+		if (mdev->net_conf->wire_protocol == DRBD_PROT_A) {
 			ok = _drbd_send_bio(mdev,req->master_bio);
 		} else {
 			ok = _drbd_send_zc_bio(mdev,req->master_bio);
@@ -1831,7 +1831,7 @@
 				continue;
 		}
 		D_ASSERT(rv != 0);
-		if (rv == -EINTR ) {
+		if (rv == -EINTR) {
 #if 0
 			/* FIXME this happens all the time.
 			 * we don't care for now!
@@ -1876,7 +1876,7 @@
 	int rv=0;
 
 	mdev = minor_to_mdev(MINOR(inode->i_rdev));
-	if(!mdev) return -ENODEV;
+	if (!mdev) return -ENODEV;
 
 	spin_lock_irqsave(&mdev->req_lock,flags);
 	/* to have a stable mdev->state.role and no race with updating open_cnt */
@@ -1889,7 +1889,7 @@
 		}
 	}
 
-	if(!rv) mdev->open_cnt++;
+	if (!rv) mdev->open_cnt++;
 	spin_unlock_irqrestore(&mdev->req_lock,flags);
 
 	return rv;
@@ -1901,7 +1901,7 @@
 	drbd_dev *mdev;
 
 	mdev = minor_to_mdev(MINOR(inode->i_rdev));
-	if(!mdev) return -ENODEV;
+	if (!mdev) return -ENODEV;
 
 	/*
 	printk(KERN_ERR DEVICE_NAME ": close(inode=%p,file=%p)"
@@ -1943,7 +1943,7 @@
 	}
 	spin_unlock_irq(&mdev->req_lock);
 
-	if(mdev->state.disk >= Inconsistent) drbd_kick_lo(mdev);
+	if (mdev->state.disk >= Inconsistent) drbd_kick_lo(mdev);
 }
 
 void drbd_set_defaults(drbd_dev *mdev)
@@ -2073,7 +2073,7 @@
 	drbd_thread_stop(&mdev->receiver);
 
 	/* no need to lock it, I'm the only thread alive */
-	if ( mdev->epoch_size !=  0)
+	if (mdev->epoch_size !=  0)
 		ERR("epoch_size:%d\n",mdev->epoch_size);
 	mdev->al_writ_cnt  =
 	mdev->bm_writ_cnt  =
@@ -2179,7 +2179,7 @@
 
 	for (i=0;i< number;i++) {
 		page = alloc_page(GFP_HIGHUSER);
-		if(!page) goto Enomem;
+		if (!page) goto Enomem;
 		set_page_private(page,(unsigned long)drbd_pp_pool);
 		drbd_pp_pool = page;
 	}
@@ -2224,7 +2224,7 @@
 			struct gendisk  **disk = &mdev->vdisk;
 			request_queue_t **q    = &mdev->rq_queue;
 
-			if(!mdev) continue;
+			if (!mdev) continue;
 			drbd_free_resources(mdev);
 
 			if (*disk) {
@@ -2243,19 +2243,19 @@
 			if (mdev->resync) lc_free(mdev->resync);
 
 			rr = drbd_release_ee(mdev,&mdev->active_ee);
-			if(rr) ERR("%d EEs in active list found!\n",rr);
+			if (rr) ERR("%d EEs in active list found!\n",rr);
 
 			rr = drbd_release_ee(mdev,&mdev->sync_ee);
-			if(rr) ERR("%d EEs in sync list found!\n",rr);
+			if (rr) ERR("%d EEs in sync list found!\n",rr);
 
 			rr = drbd_release_ee(mdev,&mdev->read_ee);
-			if(rr) ERR("%d EEs in read list found!\n",rr);
+			if (rr) ERR("%d EEs in read list found!\n",rr);
 
 			rr = drbd_release_ee(mdev,&mdev->done_ee);
-			if(rr) ERR("%d EEs in done list found!\n",rr);
+			if (rr) ERR("%d EEs in done list found!\n",rr);
 
 			rr = drbd_release_ee(mdev,&mdev->net_ee);
-			if(rr) ERR("%d EEs in net list found!\n",rr);
+			if (rr) ERR("%d EEs in net list found!\n",rr);
 
 			ERR_IF (!list_empty(&mdev->data.work.q)) {
 				struct list_head *lp;
@@ -2272,21 +2272,21 @@
 
 			if (mdev->act_log) lc_free(mdev->act_log);
 
-			if(mdev->ee_hash) {
+			if (mdev->ee_hash) {
 				kfree(mdev->ee_hash);
 				mdev->ee_hash_s = 0;
 				mdev->ee_hash = NULL;
 			}
-			if(mdev->tl_hash) {
+			if (mdev->tl_hash) {
 				kfree(mdev->tl_hash);
 				mdev->tl_hash_s = 0;
 				mdev->tl_hash = NULL;
 			}
-			if(mdev->app_reads_hash) {
+			if (mdev->app_reads_hash) {
 				kfree(mdev->app_reads_hash);
 				mdev->app_reads_hash = NULL;
 			}
-			if ( mdev->p_uuid ) {
+			if (mdev->p_uuid) {
 				kfree(mdev->p_uuid);
 				mdev->p_uuid = NULL;
 			}
@@ -2309,7 +2309,7 @@
 	request_queue_t *q;
 
 	mdev = kzalloc(sizeof(drbd_dev),GFP_KERNEL);
-	if(!mdev) goto Enomem;
+	if (!mdev) goto Enomem;
 
 	mdev->minor = minor;
 
@@ -2346,7 +2346,7 @@
 	q->unplug_fn = drbd_unplug_fn;
 
 	mdev->md_io_page = alloc_page(GFP_KERNEL);
-	if(!mdev->md_io_page) goto Enomem;
+	if (!mdev->md_io_page) goto Enomem;
 
 	if (drbd_bm_init(mdev)) goto Enomem;
 	// no need to lock access, we are still initializing the module.
@@ -2358,9 +2358,9 @@
 	return mdev;
 
  Enomem:
-	if(mdev) {
-		if(mdev->app_reads_hash) kfree(mdev->app_reads_hash);
-		if(mdev->md_io_page) __free_page(mdev->md_io_page);
+	if (mdev) {
+		if (mdev->app_reads_hash) kfree(mdev->app_reads_hash);
+		if (mdev->md_io_page) __free_page(mdev->md_io_page);
 		kfree(mdev);
 	}
 	return NULL;
@@ -2423,7 +2423,7 @@
 #endif
 	}
 
-	if( (err = drbd_nl_init()) ) {
+	if ( (err = drbd_nl_init()) ) {
 		return err;
 	}
 
@@ -2446,7 +2446,7 @@
 
 	drbd_proc = NULL; // play safe for drbd_cleanup
 	minor_table = kzalloc(sizeof(drbd_dev *)*minor_count,GFP_KERNEL);
-	if(!minor_table) goto Enomem;
+	if (!minor_table) goto Enomem;
 
 	if ((err = drbd_create_mempools()))
 		goto Enomem;
@@ -2488,7 +2488,7 @@
 
 void drbd_free_bc(struct drbd_backing_dev* bc)
 {
-	if(bc == NULL) return;
+	if (bc == NULL) return;
 
 	BD_RELEASE(bc->backing_bdev);
 	BD_RELEASE(bc->md_bdev);
@@ -2514,7 +2514,7 @@
 
 void drbd_free_resources(drbd_dev *mdev)
 {
-	if ( mdev->cram_hmac_tfm ) {
+	if (mdev->cram_hmac_tfm) {
 		crypto_free_hash(mdev->cram_hmac_tfm);
 		mdev->cram_hmac_tfm = NULL;
 	}
@@ -2558,7 +2558,7 @@
 
 	// We use Failed here and not Attaching because we try to write
 	// metadata even if we detach due to a disk failure!
-	if(!inc_local_if_state(mdev,Failed)) return;
+	if (!inc_local_if_state(mdev,Failed)) return;
 
 	INFO("Writing meta data super block now.\n");
 
@@ -2621,7 +2621,7 @@
 	struct meta_data_on_disk * buffer;
 	int i,rv = NoError;
 
-	if(!inc_local_if_state(mdev,Attaching)) return MDIOError;
+	if (!inc_local_if_state(mdev,Attaching)) return MDIOError;
 
 	down(&mdev->md_io_mutex);
 	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
@@ -2634,7 +2634,7 @@
 		goto err;
 	}
 
-	if(be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
+	if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
 		ERR("Error while reading metadata, magic not found.\n");
 		rv = MDInvalid;
 		goto err;
@@ -2713,7 +2713,7 @@
 
 void _drbd_uuid_set(drbd_dev *mdev, int idx, u64 val)
 {
-	if(idx == Current) {
+	if (idx == Current) {
 		if (mdev->state.role == Primary) {
 			val |= 1;
 		} else {
@@ -2733,7 +2733,7 @@
 
 void drbd_uuid_set(drbd_dev *mdev, int idx, u64 val)
 {
-	if(mdev->bc->md.uuid[idx]) {
+	if (mdev->bc->md.uuid[idx]) {
 		drbd_uuid_move_history(mdev);
 		mdev->bc->md.uuid[History_start]=mdev->bc->md.uuid[idx];
 		MTRACE(TraceTypeUuid,TraceLvlMetrics,
@@ -2768,9 +2768,9 @@
 
 void drbd_uuid_set_bm(drbd_dev *mdev, u64 val)
 {
-	if( mdev->bc->md.uuid[Bitmap]==0 && val==0 ) return;
+	if (mdev->bc->md.uuid[Bitmap]==0 && val==0) return;
 
-	if(val==0) {
+	if (val==0) {
 		drbd_uuid_move_history(mdev);
 		mdev->bc->md.uuid[History_start]=mdev->bc->md.uuid[Bitmap];
 		mdev->bc->md.uuid[Bitmap]=0;
@@ -2780,7 +2780,7 @@
 		       drbd_print_uuid(mdev,Bitmap);
 			);
 	} else {
-		if( mdev->bc->md.uuid[Bitmap] ) WARN("bm UUID already set");
+		if (mdev->bc->md.uuid[Bitmap]) WARN("bm UUID already set");
 
 		mdev->bc->md.uuid[Bitmap] = val;
 		mdev->bc->md.uuid[Bitmap] &= ~((u64)1);
@@ -3057,7 +3057,7 @@
 
 #define PSM(A) \
 do { \
-	if( mask.A ) { \
+	if (mask.A) { \
 		int i = snprintf(p, len, " " #A "( %s )", \
 				A##s_to_name(val.A)); \
 		if (i >= len) return op; \

Modified: branches/drbd-8.0-for-linus/drbd/drbd_nl.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_nl.c	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_nl.c	2007-07-24 09:44:10 UTC (rev 2984)
@@ -57,7 +57,7 @@
 		switch( tag_number(tag) ) { \
 		fields \
 		default: \
-			if( tag & T_MANDATORY ) { \
+			if (tag & T_MANDATORY) { \
 				ERR("Unknown tag: %d\n",tag_number(tag)); \
 				return 0; \
 			} \
@@ -188,14 +188,14 @@
 	D_ASSERT(mdev->state.pdsk == DUnknown);
 
 	fp = DontCare;
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		fp = mdev->bc->dc.fencing;
 		dec_local(mdev);
 	}
 
 	D_ASSERT( fp > DontCare );
 
-	if( fp == Stonith ) drbd_request_state(mdev,NS(susp,1));
+	if (fp == Stonith) drbd_request_state(mdev,NS(susp,1));
 
 	r=drbd_khelper(mdev,"outdate-peer");
 
@@ -216,7 +216,7 @@
 		drbd_request_state(mdev,NS(disk,Outdated));
 		break;
 	case 7:
-		if( fp != Stonith ) {
+		if (fp != Stonith) {
 			ERR("outdate-peer() = 7 && fencing != Stonith !!!\n");
 		}
 		nps = Outdated;
@@ -240,7 +240,7 @@
 	drbd_state_t mask, val;
 	drbd_disks_t nps;
 
-	if ( new_role == Primary ) {
+	if (new_role == Primary) {
 		request_ping(mdev); // Detect a dead peer ASAP
 	}
 
@@ -249,7 +249,7 @@
 
 	while (try++ < 3) {
 		r = _drbd_request_state(mdev,mask,val,0);
-		if( r == SS_NoUpToDateDisk && force &&
+		if ( r == SS_NoUpToDateDisk && force &&
 		    ( mdev->state.disk == Inconsistent ||
 		      mdev->state.disk == Outdated ) ) {
 			mask.disk = disk_mask;
@@ -258,12 +258,12 @@
 			continue;
 		}
 
-		if( r == SS_NoUpToDateDisk &&
+		if ( r == SS_NoUpToDateDisk &&
 		    mdev->state.disk == Consistent ) {
 			D_ASSERT(mdev->state.pdsk == DUnknown);
 			nps = drbd_try_outdate_peer(mdev);
 
-			if(nps == Outdated) {
+			if (nps == Outdated) {
 				val.disk = UpToDate;
 				mask.disk = disk_mask;
 			}
@@ -274,11 +274,11 @@
 			continue;
 		}
 
-		if ( r == SS_NothingToDo ) goto fail;
-		if ( r == SS_PrimaryNOP ) {
+		if (r == SS_NothingToDo) goto fail;
+		if (r == SS_PrimaryNOP) {
 			nps = drbd_try_outdate_peer(mdev);
 
-			if ( force && nps > Outdated ) {
+			if (force && nps > Outdated) {
 				WARN("Forced into split brain situation!\n");
 				nps = Outdated;
 			}
@@ -288,21 +288,21 @@
 
 			continue;
 		}
-		if( r == SS_TwoPrimaries ) {
+		if (r == SS_TwoPrimaries) {
 			// Maybe the peer is detected as dead very soon...
 			set_current_state(TASK_INTERRUPTIBLE);
 			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
-			if(try == 1) try++; // only a single retry in this case.
+			if (try == 1) try++; // only a single retry in this case.
 			continue;
 		}
-		if ( r < SS_Success ) {
+		if (r < SS_Success) {
 			r = drbd_request_state(mdev,mask,val); // Be verbose.
-			if( r < SS_Success ) goto fail;
+			if (r < SS_Success) goto fail;
 		}
 		break;
 	}
 
-	if(forced) WARN("Forced to conisder local data as UpToDate!\n");
+	if (forced) WARN("Forced to conisder local data as UpToDate!\n");
 
 	fsync_bdev(mdev->this_bdev);
 
@@ -326,7 +326,7 @@
 			dec_local(mdev);
 		}
 	} else {
-		if(inc_net(mdev)) {
+		if (inc_net(mdev)) {
 			mdev->net_conf->want_lose = 0;
 			dec_net(mdev);
 		}
@@ -346,14 +346,14 @@
 		}
 	}
 
-	if((new_role == Secondary) && inc_local(mdev) ) {
+	if ((new_role == Secondary) && inc_local(mdev) ) {
 		drbd_al_to_on_disk_bm(mdev);
 		dec_local(mdev);
 	}
 
 	if (mdev->state.conn >= WFReportParams) {
 		/* if this was forced, we should consider sync */
-		if(forced) drbd_send_uuids(mdev);
+		if (forced) drbd_send_uuids(mdev);
 		drbd_send_state(mdev);
 	}
 
@@ -372,7 +372,7 @@
 	struct primary primary_args;
 
 	memset(&primary_args, 0, sizeof(struct primary));
-	if(!primary_from_tags(mdev,nlp->tag_list,&primary_args)) {
+	if (!primary_from_tags(mdev,nlp->tag_list,&primary_args)) {
 		reply->ret_code=UnknownMandatoryTag;
 		return 0;
 	}
@@ -471,7 +471,7 @@
 
 	size = drbd_new_dev_size(mdev,mdev->bc);
 
-	if( drbd_get_capacity(mdev->this_bdev) != size ||
+	if ( drbd_get_capacity(mdev->this_bdev) != size ||
 	    drbd_bm_capacity(mdev) != size ) {
 		int err;
 		err = drbd_bm_resize(mdev,size);
@@ -503,13 +503,13 @@
 	md_moved = prev_first_sect != drbd_md_first_sector(mdev->bc)
 		|| prev_size       != mdev->bc->md.md_size_sect;
 
-	if ( md_moved ) {
+	if (md_moved) {
 		WARN("Moving meta-data.\n");
 		/* assert: (flexible) internal meta data */
 	}
 
-	if ( la_size_changed || md_moved ) {
-		if( inc_local_if_state(mdev,Attaching) ) {
+	if (la_size_changed || md_moved) {
+		if ( inc_local_if_state(mdev,Attaching) ) {
 			drbd_al_shrink(mdev); // All extents inactive.
 			rv = drbd_bm_write(mdev);  // write bitmap
 			// Write mdev->la_size to on disk.
@@ -534,25 +534,25 @@
 
 	m_size = drbd_get_max_capacity(bdev);
 
-	if(p_size && m_size) {
+	if (p_size && m_size) {
 		size=min_t(sector_t,p_size,m_size);
 	} else {
-		if(la_size) {
+		if (la_size) {
 			size=la_size;
-			if(m_size && m_size < size) size=m_size;
-			if(p_size && p_size < size) size=p_size;
+			if (m_size && m_size < size) size=m_size;
+			if (p_size && p_size < size) size=p_size;
 		} else {
-			if(m_size) size=m_size;
-			if(p_size) size=p_size;
+			if (m_size) size=m_size;
+			if (p_size) size=p_size;
 		}
 	}
 
-	if(size == 0) {
+	if (size == 0) {
 		ERR("Both nodes diskless!\n");
 	}
 
-	if(u_size) {
-		if(u_size<<1 > size) {
+	if (u_size) {
+		if (u_size<<1 > size) {
 			ERR("Requested disk size is too big (%lu > %lu)\n",
 			    (unsigned long)u_size, (unsigned long)size>>1);
 		} else {
@@ -652,7 +652,7 @@
 	// t->max_segment_size = min_not_zero(...,...)
 
 	// workaround here:
-	if(q->max_segment_size == 0) q->max_segment_size = max_seg_s;
+	if (q->max_segment_size == 0) q->max_segment_size = max_seg_s;
 
 	MTRACE(TraceTypeRq,TraceLvlSummary,
 	       DUMPI(q->max_sectors);
@@ -663,13 +663,13 @@
 	       DUMPI(q->seg_boundary_mask);
 	       );
 
-	if(b->merge_bvec_fn) {
+	if (b->merge_bvec_fn) {
 		WARN("Backing device's merge_bvec_fn() = %p\n",
 		     b->merge_bvec_fn);
 	}
 	INFO("max_segment_size ( = BIO size ) = %u\n", q->max_segment_size);
 
-	if( q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
 		INFO("Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
 		     q->backing_dev_info.ra_pages,
 		     b->backing_dev_info.ra_pages);
@@ -700,7 +700,7 @@
         * then fail.
         */
 	while(mdev->bc != NULL) {
-		if(ntries++ >= 5) {
+		if (ntries++ >= 5) {
 			WARN("drbd_nl_disk_conf: mdev->bc not NULL.\n");
 			retcode=HaveDiskConfig;
 			goto fail;
@@ -710,12 +710,12 @@
 	}
 
 	nbc = kmalloc(sizeof(struct drbd_backing_dev),GFP_KERNEL);
-	if(!nbc) {
+	if (!nbc) {
 		retcode=KMallocFailed;
 		goto fail;
 	}
 
-	if( !(nlp->flags & DRBD_NL_SET_DEFAULTS) && inc_local(mdev) ) {
+	if ( !(nlp->flags & DRBD_NL_SET_DEFAULTS) && inc_local(mdev) ) {
 		memcpy(&nbc->dc,&mdev->bc->dc,sizeof(struct disk_conf));
 		dec_local(mdev);
 	} else {
@@ -725,7 +725,7 @@
 		nbc->dc.fencing     = DRBD_FENCING_DEF;
 	}
 
-	if(!disk_conf_from_tags(mdev,nlp->tag_list,&nbc->dc)) {
+	if (!disk_conf_from_tags(mdev,nlp->tag_list,&nbc->dc)) {
 		retcode=UnknownMandatoryTag;
 		goto fail;
 	}
@@ -733,7 +733,7 @@
 	nbc->lo_file = NULL;
 	nbc->md_file = NULL;
 
-	if ( nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
+	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
 		retcode=LDMDInvalid;
 		goto fail;
 	}
@@ -782,7 +782,7 @@
 	}
 
 	resync_lru = lc_alloc("resync",31, sizeof(struct bm_extent),mdev);
-	if(!resync_lru) {
+	if (!resync_lru) {
 		retcode=KMallocFailed;
 		goto fail;
 	}
@@ -816,7 +816,7 @@
 		goto release_bdev2_fail;
 	}
 
-	if ( nbc->dc.meta_dev_idx == -1 ) i = 1;
+	if (nbc->dc.meta_dev_idx == -1) i = 1;
 	else i = nbc->dc.meta_dev_idx+1;
 
 	/* for internal, we need to check against <= (then we have a drbd with
@@ -828,7 +828,7 @@
 	 * FIXME this is arbitrary and needs to be reconsidered as soon as we
 	 * move to flexible size meta data.
 	 */
-	if( drbd_get_capacity(nbc->md_bdev) < 2*MD_RESERVED_SIZE*i
+	if ( drbd_get_capacity(nbc->md_bdev) < 2*MD_RESERVED_SIZE*i
 				+ (nbc->dc.meta_dev_idx == -1) ? (1<<16) : 0 )
 	{
 		retcode = MDDeviceTooSmall;
@@ -844,14 +844,14 @@
 		goto release_bdev2_fail;
 	}
 
-	if((retcode = drbd_request_state(mdev,NS(disk,Attaching))) < SS_Success ) {
+	if ((retcode = drbd_request_state(mdev,NS(disk,Attaching))) < SS_Success ) {
 		goto release_bdev2_fail;
 	}
 
 	drbd_md_set_sector_offsets(mdev,nbc);
 
 	retcode = drbd_md_read(mdev,nbc);
-	if ( retcode != NoError ) {
+	if (retcode != NoError) {
 		goto force_diskless;
 	}
 
@@ -862,13 +862,13 @@
 	}
 
 	// Prevent shrinking of consistent devices !
-	if(drbd_md_test_flag(nbc,MDF_Consistent) &&
+	if (drbd_md_test_flag(nbc,MDF_Consistent) &&
 	   drbd_new_dev_size(mdev,nbc) < nbc->md.la_size_sect) {
 		retcode = LDDeviceTooSmall;
 		goto force_diskless;
 	}
 
-	if(!drbd_al_read_log(mdev,nbc)) {
+	if (!drbd_al_read_log(mdev,nbc)) {
 		retcode = MDIOError;
 		goto force_diskless;
 	}
@@ -883,7 +883,7 @@
 	nbc = NULL;
 	resync_lru = NULL;
 
-	if(drbd_md_test_flag(mdev->bc,MDF_PrimaryInd)) {
+	if (drbd_md_test_flag(mdev->bc,MDF_PrimaryInd)) {
 		set_bit(CRASHED_PRIMARY, &mdev->flags);
 	} else {
 		clear_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -939,7 +939,7 @@
 		}
 	}
 
-	if(test_bit(CRASHED_PRIMARY, &mdev->flags)) {
+	if (test_bit(CRASHED_PRIMARY, &mdev->flags)) {
 		drbd_al_apply_to_bm(mdev);
 		drbd_al_to_on_disk_bm(mdev);
 	}
@@ -955,8 +955,8 @@
 	   If MDF_WasUpToDate is not set go into Outdated disk state,
 	   otherwise into Consistent state.
 	*/
-	if(drbd_md_test_flag(mdev->bc,MDF_Consistent)) {
-		if(drbd_md_test_flag(mdev->bc,MDF_WasUpToDate)) {
+	if (drbd_md_test_flag(mdev->bc,MDF_Consistent)) {
+		if (drbd_md_test_flag(mdev->bc,MDF_WasUpToDate)) {
 			ns.disk = Consistent;
 		} else {
 			ns.disk = Outdated;
@@ -965,11 +965,11 @@
 		ns.disk = Inconsistent;
 	}
 
-	if(drbd_md_test_flag(mdev->bc,MDF_PeerOutDated)) {
+	if (drbd_md_test_flag(mdev->bc,MDF_PeerOutDated)) {
 		ns.pdsk = Outdated;
 	}
 
-	if( ns.disk == Consistent &&
+	if ( ns.disk == Consistent &&
 	    ( ns.pdsk == Outdated || mdev->bc->dc.fencing == DontCare ) ) {
 		ns.disk = UpToDate;
 	}
@@ -981,7 +981,7 @@
 
 	/* In case we are Connected, postpone any decision on the new disk
 	   state after the negotiation phase. */
-	if(mdev->state.conn == Connected) {
+	if (mdev->state.conn == Connected) {
 		mdev->new_state_tmp.i = ns.i;
 		ns.i = os.i;
 		ns.disk = Negotiating;
@@ -998,8 +998,8 @@
 
 	drbd_bm_unlock(mdev);
 
-	if(inc_local_if_state(mdev,Attaching)) {
-		if(mdev->state.role == Primary) mdev->bc->md.uuid[Current] |=  (u64)1;
+	if (inc_local_if_state(mdev,Attaching)) {
+		if (mdev->state.role == Primary) mdev->bc->md.uuid[Current] |=  (u64)1;
 		else                            mdev->bc->md.uuid[Current] &= ~(u64)1;
 		dec_local(mdev);
 	}
@@ -1059,12 +1059,12 @@
 	}
 
 	new_conf = kmalloc(sizeof(struct net_conf),GFP_KERNEL);
-	if(!new_conf) {
+	if (!new_conf) {
 		retcode=KMallocFailed;
 		goto fail;
 	}
 
-	if( !(nlp->flags & DRBD_NL_SET_DEFAULTS) && inc_net(mdev)) {
+	if ( !(nlp->flags & DRBD_NL_SET_DEFAULTS) && inc_net(mdev)) {
 		memcpy(new_conf,mdev->net_conf,sizeof(struct net_conf));
 		dec_local(mdev);
 	} else {
@@ -1097,7 +1097,7 @@
 		goto fail;
 	};
 
-	if( mdev->state.role == Primary && new_conf->want_lose ) {
+	if (mdev->state.role == Primary && new_conf->want_lose) {
 		retcode=DiscardNotAllowed;
 		goto fail;
 	}
@@ -1109,18 +1109,18 @@
 	retcode = NoError;
 	for(i=0;i<minor_count;i++) {
 		odev = minor_to_mdev(i);
-		if(!odev || odev == mdev) continue;
-		if( inc_net(odev)) {
-			if( M_ADDR(new_conf) == M_ADDR(odev->net_conf) &&
+		if (!odev || odev == mdev) continue;
+		if ( inc_net(odev)) {
+			if ( M_ADDR(new_conf) == M_ADDR(odev->net_conf) &&
 			    M_PORT(new_conf) == M_PORT(odev->net_conf) ) {
 				retcode=LAAlreadyInUse;
 			}
-			if(O_ADDR(new_conf) == O_ADDR(odev->net_conf) &&
+			if (O_ADDR(new_conf) == O_ADDR(odev->net_conf) &&
 			   O_PORT(new_conf) == O_PORT(odev->net_conf) ) {
 				retcode=OAAlreadyInUse;
 			}
 			dec_net(odev);
-			if(retcode != NoError) goto fail;
+			if (retcode != NoError) goto fail;
 		}
 	}
 #undef M_ADDR
@@ -1128,7 +1128,7 @@
 #undef O_ADDR
 #undef O_PORT
 
-	if( new_conf->cram_hmac_alg[0] != 0) {
+	if (new_conf->cram_hmac_alg[0] != 0) {
 		snprintf(hmac_name,HMAC_NAME_L,"hmac(%s)",new_conf->cram_hmac_alg);
 		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
 		if (IS_ERR(tfm)) {
@@ -1147,7 +1147,7 @@
 	ns = new_conf->max_epoch_size/8;
 	if (mdev->tl_hash_s != ns) {
 		new_tl_hash=kzalloc(ns*sizeof(void*), GFP_KERNEL);
-		if(!new_tl_hash) {
+		if (!new_tl_hash) {
 			retcode=KMallocFailed;
 			goto fail;
 		}
@@ -1156,7 +1156,7 @@
 	ns = new_conf->max_buffers/8;
 	if (new_conf->two_primaries && ( mdev->ee_hash_s != ns ) ) {
 		new_ee_hash=kzalloc(ns*sizeof(void*), GFP_KERNEL);
-		if(!new_ee_hash) {
+		if (!new_ee_hash) {
 			retcode=KMallocFailed;
 			goto fail;
 		}
@@ -1195,19 +1195,19 @@
 	mdev->send_cnt = 0;
 	mdev->recv_cnt = 0;
 
-	if(new_tl_hash) {
+	if (new_tl_hash) {
 		if (mdev->tl_hash) kfree(mdev->tl_hash);
 		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
 		mdev->tl_hash = new_tl_hash;
 	}
 
-	if(new_ee_hash) {
+	if (new_ee_hash) {
 		if (mdev->ee_hash) kfree(mdev->ee_hash);
 		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
 		mdev->ee_hash = new_ee_hash;
 	}
 
-	if ( mdev->cram_hmac_tfm ) {
+	if (mdev->cram_hmac_tfm) {
 		crypto_free_hash(mdev->cram_hmac_tfm);
 	}
 	mdev->cram_hmac_tfm = tfm;
@@ -1234,9 +1234,9 @@
 
 	retcode = _drbd_request_state(mdev,NS(conn,Disconnecting),0);	// silently.
 
-	if ( retcode == SS_NothingToDo ) goto done;
-	else if ( retcode == SS_AlreadyStandAlone ) goto done;
-	else if ( retcode == SS_PrimaryNOP ) {
+	if (retcode == SS_NothingToDo) goto done;
+	else if (retcode == SS_AlreadyStandAlone) goto done;
+	else if (retcode == SS_PrimaryNOP) {
 		// Our state checking code wants to see the peer outdated.
 		retcode = drbd_request_state(mdev,NS2(conn,Disconnecting,
 						      pdsk,Outdated));
@@ -1244,7 +1244,7 @@
 		// The peer probably wants to see us outdated.
 		retcode = _drbd_request_state(mdev,NS2(conn,Disconnecting,
 						       disk,Outdated),0);
-		if( retcode == SS_IsDiskLess ) {
+		if (retcode == SS_IsDiskLess) {
 			// We are diskless and our peer wants to outdate us.
 			// So, simply go away, and let the peer try to
 			// outdate us with its 'outdate-peer' handler later.
@@ -1252,9 +1252,9 @@
 		}
 	}
 
-	if( retcode < SS_Success ) goto fail;
+	if (retcode < SS_Success) goto fail;
 
-	if( wait_event_interruptible( mdev->misc_wait,
+	if ( wait_event_interruptible( mdev->misc_wait,
 				      mdev->state.conn==StandAlone) ) {
 		retcode = GotSignal;
 		goto fail;
@@ -1291,7 +1291,7 @@
 		goto fail;
 	}
 
-	if(!inc_local(mdev)) {
+	if (!inc_local(mdev)) {
 		retcode = HaveNoDiskConfig;
 		goto fail;
 	}
@@ -1322,7 +1322,7 @@
 
 	memcpy(&sc,&mdev->sync_conf,sizeof(struct syncer_conf));
 
-	if(nlp->flags & DRBD_NL_SET_DEFAULTS) {
+	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
 		sc.rate       = DRBD_RATE_DEF;
 		sc.after      = DRBD_AFTER_DEF;
 		sc.al_extents = DRBD_AL_EXTENTS_DEF;
@@ -1333,18 +1333,18 @@
 		goto fail;
 	}
 
-	if( sc.after != -1) {
-		if( sc.after < -1 || minor_to_mdev(sc.after) == NULL ) {
+	if (sc.after != -1) {
+		if ( sc.after < -1 || minor_to_mdev(sc.after) == NULL ) {
 			retcode=SyncAfterInvalid;
 			goto fail;
 		}
 		odev = minor_to_mdev(sc.after); // check against loops in
 		while(1) {
-			if( odev == mdev ) {
+			if (odev == mdev) {
 				retcode=SyncAfterCycle;
 				goto fail;
 			}
-			if( odev->sync_conf.after == -1 ) break; // no cycles.
+			if (odev->sync_conf.after == -1) break; // no cycles.
 			odev = minor_to_mdev(odev->sync_conf.after);
 		}
 	}
@@ -1352,7 +1352,7 @@
 	ERR_IF (sc.rate < 1) sc.rate = 1;
 	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; // arbitrary minimum
 #define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
-	if(sc.al_extents > AL_MAX) {
+	if (sc.al_extents > AL_MAX) {
 		ERR("sc.al_extents > %d\n",AL_MAX);
 		sc.al_extents = AL_MAX;
 	}
@@ -1360,7 +1360,7 @@
 
 	mdev->sync_conf = sc;
 
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		err = drbd_check_al_size(mdev);
 		dec_local(mdev);
 		drbd_md_sync(mdev);
@@ -1404,7 +1404,7 @@
 {
 	int retcode=NoError;
 
-	if(drbd_request_state(mdev,NS(user_isp,1)) == SS_NothingToDo)
+	if (drbd_request_state(mdev,NS(user_isp,1)) == SS_NothingToDo)
 		retcode = PauseFlagAlreadySet;
 
 	reply->ret_code = retcode;
@@ -1416,7 +1416,7 @@
 {
 	int retcode=NoError;
 
-	if(drbd_request_state(mdev,NS(user_isp,0)) == SS_NothingToDo)
+	if (drbd_request_state(mdev,NS(user_isp,0)) == SS_NothingToDo)
 		retcode = PauseFlagAlreadyClear;
 
 	reply->ret_code = retcode;
@@ -1446,7 +1446,7 @@
 
 	spin_lock_irq(&mdev->req_lock);
 	os = mdev->state;
-	if( mdev->state.disk < Outdated ) {
+	if (mdev->state.disk < Outdated) {
 		retcode = -999;
 	} else {
 		retcode = _drbd_set_state(_NS(mdev,disk,Outdated),ChgStateVerbose);
@@ -1455,7 +1455,7 @@
 	spin_unlock_irq(&mdev->req_lock);
 	if (retcode==SS_Success) after_state_ch(mdev,os,ns, ChgStateVerbose);
 
-	if( retcode == -999 ) {
+	if (retcode == -999) {
 		retcode = DiskLowerThanOutdated;
 		goto fail;
 	}
@@ -1474,12 +1474,12 @@
 
 	tl = reply->tag_list;
 
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		tl = disk_conf_to_tags(mdev,&mdev->bc->dc,tl);
 		dec_local(mdev);
 	}
 
-	if(inc_net(mdev)) {
+	if (inc_net(mdev)) {
 		tl = net_conf_to_tags(mdev,mdev->net_conf,tl);
 		dec_net(mdev);
 	}
@@ -1510,7 +1510,7 @@
 
 	tl = reply->tag_list;
 
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		// This is a hand crafted add tag ;)
 		*tl++ = T_uuids;
 		*tl++ = UUID_SIZE*sizeof(u64);
@@ -1551,19 +1551,19 @@
 
 	mdev = minor_to_mdev(nlp->drbd_minor);
 
-	if(!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
+	if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
 		mdev = drbd_new_device(nlp->drbd_minor);
 
 		spin_lock_irq(&drbd_pp_lock);
-		if( minor_table[nlp->drbd_minor] == NULL) {
+		if (minor_table[nlp->drbd_minor] == NULL) {
 			minor_table[nlp->drbd_minor] = mdev;
 			mdev = NULL;
 		}
 		spin_unlock_irq(&drbd_pp_lock);
 
-		if(mdev) {
-			if(mdev->app_reads_hash) kfree(mdev->app_reads_hash);
-			if(mdev->md_io_page) __free_page(mdev->md_io_page);
+		if (mdev) {
+			if (mdev->app_reads_hash) kfree(mdev->app_reads_hash);
+			if (mdev->md_io_page) __free_page(mdev->md_io_page);
 			kfree(mdev);
 			mdev = NULL;
 		}
@@ -1623,19 +1623,19 @@
 		+ sizeof(struct drbd_nl_cfg_reply)
 		+ sizeof(short int);
 
-	if(!try_module_get(THIS_MODULE)) {
+	if (!try_module_get(THIS_MODULE)) {
 		printk(KERN_ERR DEVICE_NAME "try_module_get() failed!\n");
 		return;
 	}
 
-	if( !(mdev = ensure_mdev(nlp)) ) {
+	if ( !(mdev = ensure_mdev(nlp)) ) {
 		retcode=MinorNotKnown;
 		goto fail;
 	}
 
 	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_packet(data););
 
-	if( nlp->packet_type >= P_nl_after_last_packet ) {
+	if (nlp->packet_type >= P_nl_after_last_packet) {
 		retcode=UnknownNetLinkPacket;
 		goto fail;
 	}
@@ -1643,7 +1643,7 @@
 	cm = cnd_table + nlp->packet_type;
 	reply_size += cm->reply_body_size;
 
-	if( !(cn_reply = kmalloc(reply_size,GFP_KERNEL)) ) {
+	if ( !(cn_reply = kmalloc(reply_size,GFP_KERNEL)) ) {
 		retcode=KMallocFailed;
 		goto fail;
 	}
@@ -1665,7 +1665,7 @@
 	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
 
 	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
-	if(rr && rr != -ESRCH) {
+	if (rr && rr != -ESRCH) {
 		printk(KERN_INFO DEVICE_NAME " cn_netlink_send()=%d\n",rr);
 	}
 	kfree(cn_reply);
@@ -1815,7 +1815,7 @@
 	if(err) return err;
 #endif
 	err = cn_add_callback(&cn_id_drbd,"cn_drbd",&drbd_connector_callback);
-	if(err) {
+	if (err) {
 		printk(KERN_ERR DEVICE_NAME "cn_drbd failed to register\n");
 		return err;
 	}
@@ -1856,7 +1856,7 @@
 	TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
 
 	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
-	if(rr && rr != -ESRCH) {
+	if (rr && rr != -ESRCH) {
 		printk(KERN_INFO DEVICE_NAME " cn_netlink_send()=%d\n",rr);
 	}
 }

Modified: branches/drbd-8.0-for-linus/drbd/drbd_proc.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_proc.c	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_proc.c	2007-07-24 09:44:10 UTC (rev 2984)
@@ -194,11 +194,11 @@
 
 	for (i = 0; i < minor_count; i++) {
 		mdev = minor_to_mdev(i);
-		if(!mdev) {
+		if (!mdev) {
 			hole=1;
 			continue;
 		}
-		if( hole ) {
+		if (hole) {
 			hole=0;
 			seq_printf( seq, "\n");
 		}
@@ -241,14 +241,14 @@
 		     mdev->state.conn == SyncTarget ) {
 			drbd_syncer_progress(mdev,seq);
 		}
-		if(mdev->resync) {
+		if (mdev->resync) {
 			lc_printf_stats(seq,mdev->resync);
 		}
-		if(mdev->act_log) {
+		if (mdev->act_log) {
 			lc_printf_stats(seq,mdev->act_log);
 		}
 #if 0
-		if(mdev->resync) {
+		if (mdev->resync) {
 			lc_dump(mdev->resync,seq,"rs_left",
 				resync_dump_detail);
 		}

Modified: branches/drbd-8.0-for-linus/drbd/drbd_receiver.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_receiver.c	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_receiver.c	2007-07-24 09:44:10 UTC (rev 2984)
@@ -76,13 +76,13 @@
 	do {
 		la=le;
 		le=le->next;
-		if( le->prev != la ) {
+		if (le->prev != la) {
 			printk(KERN_ERR DEVICE_NAME
 			       "%d: %s list fucked.\n",
 			       mdev_to_minor(mdev),t);
 			break;
 		}
-		if( forward++ > CHECK_LIST_LIMIT ) {
+		if (forward++ > CHECK_LIST_LIMIT) {
 			printk(KERN_ERR DEVICE_NAME
 			       "%d: %s forward > 1000\n",
 			       mdev_to_minor(mdev),t);
@@ -94,13 +94,13 @@
 	do {
 		la=le;
 		le=le->prev;
-		if( le->next != la ) {
+		if (le->next != la) {
 			printk(KERN_ERR DEVICE_NAME
 			       "%d: %s list fucked.\n",
 			       mdev_to_minor(mdev),t);
 			break;
 		}
-		if( backward++ > CHECK_LIST_LIMIT ) {
+		if (backward++ > CHECK_LIST_LIMIT) {
 			printk(KERN_ERR DEVICE_NAME
 			       "%d: %s backward > 1000\n",
 			       mdev_to_minor(mdev),t);
@@ -108,7 +108,7 @@
 		}
 	} while(le != list);
 
-	if(forward != backward) {
+	if (forward != backward) {
 		printk(KERN_ERR DEVICE_NAME "%d: forward=%d, backward=%d\n",
 		       mdev_to_minor(mdev),forward,backward);
 	}
@@ -163,7 +163,7 @@
 		 * don't wait, if none is available, though.
 		 */
 		if ( atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers ) {
-			if( (page = alloc_page(GFP_TRY)) )
+			if ( (page = alloc_page(GFP_TRY)) )
 				break;
 		}
 
@@ -206,7 +206,7 @@
 
 	atomic_dec(&mdev->pp_in_use);
 
-	if(free_it) __free_page(page);
+	if (free_it) __free_page(page);
 
 	/*
 	 * FIXME
@@ -380,7 +380,7 @@
 
 	list_for_each_safe(le, tle, &mdev->net_ee) {
 		e = list_entry(le, struct Tl_epoch_entry, w.list);
-		if( drbd_bio_has_active_page(e->private_bio) ) break;
+		if ( drbd_bio_has_active_page(e->private_bio) ) break;
 		list_del(le);
 		drbd_free_ee(mdev,e);
 	}
@@ -445,11 +445,11 @@
 		le = mdev->done_ee.next;
 		list_del(le);
 		e = list_entry(le, struct Tl_epoch_entry, w.list);
-		if(mdev->net_conf->wire_protocol == DRBD_PROT_C ||
+		if (mdev->net_conf->wire_protocol == DRBD_PROT_C ||
 		   is_syncer_block_id(e->block_id)) {
 			++n;
 		}
-		if(!hlist_unhashed(&e->colision)) hlist_del_init(&e->colision);
+		if (!hlist_unhashed(&e->colision)) hlist_del_init(&e->colision);
 		drbd_free_ee(mdev,e);
 	}
 
@@ -503,7 +503,7 @@
       out_release:
 	sock_release(newsock);
       out:
-	if(err != -EAGAIN && err != -EINTR)
+	if (err != -EAGAIN && err != -EINTR)
 		ERR("accept failed! %d\n", err);
 	return 0;
 }
@@ -585,7 +585,7 @@
 
 	set_fs(oldfs);
 
-	if(rv != size) drbd_force_state(mdev,NS(conn,BrokenPipe));
+	if (rv != size) drbd_force_state(mdev,NS(conn,BrokenPipe));
 
 	return rv;
 }
@@ -602,7 +602,7 @@
 		return NULL;
 	}
 
-	if(!inc_net(mdev)) return NULL;
+	if (!inc_net(mdev)) return NULL;
 
 	sock->sk->sk_rcvtimeo =
 	sock->sk->sk_sndtimeo =  mdev->net_conf->try_connect_int*HZ;
@@ -652,7 +652,7 @@
 		return NULL;
 	}
 
-	if(!inc_net(mdev)) return NULL;
+	if (!inc_net(mdev)) return NULL;
 
 	sock2->sk->sk_reuse    = 1; /* SO_REUSEADDR */
 	sock2->sk->sk_rcvtimeo =
@@ -693,7 +693,7 @@
 
 	rr = drbd_recv_short(mdev, sock, h, sizeof(*h));
 
-	if( rr==sizeof(*h) && h->magic==BE_DRBD_MAGIC ) {
+	if ( rr==sizeof(*h) && h->magic==BE_DRBD_MAGIC ) {
 		return be16_to_cpu(h->command);
 	}
 
@@ -715,7 +715,7 @@
 	D_ASSERT(mdev->state.conn >= Unconnected);
 	D_ASSERT(!mdev->data.socket);
 
-	if(drbd_request_state(mdev,NS(conn,WFConnection)) < SS_Success ) return 0;
+	if (drbd_request_state(mdev,NS(conn,WFConnection)) < SS_Success ) return 0;
 	clear_bit(DISCARD_CONCURRENT, &mdev->flags);
 
 	sock  = NULL;
@@ -724,20 +724,20 @@
 	do {
 		for(try=0;;) { // 3 tries, this should take less than a second!
 			s=drbd_try_connect(mdev);
-			if(s || ++try >= 3 ) break;
+			if (s || ++try >= 3) break;
 			// give the other side time to call bind() & listen()
 			set_current_state(TASK_INTERRUPTIBLE);
 			schedule_timeout(HZ / 10);
 		}
 
-		if(s) {
-			if( !sock ) {
-				if( drbd_send_fp(mdev, s, HandShakeS) ) {
+		if (s) {
+			if (!sock) {
+				if ( drbd_send_fp(mdev, s, HandShakeS) ) {
 					sock = s;
 					s = NULL;
 				}
-			} else if( !msock ) {
-				if( drbd_send_fp(mdev, s, HandShakeM) ) {
+			} else if (!msock) {
+				if ( drbd_send_fp(mdev, s, HandShakeM) ) {
 					msock = s;
 					s = NULL;
 				}
@@ -745,23 +745,23 @@
 				ERR("Logic error in drbd_connect()\n");
 				return -1;
 			}
-			if(s) {
+			if (s) {
 				ERR("Error during sending initial packet.\n");
 				sock_release(s);
 			}
 		}
 
-		if(sock && msock) break;
+		if (sock && msock) break;
 
 		s=drbd_wait_for_connect(mdev);
-		if(s) {
+		if (s) {
 			switch(drbd_recv_fp(mdev,s)) {
 			case HandShakeS:
-				if(sock) sock_release(sock);
+				if (sock) sock_release(sock);
 				sock = s;
 				break;
 			case HandShakeM:
-				if(msock) sock_release(msock);
+				if (msock) sock_release(msock);
 				msock = s;
 				set_bit(DISCARD_CONCURRENT, &mdev->flags);
 				break;
@@ -771,13 +771,13 @@
 			}
 		}
 
-		if(mdev->state.conn <= Disconnecting) return -1;
-		if(signal_pending(current)) {
+		if (mdev->state.conn <= Disconnecting) return -1;
+		if (signal_pending(current)) {
 			flush_signals(current);
 			smp_rmb();
 			if (get_t_state(&mdev->receiver) == Exiting) {
-				if(sock) sock_release(sock);
-				if(msock) sock_release(msock);
+				if (sock) sock_release(sock);
+				if (msock) sock_release(msock);
 				return -1;
 			}
 		}
@@ -811,13 +811,13 @@
 	mdev->meta.socket = msock;
 	mdev->last_received = jiffies;
 
-	if(drbd_request_state(mdev,NS(conn,WFReportParams)) < SS_Success) return 0;
+	if (drbd_request_state(mdev,NS(conn,WFReportParams)) < SS_Success) return 0;
 	D_ASSERT(mdev->asender.task == NULL);
 
 	h = drbd_do_handshake(mdev);
 	if (h <= 0) return h;
 
-	if ( mdev->cram_hmac_tfm ) {
+	if (mdev->cram_hmac_tfm) {
 		if (!drbd_do_auth(mdev)) {
 			ERR("Authentication of peer failed\n");
 			return 0;
@@ -917,14 +917,14 @@
 	int ds,i,rr;
 
 	e = drbd_alloc_ee(mdev,id,sector,data_size,GFP_KERNEL);
-	if(!e) return 0;
+	if (!e) return 0;
 	bio = e->private_bio;
 	ds = data_size;
 	bio_for_each_segment(bvec, bio, i) {
 		page = bvec->bv_page;
 		rr = drbd_recv(mdev,kmap(page),min_t(int,ds,PAGE_SIZE));
 		kunmap(page);
-		if( rr != min_t(int,ds,PAGE_SIZE) ) {
+		if ( rr != min_t(int,ds,PAGE_SIZE) ) {
 			drbd_free_ee(mdev,e);
 			WARN("short read receiving data: read %d expected %d\n",
 			     rr, min_t(int,ds,PAGE_SIZE));
@@ -952,7 +952,7 @@
 	data=kmap(page);
 	while(data_size) {
 		rr = drbd_recv(mdev,data,min_t(int,data_size,PAGE_SIZE));
-		if( rr != min_t(int,data_size,PAGE_SIZE) ) {
+		if ( rr != min_t(int,data_size,PAGE_SIZE) ) {
 			rv = 0;
 			WARN("short read receiving data: read %d expected %d\n",
 			     rr, min_t(int,data_size,PAGE_SIZE));
@@ -1037,7 +1037,7 @@
 	struct Tl_epoch_entry *e;
 
 	e = read_in_block(mdev,ID_SYNCER,sector,data_size);
-	if(!e) return FALSE;
+	if (!e) return FALSE;
 
 	dec_rs_pending(mdev);
 
@@ -1130,7 +1130,7 @@
 	sector = be64_to_cpu(p->sector);
 	D_ASSERT(p->block_id == ID_SYNCER);
 
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		/* data is submitted to disk within recv_resync_read.
 		 * corresponding dec_local done below on error,
 		 * or in drbd_endio_write_sec. */
@@ -1166,14 +1166,14 @@
 	// unsigned int epoch_size;
 	int ok=1,pcmd;
 
-	if(mdev->net_conf->wire_protocol == DRBD_PROT_C) {
-		if(likely(drbd_bio_uptodate(e->private_bio))) {
+	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
+		if (likely(drbd_bio_uptodate(e->private_bio))) {
 			pcmd = (mdev->state.conn >= SyncSource &&
 				mdev->state.conn <= PausedSyncT &&
 				e->flags & EE_MAY_SET_IN_SYNC) ?
 				RSWriteAck : WriteAck;
 			ok &= drbd_send_ack(mdev,pcmd,e);
-			if(pcmd==RSWriteAck)
+			if (pcmd==RSWriteAck)
 				drbd_set_in_sync(mdev,sector,e->size);
 		} else {
 			/* FIXME I think we should send a NegAck regardless of
@@ -1188,7 +1188,7 @@
 			 * maybe assert this?  */
 		}
 		dec_unacked(mdev);
-	} else if(unlikely(!drbd_bio_uptodate(e->private_bio))) {
+	} else if (unlikely(!drbd_bio_uptodate(e->private_bio))) {
 		ok = drbd_io_error(mdev, FALSE);
 	}
 
@@ -1293,7 +1293,7 @@
 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
 		return FALSE;
 
-	if(!inc_local(mdev)) {
+	if (!inc_local(mdev)) {
 		/* data is submitted to disk at the end of this function.
 		 * corresponding dec_local done either below (on error),
 		 * or in drbd_endio_write_sec. */
@@ -1320,13 +1320,13 @@
 	e->w.cb = e_end_block;
 
 	dp_flags = be32_to_cpu(p->dp_flags);
-	if ( dp_flags & DP_HARDBARRIER ) {
+	if (dp_flags & DP_HARDBARRIER) {
 		e->private_bio->bi_rw |= BIO_RW_BARRIER;
 	}
-	if ( dp_flags & DP_RW_SYNC ) {
+	if (dp_flags & DP_RW_SYNC) {
 		e->private_bio->bi_rw |= BIO_RW_SYNC;
 	}
-	if ( dp_flags & DP_MAY_SET_IN_SYNC ) {
+	if (dp_flags & DP_MAY_SET_IN_SYNC) {
 		e->flags |= EE_MAY_SET_IN_SYNC;
 	}
 
@@ -1542,7 +1542,7 @@
 		break;
 	}
 
-	if(mdev->state.pdsk == Diskless) {
+	if (mdev->state.pdsk == Diskless) {
 		// In case we have the only disk of the cluster,
 		drbd_set_out_of_sync(mdev,e->sector,e->size);
 		e->flags |= EE_CALL_AL_COMPLETE_IO;
@@ -1597,7 +1597,7 @@
 		return FALSE;
 	}
 
-	if(!inc_local_if_state(mdev, UpToDate)) {
+	if (!inc_local_if_state(mdev, UpToDate)) {
 		if (DRBD_ratelimit(5*HZ,5))
 			ERR("Can not satisfy peer's read request, no local data.\n");
 		drbd_send_ack_rp(mdev,h->command == DataRequest ? NegDReply :
@@ -1686,17 +1686,17 @@
 		WARN("Discard younger/older primary did not found a decision\n"
 		     "Using discard-least-changes instead\n");
 	case DiscardZeroChg:
-		if( ch_peer == 0 && ch_self == 0) {
+		if (ch_peer == 0 && ch_self == 0) {
 			rv=test_bit(DISCARD_CONCURRENT,&mdev->flags) ? -1 : 1;
 			break;
 		} else {
-			if ( ch_peer == 0 ) { rv =  1; break; }
-			if ( ch_self == 0 ) { rv = -1; break; }
+			if (ch_peer == 0) { rv =  1; break; }
+			if (ch_self == 0) { rv = -1; break; }
 		}
-		if( mdev->net_conf->after_sb_0p == DiscardZeroChg ) break;
+		if (mdev->net_conf->after_sb_0p == DiscardZeroChg) break;
 	case DiscardLeastChg:
 		if      ( ch_self < ch_peer ) rv = -1;
-		else if ( ch_self > ch_peer ) rv =  1;
+		else if (ch_self > ch_peer) rv =  1;
 		else /* ( ch_self == ch_peer ) */ {
 			// Well, then use something else.
 			rv=test_bit(DISCARD_CONCURRENT,&mdev->flags) ? -1 : 1;
@@ -1731,8 +1731,8 @@
 		break;
 	case Consensus:
 		hg = drbd_asb_recover_0p(mdev);
-		if( hg == -1 && mdev->state.role==Secondary) rv=hg;
-		if( hg == 1  && mdev->state.role==Primary)   rv=hg;
+		if (hg == -1 && mdev->state.role==Secondary) rv=hg;
+		if (hg == 1  && mdev->state.role==Primary)   rv=hg;
 		break;
 	case Violently:
 		rv = drbd_asb_recover_0p(mdev);
@@ -1741,7 +1741,7 @@
 		return mdev->state.role==Primary ? 1 : -1;
 	case CallHelper:
 		hg = drbd_asb_recover_0p(mdev);
-		if( hg == -1 && mdev->state.role==Primary) {
+		if (hg == -1 && mdev->state.role==Primary) {
 			self = drbd_set_role(mdev,Secondary,0);
 			if (self != SS_Success) {
 				drbd_khelper(mdev,"pri-lost-after-sb");
@@ -1779,7 +1779,7 @@
 		break;
 	case CallHelper:
 		hg = drbd_asb_recover_0p(mdev);
-		if( hg == -1 ) {
+		if (hg == -1) {
 			self = drbd_set_role(mdev,Secondary,0);
 			if (self != SS_Success) {
 				drbd_khelper(mdev,"pri-lost-after-sb");
@@ -1904,7 +1904,7 @@
 	drbd_disks_t mydisk;
 
 	mydisk = mdev->state.disk;
-	if( mydisk == Negotiating ) mydisk = mdev->new_state_tmp.disk;
+	if (mydisk == Negotiating) mydisk = mdev->new_state_tmp.disk;
 
 	hg = drbd_uuid_compare(mdev,&rule_nr);
 
@@ -1921,11 +1921,11 @@
 		return conn_mask;
 	}
 
-	if( (mydisk==Inconsistent && peer_disk>Inconsistent) ||
+	if ( (mydisk==Inconsistent && peer_disk>Inconsistent) ||
 	    (peer_disk==Inconsistent && mydisk>Inconsistent) )  {
 		int f = (hg == -100) || abs(hg) == 2;
 		hg = mydisk > Inconsistent ? 1 : -1;
-		if(f) hg=hg*2;
+		if (f) hg=hg*2;
 		INFO("Becoming sync %s due to disk states.\n",
 		     hg > 0 ? "source" : "target");
 	}
@@ -1948,7 +1948,7 @@
 		if ( abs(hg) < 100 ) {
 			WARN("Split-Brain detected, %d primaries, automatically solved. Sync from %s node\n",
 			     pcount, (hg < 0) ? "peer":"this");
-			if(forced) {
+			if (forced) {
 				WARN("Doing a full sync, since"
 				     " UUIDs where ambiguous.\n");
 				drbd_uuid_dump(mdev,"self",mdev->bc->md.uuid);
@@ -1958,11 +1958,11 @@
 		}
 	}
 
-	if ( hg == -100 ) {
-		if(mdev->net_conf->want_lose && !(mdev->p_uuid[UUID_FLAGS]&1)){
+	if (hg == -100) {
+		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UUID_FLAGS]&1)){
 			hg = -1;
 		}
-		if(!mdev->net_conf->want_lose && (mdev->p_uuid[UUID_FLAGS]&1)){
+		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UUID_FLAGS]&1)){
 			hg = 1;
 		}
 
@@ -1980,7 +1980,7 @@
 		return conn_mask;
 	}
 
-	if (hg > 0 && mydisk <= Inconsistent ) {
+	if (hg > 0 && mydisk <= Inconsistent) {
 		ERR("I shall become SyncSource, but I am inconsistent!\n");
 		drbd_force_state(mdev,NS(conn,Disconnecting));
 		return conn_mask;
@@ -2022,7 +2022,7 @@
 		rv = WFBitMapT;
 	} else {
 		rv = Connected;
-		if(drbd_bm_total_weight(mdev)) {
+		if (drbd_bm_total_weight(mdev)) {
 			INFO("No resync, but %lu bits in bitmap!\n",
 			     drbd_bm_total_weight(mdev));
 		}
@@ -2037,15 +2037,15 @@
 STATIC int cmp_after_sb(enum after_sb_handler peer, enum after_sb_handler self)
 {
 	// DiscardRemote - DiscardLocal is valid
-	if( (peer == DiscardRemote && self == DiscardLocal) ||
+	if ( (peer == DiscardRemote && self == DiscardLocal) ||
 	    (self == DiscardRemote && peer == DiscardLocal) ) return 0;
 
 	// any other things with DiscardRemote or DiscardLocal are invalid
-	if( peer == DiscardRemote || peer == DiscardLocal ||
+	if ( peer == DiscardRemote || peer == DiscardLocal ||
 	    self == DiscardRemote || self == DiscardLocal ) return 1;
 
 	// everything else is valid if they are equal on both sides.
-	if( peer == self ) return 0;
+	if (peer == self) return 0;
 
 	// everything es is invalid.
 	return 1;
@@ -2069,32 +2069,32 @@
 	p_want_lose     = be32_to_cpu(p->want_lose);
 	p_two_primaries = be32_to_cpu(p->two_primaries);
 
-	if( p_proto != mdev->net_conf->wire_protocol) {
+	if (p_proto != mdev->net_conf->wire_protocol) {
 		ERR("incompatible communication protocols\n");
 		goto disconnect;
 	}
 
-	if( cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p) ) {
+	if ( cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p) ) {
 		ERR("incompatible after-sb-0pri settings\n");
 		goto disconnect;
 	}
 
-	if( cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p) ) {
+	if ( cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p) ) {
 		ERR("incompatible after-sb-1pri settings\n");
 		goto disconnect;
 	}
 
-	if( cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p) ) {
+	if ( cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p) ) {
 		ERR("incompatible after-sb-2pri settings\n");
 		goto disconnect;
 	}
 
-	if( p_want_lose && mdev->net_conf->want_lose ) {
+	if (p_want_lose && mdev->net_conf->want_lose) {
 		ERR("both sides have the 'want_lose' flag set\n");
 		goto disconnect;
 	}
 
-	if( p_two_primaries != mdev->net_conf->two_primaries ) {
+	if (p_two_primaries != mdev->net_conf->two_primaries) {
 		ERR("incompatible setting of the two-primaries options\n");
 		goto disconnect;
 	}
@@ -2153,14 +2153,14 @@
 	p_size=be64_to_cpu(p->d_size);
 	p_usize=be64_to_cpu(p->u_size);
 
-	if(p_size == 0 && mdev->state.disk == Diskless ) {
+	if (p_size == 0 && mdev->state.disk == Diskless) {
 		ERR("some backing storage is needed\n");
 		drbd_force_state(mdev,NS(conn,Disconnecting));
 		return FALSE;
 	}
 
 #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		warn_if_differ_considerably(mdev, "lower level device sizes",
 			   p_size, drbd_get_capacity(mdev->bc->backing_bdev));
 		warn_if_differ_considerably(mdev, "user requested size",
@@ -2175,14 +2175,14 @@
 
 		my_usize = mdev->bc->dc.disk_size;
 
-		if( mdev->bc->dc.disk_size != p_usize ) {
+		if (mdev->bc->dc.disk_size != p_usize) {
 			mdev->bc->dc.disk_size = p_usize;
 			INFO("Peer sets u_size to %lu KB\n",
 			     (unsigned long)mdev->bc->dc.disk_size);
 		}
 
 		// Never shrink a device with usable data.
-		if(drbd_new_dev_size(mdev,mdev->bc) <
+		if (drbd_new_dev_size(mdev,mdev->bc) <
 		   drbd_get_capacity(mdev->this_bdev) &&
 		   mdev->state.disk >= Outdated ) {
 			dec_local(mdev);
@@ -2196,7 +2196,7 @@
 #undef min_not_zero
 
 	mdev->p_size=p_size;
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		drbd_bm_lock(mdev); // {
 		/*
 		 * you may get a flip-flop connection established/connection loss,
@@ -2216,17 +2216,17 @@
 		nconn=drbd_sync_handshake(mdev,mdev->state.peer,mdev->state.pdsk);
 		dec_local(mdev);
 
-		if(nconn == conn_mask) return FALSE;
+		if (nconn == conn_mask) return FALSE;
 
-		if(drbd_request_state(mdev,NS(conn,nconn)) < SS_Success) {
+		if (drbd_request_state(mdev,NS(conn,nconn)) < SS_Success) {
 			drbd_force_state(mdev,NS(conn,Disconnecting));
 			return FALSE;
 		}
 	}
 
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		max_seg_s = be32_to_cpu(p->max_segment_size);
-		if( max_seg_s != mdev->rq_queue->max_segment_size ) {
+		if (max_seg_s != mdev->rq_queue->max_segment_size) {
 			drbd_setup_queue_param(mdev, max_seg_s);
 		}
 
@@ -2234,8 +2234,8 @@
 		dec_local(mdev);
 	}
 
-	if (mdev->state.conn > WFReportParams ) {
-		if( be64_to_cpu(p->c_size) !=
+	if (mdev->state.conn > WFReportParams) {
+		if ( be64_to_cpu(p->c_size) !=
 		    drbd_get_capacity(mdev->this_bdev) ) {
 			// we have different sizes, probabely peer
 			// needs to know my new size...
@@ -2262,7 +2262,7 @@
 		p_uuid[i] = be64_to_cpu(p->uuid[i]);
 	}
 
-	if ( mdev->p_uuid ) kfree(mdev->p_uuid);
+	if (mdev->p_uuid) kfree(mdev->p_uuid);
 	mdev->p_uuid = p_uuid;
 
 	return TRUE;
@@ -2344,7 +2344,7 @@
 	oconn = nconn = mdev->state.conn;
 	spin_unlock_irq(&mdev->req_lock);
 
-	if (nconn == WFReportParams ) nconn = Connected;
+	if (nconn == WFReportParams) nconn = Connected;
 
 	if (mdev->p_uuid && oconn <= Connected &&
 	    peer_state.disk >= Negotiating &&
@@ -2352,31 +2352,31 @@
 		nconn=drbd_sync_handshake(mdev,peer_state.role,peer_state.disk);
 		dec_local(mdev);
 
-		if(nconn == conn_mask) return FALSE;
+		if (nconn == conn_mask) return FALSE;
 	}
 
 	spin_lock_irq(&mdev->req_lock);
-	if( mdev->state.conn != oconn ) goto retry;
+	if (mdev->state.conn != oconn) goto retry;
 	os = mdev->state;
 	ns.i = mdev->state.i;
 	ns.conn = nconn;
 	ns.peer = peer_state.role;
 	ns.pdsk = peer_state.disk;
 	ns.peer_isp = ( peer_state.aftr_isp | peer_state.user_isp );
-	if((nconn == Connected || nconn == WFBitMapS) &&
+	if ((nconn == Connected || nconn == WFBitMapS) &&
 	   ns.disk == Negotiating ) ns.disk = UpToDate;
-	if((nconn == Connected || nconn == WFBitMapT) &&
+	if ((nconn == Connected || nconn == WFBitMapT) &&
 	   ns.pdsk == Negotiating ) ns.pdsk = UpToDate;
 	rv = _drbd_set_state(mdev,ns,ChgStateVerbose | ChgStateHard);
 	spin_unlock_irq(&mdev->req_lock);
 
-	if(rv < SS_Success) {
+	if (rv < SS_Success) {
 		drbd_force_state(mdev,NS(conn,Disconnecting));
 		return FALSE;
 	}
 
-	if (oconn > WFReportParams ) {
-		if( nconn > Connected && peer_state.conn <= Connected) {
+	if (oconn > WFReportParams) {
+		if (nconn > Connected && peer_state.conn <= Connected) {
 			// we want resync, peer has not yet decided to sync...
 			drbd_send_uuids(mdev);
 			drbd_send_state(mdev);
@@ -2614,7 +2614,7 @@
 	drbd_thread_stop(&mdev->asender);
 
 	fp = DontCare;
-	if(inc_local(mdev)) {
+	if (inc_local(mdev)) {
 		fp = mdev->bc->dc.fencing;
 		dec_local(mdev);
 	}
@@ -2665,7 +2665,7 @@
 	drbd_queue_work(&mdev->data.work,&prev_work_done);
 	wait_event(mdev->misc_wait, !test_bit(WORK_PENDING,&mdev->flags));
 
-	if ( mdev->p_uuid ) {
+	if (mdev->p_uuid) {
 		kfree(mdev->p_uuid);
 		mdev->p_uuid = NULL;
 	}
@@ -2679,8 +2679,8 @@
 
 	drbd_md_sync(mdev);
 
-	if ( mdev->state.role == Primary ) {
-		if( fp >= Resource &&
+	if (mdev->state.role == Primary) {
+		if ( fp >= Resource &&
 		    mdev->state.pdsk >= DUnknown ) {
 			drbd_disks_t nps = drbd_try_outdate_peer(mdev);
 			drbd_request_state(mdev,NS(pdsk,nps));
@@ -2689,7 +2689,7 @@
 
 	spin_lock_irq(&mdev->req_lock);
 	os = mdev->state;
-	if ( os.conn >= Unconnected ) {
+	if (os.conn >= Unconnected) {
 		// Do not restart in case we are Disconnecting
 		ns = os;
 		ns.conn = Unconnected;
@@ -2700,20 +2700,20 @@
 		after_state_ch(mdev,os,ns,ChgStateVerbose);
 	}
 
-	if(os.conn == Disconnecting) {
+	if (os.conn == Disconnecting) {
 		wait_event( mdev->misc_wait,atomic_read(&mdev->net_cnt) == 0 );
-		if(mdev->ee_hash) {
+		if (mdev->ee_hash) {
 			kfree(mdev->ee_hash);
 			mdev->ee_hash = NULL;
 			mdev->ee_hash_s = 0;
 		}
 
-		if(mdev->tl_hash) {
+		if (mdev->tl_hash) {
 			kfree(mdev->tl_hash);
 			mdev->tl_hash = NULL;
 			mdev->tl_hash_s = 0;
 		}
-		if(mdev->cram_hmac_tfm) {
+		if (mdev->cram_hmac_tfm) {
 			crypto_free_hash(mdev->cram_hmac_tfm);
 			mdev->cram_hmac_tfm = NULL;
 		}
@@ -2881,7 +2881,7 @@
 
 	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
 				(u8*)mdev->net_conf->shared_secret, key_len);
-	if(rv) {
+	if (rv) {
 		ERR("crypto_hash_setkey() failed with %d\n",rv);
 		rv = 0;
 		goto fail;
@@ -2902,14 +2902,14 @@
 		goto fail;
 	}
 
-	if (p.length > CHALLENGE_LEN*2 ) {
+	if (p.length > CHALLENGE_LEN*2) {
 		ERR( "expected AuthChallenge payload too big.\n");
 		rv = 0;
 		goto fail;
 	}
 
 	peers_ch = kmalloc(p.length,GFP_KERNEL);
-	if(peers_ch == NULL) {
+	if (peers_ch == NULL) {
 		ERR("kmalloc of peers_ch failed\n");
 		rv = 0;
 		goto fail;
@@ -2925,7 +2925,7 @@
 
 	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
 	response = kmalloc(resp_size,GFP_KERNEL);
-	if(response == NULL) {
+	if (response == NULL) {
 		ERR("kmalloc of response failed\n");
 		rv = 0;
 		goto fail;
@@ -2936,7 +2936,7 @@
 	sg.length = p.length;
 
 	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
-	if(rv) {
+	if (rv) {
 		ERR( "crypto_hash_digest() failed with %d\n",rv);
 		rv = 0;
 		goto fail;
@@ -2955,7 +2955,7 @@
 		goto fail;
 	}
 
-	if (p.length != resp_size ) {
+	if (p.length != resp_size) {
 		ERR( "expected AuthResponse payload of wrong size\n" );
 		rv = 0;
 		goto fail;
@@ -2970,7 +2970,7 @@
 	}
 
 	right_response = kmalloc(resp_size,GFP_KERNEL);
-	if(response == NULL) {
+	if (response == NULL) {
 		ERR("kmalloc of right_response failed\n");
 		rv = 0;
 		goto fail;
@@ -2981,7 +2981,7 @@
 	sg.length = CHALLENGE_LEN;
 
 	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
-	if(rv) {
+	if (rv) {
 		ERR( "crypto_hash_digest() failed with %d\n",rv);
 		rv = 0;
 		goto fail;
@@ -2989,15 +2989,15 @@
 
 	rv = ! memcmp(response,right_response,resp_size);
 
-	if(rv) {
+	if (rv) {
 		INFO("Peer authenticated using %d bytes of '%s' HMAC\n",
 		     resp_size,mdev->net_conf->cram_hmac_alg);
 	}
 
  fail:
-	if(peers_ch) kfree(peers_ch);
-	if(response) kfree(response);
-	if(right_response) kfree(right_response);
+	if (peers_ch) kfree(peers_ch);
+	if (response) kfree(response);
+	if (right_response) kfree(right_response);
 
 	return rv;
 }
@@ -3019,14 +3019,14 @@
 			drbd_disconnect(mdev);
 			schedule_timeout(HZ);
 		}
-		if( h < 0 ) {
+		if (h < 0) {
 			WARN("Discarding network configuration.\n");
 			drbd_force_state(mdev,NS(conn,Disconnecting));
 		}
 	} while ( h == 0 );
 
-	if( h > 0 ) {
-		if(inc_net(mdev)) {
+	if (h > 0) {
+		if (inc_net(mdev)) {
 			drbdd(mdev);
 			dec_net(mdev);
 		}
@@ -3035,10 +3035,10 @@
 	drbd_disconnect(mdev);
 
 	// Ensure that the thread state fits to our connection state.
-	if( mdev->state.conn == Unconnected ) {
+	if (mdev->state.conn == Unconnected) {
 		ERR_IF( mdev->receiver.t_state != Restarting )
 			drbd_thread_restart_nowait(&mdev->receiver);
-	} else if( mdev->state.conn == StandAlone ) {
+	} else if (mdev->state.conn == StandAlone) {
 		ERR_IF( mdev->receiver.t_state != Exiting )
 			drbd_thread_stop_nowait(&mdev->receiver);
 	}
@@ -3055,7 +3055,7 @@
 
 	int retcode = be32_to_cpu(p->retcode);
 
-	if(retcode >= SS_Success) {
+	if (retcode >= SS_Success) {
 		set_bit(CL_ST_CHG_SUCCESS,&mdev->flags);
 	} else {
 		set_bit(CL_ST_CHG_FAIL,&mdev->flags);
@@ -3090,7 +3090,7 @@
 
 	update_peer_seq(mdev,be32_to_cpu(p->seq_num));
 
-	if( is_syncer_block_id(p->block_id)) {
+	if ( is_syncer_block_id(p->block_id)) {
 		drbd_set_in_sync(mdev,sector,blksize);
 		dec_rs_pending(mdev);
 	} else {
@@ -3144,7 +3144,7 @@
 
 	update_peer_seq(mdev,be32_to_cpu(p->seq_num));
 
-	if(is_syncer_block_id(p->block_id)) {
+	if (is_syncer_block_id(p->block_id)) {
 		sector_t sector = be64_to_cpu(p->sector);
 		int size = be32_to_cpu(p->blksize);
 
@@ -3206,7 +3206,7 @@
 
 	dec_rs_pending(mdev);
 
-	if(inc_local_if_state(mdev,Failed)) {
+	if (inc_local_if_state(mdev,Failed)) {
 		drbd_rs_complete_io(mdev,sector);
 		drbd_rs_failed_io(mdev, sector, size);
 		dec_local(mdev);
@@ -3277,7 +3277,7 @@
 			spin_lock_irq(&mdev->req_lock);
 			empty = list_empty(&mdev->done_ee);
 			spin_unlock_irq(&mdev->req_lock);
-			if(empty) break;
+			if (empty) break;
 			clear_bit(SIGNAL_ASENDER, &mdev->flags);
 			flush_signals(current);
 		}
@@ -3308,7 +3308,7 @@
 			ERR("meta connection shut down by peer.\n");
 			goto err;
 		} else if (rv == -EAGAIN) {
-			if( mdev->meta.socket->sk->sk_rcvtimeo ==
+			if ( mdev->meta.socket->sk->sk_rcvtimeo ==
 			    mdev->net_conf->ping_timeo*HZ/10 ) {
 				ERR("PingAck did not arrive in time.\n");
 				goto err;
@@ -3322,7 +3322,7 @@
 			goto err;
 		}
 
-		if (received == expect && cmd == -1 ) {
+		if (received == expect && cmd == -1) {
 			cmd = be16_to_cpu(h->command);
 			len = be16_to_cpu(h->length);
 			if (unlikely( h->magic != BE_DRBD_MAGIC )) {
@@ -3337,10 +3337,10 @@
 				DUMPI(expect);
 			}
 		}
-		if(received == expect) {
+		if (received == expect) {
 			D_ASSERT(cmd != -1);
 			dump_packet(mdev,mdev->meta.socket,1,(void*)h, __FILE__, __LINE__);
-			if(!asender_tbl[cmd].process(mdev,h)) goto err;
+			if (!asender_tbl[cmd].process(mdev,h)) goto err;
 
 			buf      = h;
 			received = 0;
@@ -3349,7 +3349,7 @@
 		}
 	} //while
 
-	if(0) {
+	if (0) {
 	err:
 		clear_bit(SIGNAL_ASENDER, &mdev->flags);
 		if (mdev->state.conn >= Connected)

Modified: branches/drbd-8.0-for-linus/drbd/drbd_req.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_req.c	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_req.c	2007-07-24 09:44:10 UTC (rev 2984)
@@ -127,7 +127,7 @@
 			drbd_set_out_of_sync(mdev,req->sector,req->size);
 		}
 
-		if( (s & RQ_NET_OK) && (s & RQ_LOCAL_OK) &&
+		if ( (s & RQ_NET_OK) && (s & RQ_LOCAL_OK) &&
 		    (s & RQ_NET_SIS) ) {
 			drbd_set_in_sync(mdev,req->sector,req->size);
 		}
@@ -388,7 +388,7 @@
 		}
 	}
 
-	if(mdev->ee_hash_s) {
+	if (mdev->ee_hash_s) {
 		/* now, check for overlapping requests with remote origin */
 		BUG_ON(mdev->ee_hash == NULL);
 #undef OVERLAPS
@@ -589,7 +589,7 @@
 
 		/* mark the current epoch as closed,
 		 * in case it outgrew the limit */
-		if( ++mdev->newest_barrier->n_req >= mdev->net_conf->max_epoch_size )
+		if (++mdev->newest_barrier->n_req >= mdev->net_conf->max_epoch_size)
 			set_bit(ISSUE_BARRIER,&mdev->flags);
 
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
@@ -871,7 +871,7 @@
 	{
   allocate_barrier:
 		b = kmalloc(sizeof(struct drbd_barrier),GFP_NOIO);
-		if(!b) {
+		if (!b) {
 			ERR("Failed to alloc barrier.");
 			err = -ENOMEM;
 			goto fail_and_free_req;
@@ -1076,7 +1076,7 @@
 	}
 
 	/* Currently our BARRIER code is disabled. */
-	if(unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_barrier(bio))) {
 		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
 		return 0;
 	}
@@ -1094,7 +1094,7 @@
 	s_enr = bio->bi_sector >> HT_SHIFT;
 	e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
 
-	if(unlikely(s_enr != e_enr)) {
+	if (unlikely(s_enr != e_enr)) {
 	if (bio->bi_vcnt != 1 || bio->bi_idx != 0) {
 		/* rather error out here than BUG in bio_split */
 		ERR("bio would need to, but cannot, be split: "
@@ -1159,7 +1159,7 @@
 		if (limit <= bvec->bv_len) limit = bvec->bv_len;
 	} else if (limit && inc_local(mdev)) {
 		request_queue_t * const b = mdev->bc->backing_bdev->bd_disk->queue;
-		if(b->merge_bvec_fn && mdev->bc->dc.use_bmbv) {
+		if (b->merge_bvec_fn && mdev->bc->dc.use_bmbv) {
 			backing_limit = b->merge_bvec_fn(b,bio,bvec);
 			limit = min(limit,backing_limit);
 		}

Modified: branches/drbd-8.0-for-linus/drbd/drbd_worker.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_worker.c	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_worker.c	2007-07-24 09:44:10 UTC (rev 2984)
@@ -102,7 +102,7 @@
 	spin_lock_irqsave(&mdev->req_lock,flags);
 	mdev->read_cnt += e->size >> 9;
 	list_del(&e->w.list);
-	if(list_empty(&mdev->read_ee)) wake_up(&mdev->ee_wait);
+	if (list_empty(&mdev->read_ee)) wake_up(&mdev->ee_wait);
 	spin_unlock_irqrestore(&mdev->req_lock,flags);
 
 	drbd_chk_io_error(mdev,error,FALSE);
@@ -169,7 +169,7 @@
 	 * done from "drbd_process_done_ee" within the appropriate w.cb
 	 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
 
-	if(!is_syncer_req) mdev->epoch_size++;
+	if (!is_syncer_req) mdev->epoch_size++;
 
 	do_wake = is_syncer_req
 		? list_empty(&mdev->sync_ee)
@@ -240,10 +240,10 @@
 	 * when it is done and had a local write error, see comments there */
 	drbd_req_free(req);
 
-	if(unlikely(cancel)) return 1;
+	if (unlikely(cancel)) return 1;
 
 	ok = drbd_io_error(mdev, FALSE);
-	if(unlikely(!ok)) ERR("Sending in w_io_error() failed\n");
+	if (unlikely(!ok)) ERR("Sending in w_io_error() failed\n");
 	return ok;
 }
 
@@ -285,7 +285,7 @@
 
 	spin_lock_irqsave(&mdev->req_lock,flags);
 
-	if(likely(!test_and_clear_bit(STOP_SYNC_TIMER,&mdev->flags))) {
+	if (likely(!test_and_clear_bit(STOP_SYNC_TIMER,&mdev->flags))) {
 		queue=1;
 		mdev->resync_work.cb = w_make_resync_request;
 	} else {
@@ -296,7 +296,7 @@
 	spin_unlock_irqrestore(&mdev->req_lock,flags);
 
 	/* harmless race: list_empty outside data.work.q_lock */
-	if(list_empty(&mdev->resync_work.list) && queue) {
+	if (list_empty(&mdev->resync_work.list) && queue) {
 		drbd_queue_work(&mdev->data.work,&mdev->resync_work);
 	}
 }
@@ -314,9 +314,9 @@
 
 	PARANOIA_BUG_ON(w != &mdev->resync_work);
 
-	if(unlikely(cancel)) return 1;
+	if (unlikely(cancel)) return 1;
 
-	if(unlikely(mdev->state.conn < Connected)) {
+	if (unlikely(mdev->state.conn < Connected)) {
 		ERR("Confused in w_make_resync_request()! cstate < Connected");
 		return 0;
 	}
@@ -332,7 +332,7 @@
 	}
 	number -= atomic_read(&mdev->rs_pending_cnt);
 
-	if(!inc_local(mdev)) {
+	if (!inc_local(mdev)) {
 		/* Since we only need to access mdev->rsync a
 		   inc_local_if_state(mdev,Failed) would be sufficient, but
 		   to continue resync with a broken disk makes no sense at
@@ -405,7 +405,7 @@
 				break;
 			bit++;
 			size += BM_BLOCK_SIZE;
-			if( (BM_BLOCK_SIZE<<align) <= size) align++;
+			if ( (BM_BLOCK_SIZE<<align) <= size) align++;
 			i++;
 		}
 		/* if we merged some,
@@ -417,7 +417,7 @@
 		/* adjust very last sectors, in case we are oddly sized */
 		if (sector + (size>>9) > capacity) size = (capacity-sector)<<9;
 		inc_rs_pending(mdev);
-		if(!drbd_send_drequest(mdev,RSDataRequest,
+		if (!drbd_send_drequest(mdev,RSDataRequest,
 				       sector,size,ID_SYNCER)) {
 			ERR("drbd_send_drequest() failed, aborting...\n");
 			dec_rs_pending(mdev);
@@ -426,7 +426,7 @@
 		}
 	}
 
-	if(drbd_bm_rs_done(mdev)) {
+	if (drbd_bm_rs_done(mdev)) {
 		/* last syncer _request_ was sent,
 		 * but the RSDataReply not yet received.  sync will end (and
 		 * next sync group will resume), as soon as we receive the last
@@ -464,7 +464,7 @@
 	// Remove all elements from the resync LRU. Since future actions
 	// might set bits in the (main) bitmap, then the entries in the
 	// resync LRU would be wrong.
-	if(drbd_rs_del_all(mdev)) {
+	if (drbd_rs_del_all(mdev)) {
 		// In case this is not possible now, most probabely because
 		// there are RSDataReply Packets lingering on the worker's
 		// queue (or even the read operations for those packets
@@ -474,7 +474,7 @@
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(HZ / 10);
 		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
-		if(w) {
+		if (w) {
 			w->cb = w_resync_finished;
 			drbd_queue_work(&mdev->data.work,w);
 			return 1;
@@ -508,7 +508,7 @@
 
 		if (mdev->state.conn == SyncTarget ||
 		    mdev->state.conn == PausedSyncT) {
-			if( mdev->p_uuid ) {
+			if (mdev->p_uuid) {
 				int i;
 				for ( i=Bitmap ; i<=History_end ; i++ ) {
 					_drbd_uuid_set(mdev,i,mdev->p_uuid[i]);
@@ -522,7 +522,7 @@
 
 		drbd_uuid_set_bm(mdev,0UL);
 
-		if ( mdev->p_uuid ) {
+		if (mdev->p_uuid) {
 			// Now the two UUID sets are equal, update what we
 			// know of the peer.
 			int i;
@@ -560,13 +560,13 @@
 	struct Tl_epoch_entry *e = (struct Tl_epoch_entry*)w;
 	int ok;
 
-	if(unlikely(cancel)) {
+	if (unlikely(cancel)) {
 		drbd_free_ee(mdev,e);
 		dec_unacked(mdev);
 		return 1;
 	}
 
-	if(likely(drbd_bio_uptodate(e->private_bio))) {
+	if (likely(drbd_bio_uptodate(e->private_bio))) {
 		ok=drbd_send_block(mdev, DataReply, e);
 	} else {
 		if (DRBD_ratelimit(5*HZ,5))
@@ -584,7 +584,7 @@
 	dec_unacked(mdev);
 
 	spin_lock_irq(&mdev->req_lock);
-	if( drbd_bio_has_active_page(e->private_bio) ) {
+	if ( drbd_bio_has_active_page(e->private_bio) ) {
 		/* This might happen if sendpage() has not finished */
 		list_add_tail(&e->w.list,&mdev->net_ee);
 	} else {
@@ -592,7 +592,7 @@
 	}
 	spin_unlock_irq(&mdev->req_lock);
 
-	if(unlikely(!ok)) ERR("drbd_send_block() failed\n");
+	if (unlikely(!ok)) ERR("drbd_send_block() failed\n");
 	return ok;
 }
 
@@ -604,18 +604,18 @@
 	struct Tl_epoch_entry *e = (struct Tl_epoch_entry*)w;
 	int ok;
 
-	if(unlikely(cancel)) {
+	if (unlikely(cancel)) {
 		drbd_free_ee(mdev,e);
 		dec_unacked(mdev);
 		return 1;
 	}
 
-	if(inc_local_if_state(mdev,Failed)) {
+	if (inc_local_if_state(mdev,Failed)) {
 		drbd_rs_complete_io(mdev,e->sector);
 		dec_local(mdev);
 	}
 
-	if(likely(drbd_bio_uptodate(e->private_bio))) {
+	if (likely(drbd_bio_uptodate(e->private_bio))) {
 		if (likely( mdev->state.pdsk >= Inconsistent )) {
 			inc_rs_pending(mdev);
 			ok=drbd_send_block(mdev, RSDataReply, e);
@@ -640,7 +640,7 @@
 	dec_unacked(mdev);
 
 	spin_lock_irq(&mdev->req_lock);
-	if( drbd_bio_has_active_page(e->private_bio) ) {
+	if ( drbd_bio_has_active_page(e->private_bio) ) {
 		/* This might happen if sendpage() has not finished */
 		list_add_tail(&e->w.list,&mdev->net_ee);
 	} else {
@@ -648,7 +648,7 @@
 	}
 	spin_unlock_irq(&mdev->req_lock);
 
-	if(unlikely(!ok)) ERR("drbd_send_block() failed\n");
+	if (unlikely(!ok)) ERR("drbd_send_block() failed\n");
 	return ok;
 }
 
@@ -730,7 +730,7 @@
 	ok = drbd_send_drequest(mdev, DataRequest, req->sector, req->size,
 				(unsigned long)req);
 
-	if(ok) {
+	if (ok) {
 		req_mod(req, handed_over_to_network, 0);
 	} else {
 		/* ?? we set Timeout or BrokenPipe in drbd_send() */
@@ -752,7 +752,7 @@
 
 	local_irq_disable();
 	for (i=0; i < minor_count; i++) {
-		if(!(mdev = minor_to_mdev(i))) continue;
+		if (!(mdev = minor_to_mdev(i))) continue;
 		spin_lock(&mdev->req_lock);
 	}
 }
@@ -763,7 +763,7 @@
 	int i;
 
 	for (i=0; i < minor_count; i++) {
-		if(!(mdev = minor_to_mdev(i))) continue;
+		if (!(mdev = minor_to_mdev(i))) continue;
 		spin_unlock(&mdev->req_lock);
 	}
 	local_irq_enable();
@@ -774,10 +774,10 @@
 	drbd_dev *odev = mdev;
 
 	while(1) {
-		if( odev->sync_conf.after == -1 ) return 1;
+		if (odev->sync_conf.after == -1) return 1;
 		odev = minor_to_mdev(odev->sync_conf.after);
 		ERR_IF(!odev) return 1;
-		if( (odev->state.conn >= SyncSource &&
+		if ( (odev->state.conn >= SyncSource &&
 		     odev->state.conn <= PausedSyncT) ||
 		    odev->state.aftr_isp || odev->state.peer_isp ||
 		    odev->state.user_isp ) return 0;
@@ -796,7 +796,7 @@
 	int i, rv = 0;
 
 	for (i=0; i < minor_count; i++) {
-		if( !(odev = minor_to_mdev(i)) ) continue;
+		if ( !(odev = minor_to_mdev(i)) ) continue;
 		if (! _drbd_may_sync_now(odev)) {
 			rv |= ( _drbd_set_state(_NS(odev,aftr_isp,1),
 						ChgStateHard|ScheduleAfter)
@@ -819,8 +819,8 @@
 	int i, rv = 0;
 
 	for (i=0; i < minor_count; i++) {
-		if( !(odev = minor_to_mdev(i)) ) continue;
-		if ( odev->state.aftr_isp ) {
+		if ( !(odev = minor_to_mdev(i)) ) continue;
+		if (odev->state.aftr_isp) {
 			if (_drbd_may_sync_now(odev)) {
 				rv |= ( _drbd_set_state(_NS(odev,aftr_isp,0),
 							ChgStateHard|ScheduleAfter)
@@ -883,7 +883,7 @@
 	/* In case a previous resync run was aborted by an IO error... */
 	drbd_rs_cancel_all(mdev);
 
-	if(side == SyncTarget) {
+	if (side == SyncTarget) {
 		drbd_bm_reset_find(mdev);
 	} else /* side == SyncSource */ {
 		u64 uuid;
@@ -902,7 +902,7 @@
 
 	ns.conn = side;
 
-	if(side == SyncTarget) {
+	if (side == SyncTarget) {
 		ns.disk = Inconsistent;
 	} else /* side == SyncSource */ {
 		ns.pdsk = Inconsistent;
@@ -911,7 +911,7 @@
 	r = _drbd_set_state(mdev,ns,ChgStateVerbose);
 	ns = mdev->state;
 
-	if ( r == SS_Success ) {
+	if (r == SS_Success) {
 		mdev->rs_total     =
 		mdev->rs_mark_left = drbd_bm_total_weight(mdev);
 		mdev->rs_failed    = 0;
@@ -922,7 +922,7 @@
 	}
 	drbd_global_unlock();
 
-	if ( r == SS_Success ) {
+	if (r == SS_Success) {
 		after_state_ch(mdev,os,ns,ChgStateVerbose);
 
 		INFO("Began resync as %s (will sync %lu KB [%lu bits set]).\n",
@@ -930,12 +930,12 @@
 		     (unsigned long) mdev->rs_total << (BM_BLOCK_SIZE_B-10),
 		     (unsigned long) mdev->rs_total);
 
-		if ( mdev->rs_total == 0 ) {
+		if (mdev->rs_total == 0) {
 			drbd_resync_finished(mdev);
 			return;
 		}
 
-		if( ns.conn == SyncTarget ) {
+		if (ns.conn == SyncTarget) {
 			D_ASSERT(!test_bit(STOP_SYNC_TIMER,&mdev->flags));
 			mod_timer(&mdev->resync_timer,jiffies);
 		}
@@ -955,15 +955,15 @@
 
 	while (get_t_state(thi) == Running) {
 
-		if(down_trylock(&mdev->data.work.s)) {
+		if (down_trylock(&mdev->data.work.s)) {
 			down(&mdev->data.mutex);
-			if(mdev->data.socket)drbd_tcp_flush(mdev->data.socket);
+			if (mdev->data.socket)drbd_tcp_flush(mdev->data.socket);
 			up(&mdev->data.mutex);
 
 			intr = down_interruptible(&mdev->data.work.s);
 
 			down(&mdev->data.mutex);
-			if(mdev->data.socket) drbd_tcp_cork(mdev->data.socket);
+			if (mdev->data.socket) drbd_tcp_cork(mdev->data.socket);
 			up(&mdev->data.mutex);
 		}
 
@@ -1001,7 +1001,7 @@
 		list_del_init(&w->list);
 		spin_unlock_irq(&mdev->data.work.q_lock);
 
-		if(!w->cb(mdev,w, mdev->state.conn < Connected )) {
+		if (!w->cb(mdev,w, mdev->state.conn < Connected )) {
 			//WARN("worker: a callback failed! \n");
 			if (mdev->state.conn >= Connected)
 				drbd_force_state(mdev,NS(conn,NetworkFailure));

Modified: branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/drbd_wrappers.h	2007-07-24 09:44:10 UTC (rev 2984)
@@ -137,7 +137,7 @@
 /* XXX the check on !blk_queue_plugged is redundant,
  * implicitly checked in blk_plug_device */
 
-	if(!blk_queue_plugged(q)) {
+	if (!blk_queue_plugged(q)) {
 		blk_plug_device(q);
 		del_timer(&q->unplug_timer);
 		// unplugging should not happen automatically...

Modified: branches/drbd-8.0-for-linus/drbd/lru_cache.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/lru_cache.c	2007-07-24 08:47:36 UTC (rev 2983)
+++ branches/drbd-8.0-for-linus/drbd/lru_cache.c	2007-07-24 09:44:10 UTC (rev 2984)
@@ -215,7 +215,7 @@
 	BUG_ON(!lc->nr_elements);
 
 	PARANOIA_ENTRY();
-	if ( lc->flags & LC_STARVING ) {
+	if (lc->flags & LC_STARVING) {
 		++lc->starving;
 		RETURN(NULL);
 	}
@@ -223,7 +223,7 @@
 	e = lc_find(lc, enr);
 	if (e) {
 		++lc->hits;
-		if( e->refcnt++ == 0) lc->used++;
+		if (e->refcnt++ == 0) lc->used++;
 		list_move(&e->list,&lc->in_use); // Not evictable...
 		RETURN(e);
 	}
@@ -233,7 +233,7 @@
 	/* In case there is nothing available and we can not kick out
 	 * the LRU element, we have to wait ...
 	 */
-	if(!lc_unused_element_available(lc)) {
+	if (!lc_unused_element_available(lc)) {
 		__set_bit(__LC_STARVING,&lc->flags);
 		RETURN(NULL);
 	}
@@ -272,7 +272,7 @@
 	BUG_ON(!lc->nr_elements);
 
 	PARANOIA_ENTRY();
-	if ( lc->flags & LC_STARVING ) {
+	if (lc->flags & LC_STARVING) {
 		++lc->starving;
 		RETURN(NULL);
 	}
@@ -280,7 +280,7 @@
 	e = lc_find(lc, enr);
 	if (e) {
 		++lc->hits;
-		if( e->refcnt++ == 0) lc->used++;
+		if (e->refcnt++ == 0) lc->used++;
 		list_move(&e->list,&lc->in_use); // Not evictable...
 	}
 	RETURN(e);
@@ -311,7 +311,7 @@
 	PARANOIA_ENTRY();
 	BUG_ON(e->refcnt == 0);
 	BUG_ON(e == lc->changing_element);
-	if ( --e->refcnt == 0) {
+	if (--e->refcnt == 0) {
 		list_move(&e->list,&lc->lru); // move it to the front of LRU.
 		lc->used--;
 		clear_bit(__LC_STARVING,&lc->flags);
@@ -332,7 +332,7 @@
 {
 	struct lc_element *e;
 
-	if ( index < 0 || index >= lc->nr_elements ) return;
+	if (index < 0 || index >= lc->nr_elements) return;
 
 	e = lc_entry(lc,index);
 	e->lc_number = enr;
@@ -356,7 +356,7 @@
 	seq_printf(seq,"\tnn: lc_number refcnt %s\n ",utext);
 	for(i=0;i<nr_elements;i++) {
 		e = lc_entry(lc,i);
-		if( e->lc_number == LC_FREE ) {
+		if (e->lc_number == LC_FREE) {
 			seq_printf(seq,"\t%2d: FREE\n",i );
 		} else {
 			seq_printf(seq,"\t%2d: %4u %4u    ", i,



More information about the drbd-cvs mailing list