[DRBD-cvs] svn commit by phil - r2433 - in trunk/drbd: . linux - * Changed the alignment of requests issued by w_make_re

drbd-cvs at lists.linbit.com drbd-cvs at lists.linbit.com
Wed Sep 20 10:35:25 CEST 2006


Author: phil
Date: 2006-09-20 10:35:23 +0200 (Wed, 20 Sep 2006)
New Revision: 2433

Modified:
   trunk/drbd/drbd_int.h
   trunk/drbd/drbd_main.c
   trunk/drbd/drbd_nl.c
   trunk/drbd/drbd_req.c
   trunk/drbd/drbd_worker.c
   trunk/drbd/linux/drbd_nl.h
Log:
* Changed the alignment of requests issued by w_make_resync_requests()
  to be always aligned (depending on the size of the request)
* Fixed our max_segment_size setting in case our backing device has
  a merge_bvec_function
* Allow the user to force the use of bigger requests on a device with
  a merge_bvec_function



Modified: trunk/drbd/drbd_int.h
===================================================================
--- trunk/drbd/drbd_int.h	2006-09-19 16:04:14 UTC (rev 2432)
+++ trunk/drbd/drbd_int.h	2006-09-20 08:35:23 UTC (rev 2433)
@@ -784,6 +784,7 @@
 	struct file *md_file;
 	struct drbd_md md;
 	struct disk_conf dc; /* The user provided config... */
+	merge_bvec_fn *bmbf; /* short cut to backing devices' merge_bvec_fn */
 };
 
 struct Drbd_Conf {

Modified: trunk/drbd/drbd_main.c
===================================================================
--- trunk/drbd/drbd_main.c	2006-09-19 16:04:14 UTC (rev 2432)
+++ trunk/drbd/drbd_main.c	2006-09-20 08:35:23 UTC (rev 2433)
@@ -638,7 +638,8 @@
 	/*  State sanitising  */
 	if( ns.conn < Connected ) {
 		ns.peer = Unknown;
-		if ( ns.pdsk > DUnknown ) ns.pdsk = DUnknown;
+		if ( ns.pdsk > DUnknown || 
+		     ns.pdsk < Inconsistent ) ns.pdsk = DUnknown;
 	}
 
 	if( ns.conn == StandAlone && ns.disk == Diskless ) {

Modified: trunk/drbd/drbd_nl.c
===================================================================
--- trunk/drbd/drbd_nl.c	2006-09-19 16:04:14 UTC (rev 2432)
+++ trunk/drbd/drbd_nl.c	2006-09-20 08:35:23 UTC (rev 2433)
@@ -553,11 +553,13 @@
 {
 	request_queue_t * const q = mdev->rq_queue;
 	request_queue_t * const b = mdev->bc->backing_bdev->bd_disk->queue;
+	//unsigned int old_max_seg_s = q->max_segment_size;
 
-	unsigned int old_max_seg_s = q->max_segment_size;
-
-	if(b->merge_bvec_fn) {
+	if(b->merge_bvec_fn && mdev->bc->dc.use_bmbv) {
+		mdev->bc->bmbf = b->merge_bvec_fn;
+	} else {
 		max_seg_s = PAGE_SIZE;
+		mdev->bc->bmbf = NULL;
 	}
 
 	q->max_sectors       = max_seg_s >> 9;
@@ -568,16 +570,20 @@
 	q->seg_boundary_mask = PAGE_SIZE-1;
 	blk_queue_stack_limits(q, b);
 
+	// KERNEL BUG. in ll_rw_blk.c
+	// t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
+	// should be 
+	// t->max_segment_size = min_not_zero(...,...)
+	
+	// workaround here:
+	if(q->max_segment_size == 0) q->max_segment_size = max_seg_s;
 
 	if(b->merge_bvec_fn) {
-		WARN("Backing device has merge_bvec_fn()!\n");
+		WARN("Backing device's merge_bvec_fn() = %p\n",
+		     b->merge_bvec_fn);
 	}
+	INFO("max_segment_size ( = BIO size ) = %u\n", q->max_segment_size);
 
-	//if( old_max_seg_s != q->max_segment_size ) {
-		INFO("max_segment_size ( = BIO size ) = %u\n",
-		     q->max_segment_size);
-	//}
-
 	if( q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
 		INFO("Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
 		     q->backing_dev_info.ra_pages,

Modified: trunk/drbd/drbd_req.c
===================================================================
--- trunk/drbd/drbd_req.c	2006-09-19 16:04:14 UTC (rev 2432)
+++ trunk/drbd/drbd_req.c	2006-09-20 08:35:23 UTC (rev 2433)
@@ -1084,18 +1084,25 @@
  * we should use DRBD_MAX_SEGMENT_SIZE instead of AL_EXTENT_SIZE */
 int drbd_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
 {
-	unsigned int bio_offset = (unsigned int)bio->bi_sector << 9; // 32 bit...
+	struct Drbd_Conf* mdev = (drbd_dev*) q->queuedata;
+	unsigned int bio_offset = (unsigned int)bio->bi_sector << 9; // 32 bit
 	unsigned int bio_size = bio->bi_size;
-	int max;
+	int limit, backing_limit;
 
 #if 1
-	max = DRBD_MAX_SEGMENT_SIZE - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size);
+	limit = DRBD_MAX_SEGMENT_SIZE - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size);
 #else
-	max = AL_EXTENT_SIZE - ((bio_offset & (AL_EXTENT_SIZE-1)) + bio_size);
+	limit = AL_EXTENT_SIZE - ((bio_offset & (AL_EXTENT_SIZE-1)) + bio_size);
 #endif
-	if (max < 0) max = 0;
-	if (max <= bvec->bv_len && bio_size == 0)
-		return bvec->bv_len;
-	else
-		return max;
+	if (limit < 0) limit = 0;
+	if (limit <= bvec->bv_len && bio_size == 0)
+		limit = bvec->bv_len;
+
+	if(limit && inc_local(mdev)) {
+		backing_limit = mdev->bc->bmbf(q,bio,bvec);
+		limit = min(limit,backing_limit);
+		dec_local(mdev);
+	}
+
+	return limit;
 }

Modified: trunk/drbd/drbd_worker.c
===================================================================
--- trunk/drbd/drbd_worker.c	2006-09-19 16:04:14 UTC (rev 2432)
+++ trunk/drbd/drbd_worker.c	2006-09-20 08:35:23 UTC (rev 2433)
@@ -256,6 +256,7 @@
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
 	int max_segment_size = mdev->rq_queue->max_segment_size;
 	int number,i,size;
+	int align;
 
 	PARANOIA_BUG_ON(w != &mdev->resync_work);
 
@@ -308,21 +309,29 @@
 
 #if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE
 		/* try to find some adjacent bits.
-		 * we stop if we have already the maximum req size
-		 * or if it the request would cross a 32k boundary
-		 * (play more nicely with most raid devices).
+		 * we stop if we have already the maximum req size.
 		 *
+		 * Aditionally always align bigger requests, in order to
+		 * be prepared for all stripe sizes of software RAIDs.
+		 *
 		 * we _do_ care about the agreed-uppon q->max_segment_size
 		 * here, as splitting up the requests on the other side is more
 		 * difficult.  the consequence is, that on lvm and md and other
 		 * "indirect" devices, this is dead code, since
 		 * q->max_segment_size will be PAGE_SIZE.
 		 */
+		align=1;
 		for (;;) {
 			if (size + BM_BLOCK_SIZE > max_segment_size)
 				break;
-			if ((sector & ~63ULL) + BM_BIT_TO_SECT(2) <= 64ULL)
+
+			// Be always aligned
+			if (sector & ((1<<(align+3))-1) ) {
+				WARN("sector %llu w.b. unaligned size "
+				     "%d (%d)\n",sector,size,align);
 				break;
+			}
+
 			// do not cross extent boundaries
 			if (( (bit+1) & BM_BLOCKS_PER_BM_EXT_MASK ) == 0)
 				break;
@@ -335,6 +344,7 @@
 				break;
 			bit++;
 			size += BM_BLOCK_SIZE;
+			if( (BM_BLOCK_SIZE<<align) <= size) align++;
 			i++;
 		}
 		/* if we merged some,

Modified: trunk/drbd/linux/drbd_nl.h
===================================================================
--- trunk/drbd/linux/drbd_nl.h	2006-09-19 16:04:14 UTC (rev 2432)
+++ trunk/drbd/linux/drbd_nl.h	2006-09-20 08:35:23 UTC (rev 2433)
@@ -24,6 +24,7 @@
 	INTEGER(	5,	T_MANDATORY,	meta_dev_idx)
 	INTEGER(	6,	T_MAY_IGNORE,	on_io_error)
 	INTEGER(	7,	T_MAY_IGNORE,	fencing)
+	BIT(		37,	T_MAY_IGNORE,	use_bmbv)
 )
 
 PACKET(detach, )



More information about the drbd-cvs mailing list