[DRBD-user] drbd resource ahead / behind problem

envisionrx ron.wells at envision-rx.com
Fri Mar 23 16:12:53 CET 2012


I forgot to repost the configs:
drbd.d/global_common.conf:

global {
        usage-count yes;
}

common {
        protocol C;

        handlers {
                pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
                pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
                local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
        }

        startup {
                # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb
        }

        disk {
           on-io-error detach;
        }

        net {
          after-sb-0pri discard-zero-changes;
          after-sb-1pri consensus;
          after-sb-2pri disconnect;
          ko-count 6;
        }

        syncer {
          rate 50M;
          csums-alg crc32c;
          verify-alg md5;
          use-rle;
        }
}
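
A quick way to confirm the common-section options above actually land on each resource (rather than being shadowed by per-resource settings) is to dump the configuration as DRBD parsed it. A minimal sketch, assuming the 8.3.x drbd-utils as shipped with Openfiler:

  # Print the merged, effective configuration for every resource:
  drbdadm dump all

  # Cross-check the options the kernel module is running with on one minor:
  drbdsetup /dev/drbd0 show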

drbd.conf:

# You can find an example in  /usr/share/doc/drbd.../drbd.conf.example

include "drbd.d/global_common.conf";
include "drbd.d/*.res";

resource meta_lower {
 disk /dev/backingvg/metabacking;
 device /dev/drbd0;
 meta-disk internal;
 disk {
   fencing resource-only;
 }
 handlers {
   fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
   after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
 }
 on openfiler1 {
    address 10.50.153.1:7788;
 }
 on openfiler2 {
    address 10.50.153.2:7788;
 }
}

resource data4_lower {
 device /dev/drbd4;
 disk  /dev/backingvg/1024data4backing;
 meta-disk internal;
 disk {
   fencing resource-only;
 }
 handlers {
   fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
   after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
 }
 on openfiler1 {
    address 10.50.153.1:7792;
 }
 on openfiler2 {
    address 10.50.153.2:7792;
 }
}

resource data5_lower {
 device /dev/drbd5;
 disk /dev/backingvg/2048data5backing;
 meta-disk internal;
 disk {
   fencing resource-only;
 }
 handlers {
   fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
   after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
 }
 on openfiler1 {
    address 10.50.153.1:7793;
 }
 on openfiler2 {
    address 10.50.153.2:7793;
 }
}
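
Before looking at the stacked resources, it is worth confirming that each lower resource is healthy on both Openfiler nodes, since a stacked device can only be as current as the lower device it sits on. A minimal check, assuming the resource names above:

  # Expect Connected and UpToDate/UpToDate on both nodes:
  drbdadm cstate meta_lower data4_lower data5_lower
  drbdadm dstate meta_lower data4_lower data5_lower

  # Or read the kernel's summary of all minors at once:
  cat /proc/drbd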


resource meta {
 protocol A;
 device /dev/drbd10;
 meta-disk internal;
 handlers {
    before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh";
    after-resync-target "/usr/lib/drbd/unsnapshot-resync-target-lvm.sh";
 }
 net {
   sndbuf-size 512k;
   on-congestion pull-ahead;
   congestion-fill 500k;
 }
 syncer {
   rate 1000k;
 }
 stacked-on-top-of meta_lower {
   address 10.50.150.101:7788;
 }
 on openfiler3 {
   disk /dev/backingvg/metabacking;
   address 10.50.250.4:7788;
 }
}
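
Since the thread is about resources sticking in Ahead/Behind: with on-congestion pull-ahead, this stacked connection should drop to Ahead (on the primary site) / Behind (on openfiler3) once the send buffer passes congestion-fill, then resync and return to Connected when the link catches up. One way to watch that cycle, a sketch assuming DRBD 8.3's stacked-resource handling:

  # Stacked resources need the --stacked (-S) flag on the upper node:
  drbdadm --stacked cstate meta    # Connected when idle, Ahead under sustained writes
  drbdadm --stacked dstate meta    # peer's disk typically shows Outdated while Behind

  # The ns:/pe: counters in /proc/drbd show whether data is still draining:
  watch -n1 cat /proc/drbd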

resource data4 {
 protocol A;
 device /dev/drbd14;
 meta-disk internal;
 handlers {
    before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh";
    after-resync-target "/usr/lib/drbd/unsnapshot-resync-target-lvm.sh";
 }
 net {
   data-integrity-alg md5;
   sndbuf-size 512k;
   on-congestion pull-ahead;
   congestion-fill 500k;
 }
 syncer {
   rate 2500k;
 }
 stacked-on-top-of data4_lower {
   address 10.50.150.101:7792;
 }
 on openfiler3 {
   disk /dev/backingvg/1024data4backing;
   address 10.50.250.4:7792;
 }
}

resource data5 {
 protocol A;
 device /dev/drbd15;
 meta-disk internal;
 handlers {
    before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh";
    after-resync-target "/usr/lib/drbd/unsnapshot-resync-target-lvm.sh";
 }
 net {
   sndbuf-size 512k;
   on-congestion pull-ahead;
   congestion-fill 500k;
 }
 syncer {
   rate 2500k;
 }
 stacked-on-top-of data5_lower {
   address 10.50.150.101:7793;
 }
 on openfiler3 {
   disk /dev/backingvg/2048data5backing;
   address 10.50.250.4:7793;
 }
}
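
One thing I would double-check in the stacked net sections: the relationship between congestion-fill 500k and sndbuf-size 512k. If congestion-fill is parsed in bytes, the connection only pulls ahead when the socket buffer is already almost full, which can make it flap between Ahead and resync; if your 8.3 build parses it in sectors (the drbd.conf man page will say which), 500k sectors is roughly 250 MB and the fill threshold would never fire against a 512k buffer at all. A larger buffer with a clearly lower threshold is easier to reason about; an illustrative, untested variant (numbers are assumptions, not recommendations):

 net {
   sndbuf-size 10M;           # room to absorb write bursts on the replication link
   on-congestion pull-ahead;  # go Ahead/Behind instead of blocking the primary
   congestion-fill 8M;        # pull ahead well before the buffer is exhausted
 }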

-- 
View this message in context: http://old.nabble.com/drbd-resource-ahead---behind-problem-tp33454636p33544661.html
Sent from the DRBD - User mailing list archive at Nabble.com.