[DRBD-user] domU, primary/primary

James Wilson jwilson at transolutions.net
Fri Sep 14 15:57:13 CEST 2007

Note: "permalinks" may not be as permanent as we would like,
direct links of old sources may well be a few messages off.


I currently have DRBD running on my clustered domUs. Everything works
fine except when one host gets fenced or rebooted. When the fenced host
comes back up I always end up in a Secondary/Unknown state. To get back
to Primary/Primary I have to unmount the drbd0 device on the active
host, run "drbdadm down all", and then actually stop and restart the
DRBD service on both nodes. The sync works perfectly after that, but how
can I avoid restarting the services just to get back to a known state?
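
Spelled out as commands, the manual recovery I do today looks roughly
like this (resource name r0 as in the config below; the /etc/init.d/drbd
path and the final drbdadm primary step are just how it works here, so
treat them as assumptions for other setups):

     # on the node that is still Primary
     umount /dev/drbd0            # release the filesystem first
     drbdadm down all             # take the DRBD resource down

     # on both nodes
     /etc/init.d/drbd stop        # stop the DRBD service
     /etc/init.d/drbd start       # start it again; the nodes reconnect and resync

     # once the resync has finished, promote both sides again
     drbdadm primary r0           # run on each node to get back to Primary/Primary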


global { usage-count yes; }

common { syncer { rate 100M; } }

resource r0 {
     protocol C;

     handlers {
          pri-on-incon-degr "echo o > /proc/sysrq-trigger ; halt -f";
          pri-lost-after-sb "echo o > /proc/sysrq-trigger ; halt -f";
          local-io-error    "echo o > /proc/sysrq-trigger ; halt -f";
     }

     startup {
          degr-wfc-timeout 120;    # 2 minutes.
     }

     net {
          after-sb-0pri discard-older-primary;
          cram-hmac-alg sha1;
          shared-secret "shared";
          allow-two-primaries;
     }

     disk {
          on-io-error detach;
     }

     syncer {
          al-extents 257;
     }

     on samba-u01 {
          device    /dev/drbd0;
          disk      /dev/xvdb;
          address   192.168.7.2:7788;
          meta-disk internal;
     }

     on samba-u02 {
          device    /dev/drbd0;
          disk      /dev/xvdb;
          address   192.168.7.3:7788;
          meta-disk internal;
     }
}
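
One thing I am not sure about: with allow-two-primaries set, do I also
need explicit after-sb-1pri and after-sb-2pri policies in the net section
so the nodes reconnect on their own after a split brain? The values below
are only a guess from my reading of the docs, not something I have tested:

     net {
          allow-two-primaries;
          cram-hmac-alg sha1;
          shared-secret "shared";
          after-sb-0pri discard-older-primary;
          after-sb-1pri discard-secondary;   # guess: prefer the node that stayed Primary
          after-sb-2pri disconnect;          # guess: never auto-resolve two primaries
     }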



