[DRBD-user] Kernel Panic using LVM2 over DRBD 8.0.6

Dave Buechler dbuechler at cablemo.com
Mon Dec 3 07:50:27 CET 2007

Hi all,

Using a stock CentOS 5.0 install, I have been attempting to set up
replication of a large (approximately 1 TB) partition for use as an
LVM2 physical volume.  Unfortunately, after creating drbd0 and
completing the initial sync, the primary node kernel panics with a
message referencing drbd_req.c, line 374 -- I think.  Forgive me; I
don't have the panic message in front of me.

My configuration is as follows:

a partition sdb1 of type 8e (Linux LVM) on both nodes
create the DRBD meta data and bring up /dev/drbd0
make one node primary with the -o (--overwrite-data-of-peer) flag
let the initial sync finish (or not -- it doesn't appear to make any difference)
create a physical volume on /dev/drbd0 -- the kernel panics here
(rough commands below)
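
In terms of actual commands, the sequence is roughly as follows
(assuming the resource name r0 from the config below; the
force-primary syntax shown is the DRBD 8.0 form):

# on both nodes: write the meta data and bring the resource up
drbdadm create-md r0
drbdadm up r0

# on one node only: force it primary, overwriting the peer's data
# (this is what the -o flag does)
drbdadm -- --overwrite-data-of-peer primary r0

# on the primary: create the physical volume -- this is where it panics
pvcreate /dev/drbd0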

Can anyone tell me what I may be doing wrong?  I've been researching the
problem without much luck.  Any insight would be very helpful.

Thanks for your time.

I'm attaching copies of my config files below.

-- 
Regards,
David A. Buechler
System Administrator,
CableAmerica Missouri



drbd.conf:

global {
  usage-count no;
}

common {
  syncer { rate 110M; }
}

resource r0 {

  protocol C;

  handlers {
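    # on data-integrity emergencies, power the node off immediately
    # (sysrq 'o'), falling back to halt -f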
    pri-on-incon-degr "echo o > /proc/sysrq-trigger ; halt -f";
    pri-lost-after-sb "echo o > /proc/sysrq-trigger ; halt -f";
    local-io-error "echo o > /proc/sysrq-trigger ; halt -f";
  }

  startup {
    degr-wfc-timeout 90;    # 90 seconds.
  }

  disk {
    on-io-error   detach;
  }

  net {
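    # buffer / in-flight tuning; max-epoch-size caps the number of
    # write requests between two write barriers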
    sndbuf-size 256k;
    max-buffers     8192;
    unplug-watermark   32768;
    max-epoch-size  5;
    after-sb-0pri disconnect;
    after-sb-1pri disconnect;
    after-sb-2pri disconnect;
    rr-conflict disconnect;
  }

  syncer {
    rate 110M;
    al-extents 257;
  }

  on node1 {
    device     /dev/drbd0;
    disk       /dev/sdb1;
    address    172.16.1.1:7788;
    meta-disk internal;
  }

  on node2 {
    device    /dev/drbd0;
    disk      /dev/sdb1;
    address   172.16.1.2:7788;
    meta-disk internal;
  }
}
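
As a minimal sanity check of the file itself (standard drbdadm
subcommands; nothing here is specific to this setup):

# parse drbd.conf and print the effective configuration; typos in
# device paths or keywords are reported here
drbdadm dump r0

# after "drbdadm up r0" on both nodes, watch connection and sync state
cat /proc/drbd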



lvm.conf:

devices {
    scan = [ "/dev" ]
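    # reject the raw backing device so LVM only sees the PV through
    # /dev/drbd0 (with internal meta data, the same PV signature is
    # visible at the start of /dev/sdb1)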
    filter = [ "r|/dev/cdrom|", "r|/dev/sdb1|", "a|/dev/drbd0|" ]
    cache = "/etc/lvm/.cache"
    write_cache_state = 1
    types = [ "drbd", 16 ]
    sysfs_scan = 1
    md_component_detection = 1
}

log {
    verbose = 0
    syslog = 1
    overwrite = 0
    level = 0
    indent = 1
    command_names = 0
    prefix = "  "
}

backup {
    backup = 1
    backup_dir = "/etc/lvm/backup"
    archive = 1
    archive_dir = "/etc/lvm/archive"
    retain_min = 10
    retain_days = 30
}

shell {
    history_size = 100
}

global {
    umask = 077
    test = 0
    activation = 1
    proc = "/proc"
    locking_type = 1
    fallback_to_clustered_locking = 1
    fallback_to_local_locking = 1
    locking_dir = "/var/lock/lvm"
}

activation {
    missing_stripe_filler = "/dev/ioerror"
    reserved_stack = 256
    reserved_memory = 8192
    process_priority = -18
    mirror_region_size = 512
    mirror_log_fault_policy = "allocate"
    mirror_device_fault_policy = "remove"
}
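
And a quick check that the filter above is doing its job (standard
LVM scanning tools; once pvcreate has run, only /dev/drbd0 should be
reported as a PV, and /dev/sdb1 should not appear):

# block devices LVM will consider after filtering
lvmdiskscan

# detected physical volumes
pvscan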


