#
# drbd.conf example
#
# parameters you _need_ to change are the hostname, device, disk,
# address and port in the "on {}" sections.
#
# you ought to know about the protocol, inittimeout,
# skip-wait and load-only; and the fsckcmd.
#
# you probably want to set the rate in the syncer sections
#
# increase timeout and maybe ping-int in net{}, if you see
# problems with "connection lost/connection enabled"
# (or change your setup to reduce network latency; make sure full
# duplex behaves as such; check average roundtrip times while
# network is saturated ...)
#
#
# Updating from DRBD-0.6.x
#
# Using the size parameter in the disk section (was disk-size) is
# no longer recommended, since the agreed disk size is not stored
# in DRBD's non volatile meta data files.
#
# global {
#     # use this if you want to define more resources later
#     # without reloading the module.
#     # by default we load the module with exactly as many devices
#     # as configured in this file.
#     # minor_count=5
#
#     # this is for people who set up a drbd device via the
#     # loopback network interface or between two VMs on the same
#     # box, for testing/simulating/presentation
#     # otherwise it could trigger a run_task_queue deadlock.
#     # I'm not sure whether this deadlock can happen with two
#     # nodes, but it seems at least extremely unlikely; and since
#     # the io_hints boost performance, keep them enabled.
#     # disable_io_hints
# }
#
# this need not be r#, you may use phony resource names,
# like "resource web" or "resource mail", too
#
resource r0 {

  # transfer protocol to use.
  # C: write IO is reported as completed, if we know it has
  #    reached _both_ local and remote DISK.
  #    * for critical transactional data.
  # B: write IO is reported as completed, if it has reached
  #    local DISK and remote buffer cache.
  #    * for most cases.
  # A: write IO is reported as completed, if it has reached
  #    local DISK and local tcp send buffer. (see also sndbuf-size)
  #    * for high latency networks
  protocol = A

  # what should be done in case the cluster starts up in
  # degraded mode, but knows it has inconsistent data.
  incon-degr-cmd="halt -f"

  startup {
    # Wait for connection timeout.
    # The init script blocks the boot process until the resources
    # are connected.
    # In case you want to limit the wait time, do it here.
    wfc-timeout=30

    # Wait for connection timeout if this node was a degraded cluster.
    # In case a degraded cluster (= cluster with only one node left)
    # is rebooted, this timeout value is used.
    degr-wfc-timeout=120    # 2 minutes.
  }

  disk {
    # if the lower level device reports io-error you have the choice of
    #  "pass_on" -> Report the io-error to the upper layers.
    #               Primary   -> report it to the mounted file system.
    #               Secondary -> ignore it.
    #  "panic"   -> The node leaves the cluster by doing a kernel panic.
    #  "detach"  -> The node drops its backing storage device, and
    #               continues in disk less mode.
    on-io-error = detach

    # the device size in bytes, default unit is k (1 block == 1024 bytes)
    # should be the minimum of the sizes of the lower level devices of
    # the nodes.
    # Since DRBD-0.7 it is recommended to _not_ use this parameter.
    # size=1G
  }

  net {
    # this is the size of the tcp socket send buffer
    # increase it _carefully_ if you want to use proto A over a
    # high latency network with reasonable write throughput.
    # defaults to 2*65535; you might try even 1M, but if your kernel or
    # network driver chokes on that, you have been warned.
    # sndbuf-size = 512k

    # timeout = 60        # 6 seconds (unit = 0.1 seconds)
    # connect-int = 10    # 10 seconds (unit = 1 second)
    # ping-int = 10       # 10 seconds (unit = 1 second)

    # DRBD's writes are 4k. The minimum is hardcoded to 32 (=128 kb).
    # For high performance installations it might help if you increase
    # that number. These buffers are used to hold datablocks while they are
    # written to disk.
    # max-buffers = 2048

    # The highest number of data blocks between two write barriers.
    # If you set this < 10 you might decrease your performance.
    # max-epoch-size = 2048
  }

  syncer {
    # Limit the bandwidth used by the resynchronisation process.
    rate=10M

    # All devices in one group are resynchronized in parallel.
    # Resynchronisation of groups is serialized in ascending order.
    # Put DRBD resources which are on disjoint physical disks in one group.
    # Put DRBD resources on one physical disk in different groups.
    group=1

    # Configures the size of the active set. Each extent is 4M,
    # 257 Extents ~> 1GB active set size. In case your syncer
    # runs @ 10MB/sec, all resync after a primary's crash will last
    # 1GB / ( 10MB/sec ) ~ 102 seconds ~ One Minute and 42 Seconds.
    # BTW, the hash algorithm works best if the number of al-extents
    # is prime. (To test the worst case performance use a power of 2)
    al-extents=257
  }

  on ipdir1 {
    device=/dev/nb0
    disk=/dev/md3
    address=192.168.10.1
    port=7788
    meta-disk=internal

    # meta-disk=/dev/hde6
    # meta-index=0
    #
    # You can use a single block device as store for multiple meta-data
    # blocks. E.g. use meta-disk=/dev/hde6, meta-index=0 and
    # meta-disk=/dev/hde6, meta-index=1 for two resources. In this
    # case the meta-disk would need to be at least 256 MB in size.
    # 'internal' means, that the last 128 MB of the lower device are
    # used to store the meta-data.
  }

  on ipdir2 {
    device=/dev/nb0
    disk=/dev/md3
    address=192.168.10.2
    port=7788
    meta-disk=internal
  }
}