Note: "permalinks" may not be as permanent as we would like,
direct links of old sources may well be a few messages off.
Hello all,
I'm having problems with the performance of my DRBD installation.

I'm running two servers with DRBD 8.3.7 in a primary/secondary configuration, installed on Openfiler 2.3. When I copy a single 2GB file directly onto the DRBD device, I get at most 13MB/s. The same device also serves as Xen shared storage with 8 VMs on it; under normal load the same test only reaches 2.6MB/s.
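The copy test was essentially of this form (a dd-based sketch; the mount point and file name are placeholders, and oflag=direct keeps the page cache out of the measurement):

    # write 2GB to the DRBD-backed filesystem, bypassing the page cache
    dd if=/dev/zero of=/mnt/drbd0/testfile bs=1M count=2048 oflag=direct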
I monitored the direct network connection used to sync the two nodes and measured at most 236Mbit/s (29.5MB/s). This link uses two 1Gbit network adapters connected back-to-back with crossover cables.
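For comparison, raw throughput on such a link can be checked with a simple point-to-point test (a sketch assuming iperf; any similar tool would do):

    # on filer2 (receiver)
    iperf -s
    # on filer1 (sender), over the crosslink address
    iperf -c 10.3.2.2 -t 30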
Here is my configuration file, drbd.conf:
global {
    # minor-count 64;
    # dialog-refresh 5; # 5 seconds
    # disable-ip-verification;
    usage-count ask;
}

common {
    syncer { rate 100M; }
}
resource cluster_metadata {
    protocol C;

    handlers {
        pri-on-incon-degr "echo o > /proc/sysrq-trigger ; halt -f";
        pri-lost-after-sb "echo o > /proc/sysrq-trigger ; halt -f";
        local-io-error "echo o > /proc/sysrq-trigger ; halt -f";
        # outdate-peer "/usr/sbin/drbd-peer-outdater";
    }

    startup {
        # wfc-timeout 0;
        degr-wfc-timeout 120; # 2 minutes.
    }

    disk {
        on-io-error detach;
    }

    net {
        after-sb-0pri disconnect;
        after-sb-1pri disconnect;
        after-sb-2pri disconnect;
        rr-conflict disconnect;
    }

    syncer {
        # rate 10M;
        # after "r2";
        al-extents 257;
    }

    on filer1 {
        device /dev/drbd0;
        disk /dev/sdc1;
        address 10.3.2.1:7788;
        meta-disk internal;
    }

    on filer2 {
        device /dev/drbd0;
        disk /dev/sdc1;
        address 10.3.2.2:7788;
        meta-disk internal;
    }
}
resource vg0drbd {
    protocol C;

    startup {
        wfc-timeout 0; ## Infinite!
        degr-wfc-timeout 120; ## 2 minutes.
    }

    disk {
        no-disk-flushes;
        on-io-error detach;
    }

    net {
        # timeout 60;
        # connect-int 10;
        # ping-int 10;
        # max-buffers 2048;
        # max-epoch-size 2048;
        sndbuf-size 0;
    }

    syncer {
        after "cluster_metadata";
    }

    on filer1 {
        device /dev/drbd1;
        disk /dev/sdc2;
        address 10.3.2.1:7789;
        meta-disk internal;
    }

    on filer2 {
        device /dev/drbd1;
        disk /dev/sdc2;
        address 10.3.2.2:7789;
        meta-disk internal;
    }
}
drbdsetup drbd1 show reports:
disk {
    size                0s _is_default; # bytes
    on-io-error         detach;
    fencing             dont-care _is_default;
    no-disk-flushes     ;
    max-bio-bvecs       0 _is_default;
}
net {
    timeout             60 _is_default; # 1/10 seconds
    max-epoch-size      2048 _is_default;
    max-buffers         2048 _is_default;
    unplug-watermark    128 _is_default;
    connect-int         10 _is_default; # seconds
    ping-int            10 _is_default; # seconds
    sndbuf-size         0 _is_default; # bytes
    rcvbuf-size         0 _is_default; # bytes
    ko-count            0 _is_default;
    after-sb-0pri       disconnect _is_default;
    after-sb-1pri       disconnect _is_default;
    after-sb-2pri       disconnect _is_default;
    rr-conflict         disconnect _is_default;
    ping-timeout        5 _is_default; # 1/10 seconds
}
syncer {
    rate                102400k; # bytes/second
    after               0;
    al-extents          127 _is_default;
}
protocol C;
_this_host {
    device              minor 1;
    disk                "/dev/sdc2";
    meta-disk           internal;
    address             ipv4 10.3.2.1:7789;
}
_remote_host {
    address             ipv4 10.3.2.2:7789;
}
Underneath the drbd1 volume I'm using a RAID5 array on an Adaptec AAC-RAID controller with 1.8TB of disk capacity.
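To rule out the array itself, the backing device can be measured directly (a read-only sketch that bypasses the cache; I avoid a raw write test on /dev/sdc2 since it would destroy data):

    # sequential read throughput of the raw backing partition
    dd if=/dev/sdc2 of=/dev/null bs=1M count=2048 iflag=direct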
Can anyone help me increase the I/O performance?
Best regards
Andreas Weigand