Note: "permalinks" may not be as permanent as we would like,
direct links of old sources may well be a few messages off.
Hi @all,
I've got the following Openfiler HA-Cluster configuration here:
Primary: virtual machine (on ESXi 4):
2x 2 GHz Intel Xeon
1 GB RAM

Secondary: physical machine:
Intel Atom 330, 1.6 GHz
1 GB RAM
Both have two GBit interfaces, one for replication and the other one for
direct access.
Now I created an iSCSI volume, connected another ESXi 4 host to it as a
datastore, and put a VM on this iSCSI datastore for testing purposes.
The ESXi server and the storage cluster are connected via GBit.
If I run a benchmark by writing with dd to the local disk of the test VM
(a virtual disk that lies on the storage cluster), I only get about
20-25 MB/s. I really expected a write speed of at least 50 MB/s.
If I test writing to the local disks of the storage servers themselves,
I get 232 MB/s read and 190 MB/s write on the primary, and 80 MB/s read
and 60 MB/s write on the secondary (test commands sketched below).
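The dd runs were along these lines; block size, count and target paths
are from memory, so treat them as approximate rather than the literal
commands:

  # inside the test VM, writing to the virtual disk on the iSCSI datastore
  dd if=/dev/zero of=/tmp/ddtest bs=1M count=1024 oflag=direct

  # read test against the same file
  dd if=/tmp/ddtest of=/dev/null bs=1M iflag=direct

  # the same write test run directly on each storage node's local disk
  # (path is just a placeholder for a local mount point)
  dd if=/dev/zero of=/mnt/local/ddtest bs=1M count=1024 oflag=direct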
Where's the bottleneck?
What's wrong with this hardware configuration?
Do you have any tips on what I could try to get an acceptable write
performance of about 100-120 MB/s?
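That 100-120 MB/s target is just a rough back-of-the-envelope for the
GBit link (nominal numbers only, real throughput depends on TCP/iSCSI
overhead):

  # 1 GBit/s = 1000 Mbit/s expressed in MB/s, ignoring protocol overhead
  echo $((1000 / 8)) "MB/s"    # = 125 MB/s nominal
  # with roughly 10% overhead for TCP/iSCSI that leaves about 110 MB/s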
Here's my DRBD.conf:
global {
    # minor-count 64;
    # dialog-refresh 5; # 5 seconds
    # disable-ip-verification;
    usage-count ask;
}
common {
    syncer {
        rate 40M;
        al-extents 557;
    }
    net {
        #after-sb-0pri discard-zero-changes;
        #after-sb-1pri discard-secondary;
        #after-sb-2pri disconnect;
        max-epoch-size 8192;
        max-buffers 8192;
        unplug-watermark 8192;
        #sndbuf-size 512k;
    }
    startup {
        degr-wfc-timeout 120;
        wfc-timeout 240;
    }
}
resource cluster_metadata {
    protocol C;
    handlers {
        pri-on-incon-degr "echo O > /proc/sysrq-trigger ; halt -f";
        pri-lost-after-sb "echo O > /proc/sysrq-trigger ; halt -f";
        local-io-error "echo O > /proc/sysrq-trigger ; halt -f";
        # outdate-peer "/usr/sbin/drbd-peer-outdater";
    }
    startup {
        # wfc-timeout 0;
        degr-wfc-timeout 120; # 2 minutes.
    }
    disk {
        on-io-error detach;
    }
    net {
        after-sb-0pri disconnect;
        after-sb-1pri disconnect;
        after-sb-2pri disconnect;
        rr-conflict disconnect;
    }
    syncer {
        al-extents 257;
    }
    on storage3-a.wor.net {
        device /dev/drbd0;
        disk /dev/sda2;
        address 192.168.93.3:7788;
        meta-disk internal;
    }
    on storage3-b.wor.net {
        device /dev/drbd0;
        disk /dev/sda2;
        address 192.168.93.4:7788;
        meta-disk internal;
    }
}
resource vg0drbd {
    protocol C;
    startup {
        wfc-timeout 0;        ## Infinite!
        degr-wfc-timeout 120; ## 2 minutes.
    }
    disk {
        on-io-error detach;
    }
    net {
        # timeout 60;
        # connect-int 10;
        # ping-int 10;
        # max-buffers 2048;
        # max-epoch-size 2048;
    }
    syncer {
        after "cluster_metadata";
    }
    on storage3-a.wor.net {
        device /dev/drbd1;
        disk /dev/sda5;
        address 192.168.93.3:7789;
        meta-disk internal;
    }
    on storage3-b.wor.net {
        device /dev/drbd1;
        disk /dev/sda5;
        address 192.168.93.4:7789;
        meta-disk internal;
    }
}
resource vg0drbd2 {
    protocol C;
    startup {
        wfc-timeout 0;        ## Infinite!
        degr-wfc-timeout 120; ## 2 minutes.
    }
    disk {
        on-io-error detach;
    }
    syncer {
        after "vg0drbd";
    }
    on storage3-a.wor.net {
        device /dev/drbd2;
        disk /dev/sdb5;
        address 192.168.93.3:7790;
        meta-disk internal;
    }
    on storage3-b.wor.net {
        device /dev/drbd2;
        disk /dev/sda6;
        address 192.168.93.4:7790;
        meta-disk internal;
    }
}
resource vg0drbd3 {
    protocol C;
    startup {
        wfc-timeout 0;        ## Infinite!
        degr-wfc-timeout 120; ## 2 minutes.
    }
    disk {
        on-io-error detach;
    }
    syncer {
        after "vg0drbd2";
    }
    on storage3-a.wor.net {
        device /dev/drbd3;
        disk /dev/sdc5;
        address 192.168.93.3:7791;
        meta-disk internal;
    }
    on storage3-b.wor.net {
        device /dev/drbd3;
        disk /dev/sda7;
        address 192.168.93.4:7791;
        meta-disk internal;
    }
}
resource vg0drbd4 {
    protocol C;
    startup {
        wfc-timeout 0;        ## Infinite!
        degr-wfc-timeout 120; ## 2 minutes.
    }
    disk {
        on-io-error detach;
    }
    syncer {
        after "vg0drbd3";
    }
    on storage3-a.wor.net {
        device /dev/drbd4;
        disk /dev/sdd5;
        address 192.168.93.3:7792;
        meta-disk internal;
    }
    on storage3-b.wor.net {
        device /dev/drbd4;
        disk /dev/sda8;
        address 192.168.93.4:7792;
        meta-disk internal;
    }
}
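If it helps with diagnosing, the runtime state on both nodes can be
checked with the standard DRBD tools (nothing here is specific to my
setup):

  # connection state, protocol in use and out-of-sync counters
  cat /proc/drbd

  # the configuration exactly as DRBD has parsed it
  drbdadm dump all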
Greetings from Germany,
Andreas