[DRBD-user] drbd9 protocol A on-congestion pull-ahead
Den
drbdsys at made.net
Fri Mar 23 12:07:45 CET 2018
Hello,
I'm working on performance tuning with protocol A over a 500 Mb/s link.
I have set up on-congestion pull-ahead, congestion-fill and congestion-extents.
During intensive writes the 500 Mb/s link is the bottleneck, yet pull-ahead
never triggers and the replication is not detached.
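For reference, here is the raw arithmetic behind the thresholds configured
below (a sketch assuming the drbd.conf(5) conventions that sndbuf-size is
given in bytes and congestion-fill in 512-byte sectors; worth verifying
against the man pages of your build):

# 500 Mbit/s link throughput in bytes per second
echo $(( 500 * 1000 * 1000 / 8 ))        # 62500000, ~62.5 MB/s
# sndbuf-size 10485760 converted to MiB
echo $(( 10485760 / 1024 / 1024 ))       # 10 MiB socket send buffer
# congestion-fill 524288 sectors converted to MiB
echo $(( 524288 * 512 / 1024 / 1024 ))   # 256 MiB in-flight threshold

If those units are right, the in-flight threshold is far larger than what
the 10 MiB send buffer can ever hold, which would match pull-ahead never
triggering.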
My configuration is below; please give me some suggestions or point out
anything I have missed.
Thank you
----------------------------------------------------------------
cat /proc/drbd
version: 9.0.12-1 (api:2/proto:86-112)
GIT-hash: 7eb4aef4abbfba8ebb1afbcc30574df74db0063e build by root@int1, 2018-03-18 16:38:45
Transports (api:16): tcp (9.0.12-1)
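To see whether pull-ahead ever engages during the write test, the live
replication state can be watched while the load runs (standard drbd-utils
9.x commands; the resource name is the one shown further down):

# One-shot view: the peer-device replication state should read
# Ahead/Behind instead of Established while pulled ahead
drbdadm status vm-101-disk-1
# Or stream every state change as it happens
drbdsetup events2 vm-101-disk-1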
drbdmanage net-options --protocol A --sites 'farm1:ext1'
drbdmanage net-options --max-buffers 20000 --sites 'farm1:ext1'
drbdmanage net-options --max-epoch-size 20000 --sites 'farm1:ext1'
drbdmanage net-options --sndbuf-size 10485760 --sites 'farm1:ext1'
drbdmanage net-options --on-congestion pull-ahead --sites 'farm1:ext1'
drbdmanage net-options --congestion-fill 524288 --sites 'farm1:ext1'
drbdmanage net-options --congestion-extents 1024 --sites 'farm1:ext1'
drbdmanage disk-options --al-extents 2048 --common
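To confirm the options actually reached the running resource (and not only
the drbdmanage database), the effective configuration can be dumped:

# Show the net options DRBD is really using for this resource
drbdsetup show vm-101-disk-1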
cat /var/lib/drbd.d/drbdmanage_global_common.conf
common {
    disk {
        al-extents 2048;
    }
}
cat /var/lib/drbd.d/drbdmanage_vm-101-disk-1.res
resource vm-101-disk-1 {
    template-file "/var/lib/drbd.d/drbdmanage_global_common.conf";

    net {
        allow-two-primaries yes;
        cram-hmac-alg sha1;
    }

    connection {
        host int1;
        host ext1;
        net {
            max-epoch-size 20000;
            protocol A;
            max-buffers 20000;
            allow-two-primaries no;
            congestion-extents 1024;
            on-congestion pull-ahead;
            sndbuf-size 10485760;
            congestion-fill 524288;
        }
    }

    on int1 {
        node-id 1;
        volume 0 {
            device minor 105;
            disk /dev/drbdpool/vm-101-disk-1_00;
            disk {
                size 52428800k;
            }
            meta-disk internal;
        }
    }

    on ext1 {
        node-id 0;
        volume 0 {
            device minor 105;
            disk /dev/drbdpool/vm-101-disk-1_00;
            disk {
                size 52428800k;
            }
            meta-disk internal;
        }
    }
}
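Since pull-ahead fires on the amount of replication data in flight, it can
also help to watch that counter directly under load (a sketch; with
drbd-utils 9.x the connection line of the statistics output carries
congested: and ap-in-flight: fields, though names can vary between versions):

# Poll once per second; ap-in-flight is the value compared against
# congestion-fill, and congested: should flip to yes at the threshold
watch -n1 'drbdsetup status --verbose --statistics vm-101-disk-1'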