From 77adf4cd648dce41f89469dd185deec6b6b53a0b Mon Sep 17 00:00:00 2001
From: Vikas Gorur
Date: Wed, 18 Feb 2009 17:36:07 +0530
Subject: Added all files

---
 doc/examples/Makefile.am         |   8 ++
 doc/examples/README              |  13 +++
 doc/examples/filter.vol          |  23 +++++
 doc/examples/io-cache.vol        |  25 ++++++
 doc/examples/io-threads.vol      |  21 +++++
 doc/examples/posix-locks.vol     |  20 +++++
 doc/examples/protocol-client.vol |  17 ++++
 doc/examples/protocol-server.vol |  25 ++++++
 doc/examples/read-ahead.vol      |  22 +++++
 doc/examples/replicate.vol       | 119 ++++++++++++++++++++++++++
 doc/examples/stripe.vol          | 121 ++++++++++++++++++++++++++
 doc/examples/trace.vol           |  16 ++++
 doc/examples/trash.vol           |  20 +++++
 doc/examples/unify.vol           | 178 +++++++++++++++++++++++++++++++++++++++
 doc/examples/write-behind.vol    |  26 ++++++
 15 files changed, 654 insertions(+)
 create mode 100644 doc/examples/Makefile.am
 create mode 100644 doc/examples/README
 create mode 100644 doc/examples/filter.vol
 create mode 100644 doc/examples/io-cache.vol
 create mode 100644 doc/examples/io-threads.vol
 create mode 100644 doc/examples/posix-locks.vol
 create mode 100644 doc/examples/protocol-client.vol
 create mode 100644 doc/examples/protocol-server.vol
 create mode 100644 doc/examples/read-ahead.vol
 create mode 100644 doc/examples/replicate.vol
 create mode 100644 doc/examples/stripe.vol
 create mode 100644 doc/examples/trace.vol
 create mode 100644 doc/examples/trash.vol
 create mode 100644 doc/examples/unify.vol
 create mode 100644 doc/examples/write-behind.vol

diff --git a/doc/examples/Makefile.am b/doc/examples/Makefile.am
new file mode 100644
index 000000000..b4c93f4c9
--- /dev/null
+++ b/doc/examples/Makefile.am
@@ -0,0 +1,8 @@
+EXTRA = README unify.vol replicate.vol stripe.vol protocol-client.vol protocol-server.vol posix-locks.vol trash.vol write-behind.vol io-threads.vol io-cache.vol read-ahead.vol filter.vol trace.vol
+EXTRA_DIST = $(EXTRA)
+
+docdir = $(datadir)/doc/$(PACKAGE_NAME)
+Examplesdir = $(docdir)/examples
+Examples_DATA = $(EXTRA)
+
+CLEANFILES =
diff --git a/doc/examples/README b/doc/examples/README
new file mode 100644
index 000000000..4d472ac08
--- /dev/null
+++ b/doc/examples/README
@@ -0,0 +1,13 @@
+GlusterFS's translator feature is very flexible, and there are many different
+ways one can configure a filesystem to behave.
+
+The volume specification file is how GlusterFS is told how it should work;
+it acts based on what is written there.
+
+The following URLs give more detail about all of this:
+
+* http://www.gluster.org/docs/index.php/GlusterFS
+* http://www.gluster.org/docs/index.php/GlusterFS_Volume_Specification
+* http://www.gluster.org/docs/index.php/GlusterFS_Translators
+
+Mail any questions or suggestions to 'gluster-devel(at)nongnu.org'.
diff --git a/doc/examples/filter.vol b/doc/examples/filter.vol
new file mode 100644
index 000000000..ca5c59837
--- /dev/null
+++ b/doc/examples/filter.vol
@@ -0,0 +1,23 @@
+volume client
+  type protocol/client
+  option transport-type tcp          # for TCP/IP transport
+  option remote-host 192.168.1.10    # IP address of the remote brick
+  option remote-subvolume brick      # name of the remote volume
+end-volume
+
+## In a normal clustered storage setup, any of the cluster translators can come here.
+#
+# Definition of other clients
+#
+# Definition of cluster translator (may be unify, afr, or unify over afr)
+#
+
+### The 'filter' translator is used on the client side (or on the server side, according to need). It makes all the translators below it (that is, its subvolumes) read-only. Hence, if one wants a 'read-only' filesystem, using filter as the topmost volume makes it really fast, since the fops are answered from this level itself.
+
+volume filter-ro
+  type features/filter
+  option root-squashing enable
+#  option completely-read-only yes
+#  translate-uid 1-99=0
+  subvolumes client
+end-volume
\ No newline at end of file
diff --git a/doc/examples/io-cache.vol b/doc/examples/io-cache.vol
new file mode 100644
index 000000000..5f3eca4c5
--- /dev/null
+++ b/doc/examples/io-cache.vol
@@ -0,0 +1,25 @@
+volume client
+  type protocol/client
+  option transport-type tcp          # for TCP/IP transport
+  option remote-host 192.168.1.10    # IP address of the remote brick
+  option remote-subvolume brick      # name of the remote volume
+end-volume
+
+## In a normal clustered storage setup, any of the cluster translators can come here.
+#
+# Definition of other clients
+#
+# Definition of cluster translator (may be unify, replicate, or unify over replicate)
+#
+
+### The 'io-cache' translator is best used on the client side when the filesystem has files that are not modified frequently but are read several times. For example, while compiling a kernel, the *.h files are read while compiling every *.c file; in such cases io-cache comes in very handy, as it keeps the whole file content in its cache and serves reads from there.
+# One can also assign cache priorities to filename patterns.
+
+volume ioc
+  type performance/io-cache
+  subvolumes client                    # 'client' in this example; change it according to your spec file
+  option page-size 1MB                 # 128KB is default
+  option cache-size 64MB               # 32MB is default
+  option force-revalidate-timeout 5    # 1 second is default
+  option priority *.html:2,*:1         # default is *:0
+end-volume
diff --git a/doc/examples/io-threads.vol b/doc/examples/io-threads.vol
new file mode 100644
index 000000000..9954724e1
--- /dev/null
+++ b/doc/examples/io-threads.vol
@@ -0,0 +1,21 @@
+
+volume brick
+  type storage/posix               # POSIX FS translator
+  option directory /home/export    # Export this directory
+end-volume
+
+### The 'io-threads' translator gives threaded behaviour to file I/O calls; all other fops keep their default behaviour. Loading it on the server side helps reduce network contention (which can otherwise be mistaken for a GlusterFS hang).
+# It can also be loaded on the client side, below write-behind, to reduce the latency caused by a slow network.
+volume iot
+  type performance/io-threads
+  subvolumes brick
+  option thread-count 4    # default value is 1
+end-volume
+
+volume server
+  type protocol/server
+  subvolumes iot brick
+  option transport-type tcp                 # For TCP/IP transport
+  option auth.addr.brick.allow 192.168.*    # Allow access to "brick" volume
+  option auth.addr.iot.allow 192.168.*      # Allow access to "iot" volume
+end-volume
diff --git a/doc/examples/posix-locks.vol b/doc/examples/posix-locks.vol
new file mode 100644
index 000000000..b9c9e7a64
--- /dev/null
+++ b/doc/examples/posix-locks.vol
@@ -0,0 +1,20 @@
+
+volume brick
+  type storage/posix               # POSIX FS translator
+  option directory /home/export    # Export this directory
+end-volume
+
+### The 'posix-locks' feature should be added on the server side (with the posix volume as its subvolume), because that is where the actual files live.
+volume p-locks
+  type features/posix-locks
+  subvolumes brick
+  option mandatory on
+end-volume
+
+volume server
+  type protocol/server
+  subvolumes p-locks brick
+  option transport-type tcp
+  option auth.addr.brick.allow 192.168.*      # Allow access to "brick" volume
+  option auth.addr.p-locks.allow 192.168.*    # Allow access to "p-locks" volume
+end-volume
diff --git a/doc/examples/protocol-client.vol b/doc/examples/protocol-client.vol
new file mode 100644
index 000000000..43108f2c2
--- /dev/null
+++ b/doc/examples/protocol-client.vol
@@ -0,0 +1,17 @@
+volume client
+  type protocol/client
+  option transport-type tcp          # for TCP/IP transport
+# option transport-type ib-sdp       # for Infiniband transport
+  option remote-host 192.168.1.10    # IP address of the remote brick
+# option transport.socket.remote-port 6996    # default server port is 6996
+
+# option transport-type ib-verbs     # for Infiniband verbs transport
+# option transport.ib-verbs.work-request-send-size 1048576
+# option transport.ib-verbs.work-request-send-count 16
+# option transport.ib-verbs.work-request-recv-size 1048576
+# option transport.ib-verbs.work-request-recv-count 16
+# option transport.ib-verbs.remote-port 6996    # default server port is 6996
+
+  option remote-subvolume brick      # name of the remote volume
+# option transport-timeout 30        # default value is 120 seconds
+end-volume
diff --git a/doc/examples/protocol-server.vol b/doc/examples/protocol-server.vol
new file mode 100644
index 000000000..88477511f
--- /dev/null
+++ b/doc/examples/protocol-server.vol
@@ -0,0 +1,25 @@
+
+### Export volume "brick" with the contents of the "/home/export" directory.
+volume brick
+  type storage/posix               # POSIX FS translator
+  option directory /home/export    # Export this directory
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp        # For TCP/IP transport
+# option transport.socket.listen-port 6996    # Default is 6996
+
+# option transport-type ib-verbs   # For Infiniband Verbs transport
+# option transport.ib-verbs.work-request-send-size 131072
+# option transport.ib-verbs.work-request-send-count 64
+# option transport.ib-verbs.work-request-recv-size 131072
+# option transport.ib-verbs.work-request-recv-count 64
+# option transport.ib-verbs.listen-port 6996    # Default is 6996
+
+# option bind-address 192.168.1.10    # Default is to listen on all interfaces
+# option client-volume-filename /etc/glusterfs/glusterfs-client.vol
+  subvolumes brick
+  option auth.addr.brick.allow 192.168.*    # Allow access to "brick" volume
+end-volume
diff --git a/doc/examples/read-ahead.vol b/doc/examples/read-ahead.vol
new file mode 100644
index 000000000..3ce0d95ac
--- /dev/null
+++ b/doc/examples/read-ahead.vol
@@ -0,0 +1,22 @@
+volume client
+  type protocol/client
+  option transport-type tcp          # for TCP/IP transport
+  option remote-host 192.168.1.10    # IP address of the remote brick
+  option remote-subvolume brick      # name of the remote volume
+end-volume
+
+## In a normal clustered storage setup, any of the cluster translators can come here.
+#
+# Definition of other clients
+#
+# Definition of cluster translator (may be unify, replicate, or unify over replicate)
+#
+
+### The 'read-ahead' translator is best utilized on the client side, as it prefetches file contents as soon as the first read() call is issued.
+volume ra
+  type performance/read-ahead
+  subvolumes client                  # 'client' in this example; change it according to your spec file
+  option page-size 1MB               # default is 256KB
+  option page-count 4                # default is 2
+  option force-atime-update no       # default is 'no'
+end-volume
diff --git a/doc/examples/replicate.vol b/doc/examples/replicate.vol
new file mode 100644
index 000000000..8c9541444
--- /dev/null
+++ b/doc/examples/replicate.vol
@@ -0,0 +1,119 @@
+### 'NOTE'
+# This file contains both the server spec and the client spec, to give an overall picture of replicate's spec file. Hence it cannot be used as-is as a GlusterFS spec file.
+# One needs to separate out the server spec and the client spec to get it working.
+
+#=========================================================================
+
+# **** server1 spec file ****
+
+### Export volume "brick" with the contents of the "/home/export" directory.
+volume posix1
+  type storage/posix                # POSIX FS translator
+  option directory /home/export1    # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick1
+  type features/posix-locks
+  option mandatory on               # enables mandatory locking on all files
+  subvolumes posix1
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6996      # Default is 6996
+# option client-volume-filename /etc/glusterfs/glusterfs-client.vol
+  subvolumes brick1
+  option auth.addr.brick1.allow *               # Allow access to "brick1" volume
+end-volume


+#=========================================================================
+
+# **** server2 spec file ****
+volume posix2
+  type storage/posix                # POSIX FS translator
+  option directory /home/export2    # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick2
+  type features/posix-locks
+  option mandatory on               # enables mandatory locking on all files
+  subvolumes posix2
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6997      # Default is 6996
+  subvolumes brick2
+  option auth.addr.brick2.allow *               # Allow access to "brick2" volume
+end-volume


+#=========================================================================
+
+# **** server3 spec file ****
+
+volume posix3
+  type storage/posix                # POSIX FS translator
+  option directory /home/export3    # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick3
+  type features/posix-locks
+  option mandatory on               # enables mandatory locking on all files
+  subvolumes posix3
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6998      # Default is 6996
+  subvolumes brick3
+  option auth.addr.brick3.allow *               # Allow access to "brick3" volume
+end-volume


+#=========================================================================
+
+# **** Clustered Client config file ****
+
+### Add client feature and attach to remote subvolume of server1
+volume client1
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6996      # default server port is 6996
+  option remote-subvolume brick1                # name of the remote volume
+end-volume
+
+### Add client feature and attach to remote subvolume of server2
+volume client2
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6997      # default server port is 6996
+  option remote-subvolume brick2                # name of the remote volume
+end-volume
+
+volume client3
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6998      # default server port is 6996
+  option remote-subvolume brick3                # name of the remote volume
+end-volume
+
+## Add replicate feature.
+volume replicate
+  type cluster/replicate
+  subvolumes client1 client2 client3
+end-volume
+
diff --git a/doc/examples/stripe.vol b/doc/examples/stripe.vol
new file mode 100644
index 000000000..ea24cf860
--- /dev/null
+++ b/doc/examples/stripe.vol
@@ -0,0 +1,121 @@
+
+### 'NOTE'
+# This file contains both the server spec and the client spec, to give an overall picture of stripe's spec file. Hence it cannot be used as-is as a GlusterFS spec file.
+# One needs to separate out the server spec and the client spec to get it working.
+
+#=========================================================================
+
+# **** server1 spec file ****
+
+### Export volume "brick" with the contents of the "/home/export" directory.
+volume posix1
+  type storage/posix                # POSIX FS translator
+  option directory /home/export1    # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick1
+  type features/posix-locks
+  option mandatory on               # enables mandatory locking on all files
+  subvolumes posix1
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6996      # Default is 6996
+# option client-volume-filename /etc/glusterfs/glusterfs-client.vol
+  subvolumes brick1
+  option auth.addr.brick1.allow *               # Allow access to "brick1" volume
+end-volume


+#=========================================================================
+
+# **** server2 spec file ****
+volume posix2
+  type storage/posix                # POSIX FS translator
+  option directory /home/export2    # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick2
+  type features/posix-locks
+  option mandatory on               # enables mandatory locking on all files
+  subvolumes posix2
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6997      # Default is 6996
+  subvolumes brick2
+  option auth.addr.brick2.allow *               # Allow access to "brick2" volume
+end-volume


+#=========================================================================
+
+# **** server3 spec file ****
+
+volume posix3
+  type storage/posix                # POSIX FS translator
+  option directory /home/export3    # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick3
+  type features/posix-locks
+  option mandatory on               # enables mandatory locking on all files
+  subvolumes posix3
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6998      # Default is 6996
+  subvolumes brick3
+  option auth.addr.brick3.allow *               # Allow access to "brick3" volume
+end-volume


+#=========================================================================
+
+# **** Clustered Client config file ****
+
+### Add client feature and attach to remote subvolume of server1
+volume client1
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6996      # default server port is 6996
+  option remote-subvolume brick1                # name of the remote volume
+end-volume
+
+### Add client feature and attach to remote subvolume of server2
+volume client2
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6997      # default server port is 6996
+  option remote-subvolume brick2                # name of the remote volume
+end-volume
+
+volume client3
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6998      # default server port is 6996
+  option remote-subvolume brick3                # name of the remote volume
+end-volume
+
+## Add Stripe Feature.
+volume stripe
+  type cluster/stripe
+  subvolumes client1 client2 client3
+  option block-size 1MB
+end-volume
+
diff --git a/doc/examples/trace.vol b/doc/examples/trace.vol
new file mode 100644
index 000000000..3f4864db4
--- /dev/null
+++ b/doc/examples/trace.vol
@@ -0,0 +1,16 @@
+volume client
+  type protocol/client
+  option transport-type tcp          # for TCP/IP transport
+  option remote-host 192.168.1.10    # IP address of the remote brick
+  option remote-subvolume brick      # name of the remote volume
+end-volume
+
+### The 'trace' translator is a very handy debugging tool for GlusterFS, as it can be loaded between any two volumes without changing the behaviour of the filesystem.
+# On the client side it can be the topmost volume in the spec (as here), to see what calls are made on the FUSE filesystem when the mounted filesystem is accessed.
+
+volume trace
+  type debug/trace
+  subvolumes client
+end-volume
+
+# 'NOTE:' With the 'debug/trace' translator loaded, the filesystem will be very slow, as each and every call is logged to the log file.
diff --git a/doc/examples/trash.vol b/doc/examples/trash.vol
new file mode 100644
index 000000000..16e71be32
--- /dev/null
+++ b/doc/examples/trash.vol
@@ -0,0 +1,20 @@
+
+volume brick
+  type storage/posix               # POSIX FS translator
+  option directory /home/export    # Export this directory
+end-volume
+
+### The 'trash' translator is best used on the server side, as it just renames a deleted file into 'trash-dir'; note that it turns one unlink call into 4 separate fops.
+volume trashcan
+  type features/trash
+  subvolumes brick
+  option trash-dir /.trashcan
+end-volume
+
+volume server
+  type protocol/server
+  subvolumes trashcan brick
+  option transport-type tcp                    # For TCP/IP transport
+  option auth.addr.brick.allow 192.168.*       # Allow access to "brick" volume
+  option auth.addr.trashcan.allow 192.168.*    # Allow access to "trashcan" volume
+end-volume
diff --git a/doc/examples/unify.vol b/doc/examples/unify.vol
new file mode 100644
index 000000000..4f7415a23
--- /dev/null
+++ b/doc/examples/unify.vol
@@ -0,0 +1,178 @@
+### 'NOTE'
+# This file contains both the server spec and the client spec, to give an overall picture of unify's spec file. Hence it cannot be used as-is as a GlusterFS spec file.
+# One needs to separate out the server spec and the client spec to get it working.


+#=========================================================================
+
+# **** server1 spec file ****
+
+### Export volume "brick" with the contents of the "/home/export" directory.
+volume posix1
+  type storage/posix                # POSIX FS translator
+  option directory /home/export1    # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick1
+  type features/posix-locks
+  option mandatory on               # enables mandatory locking on all files
+  subvolumes posix1
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6996      # Default is 6996
+# option client-volume-filename /etc/glusterfs/glusterfs-client.vol
+  subvolumes brick1
+  option auth.addr.brick1.allow *               # Allow access to "brick1" volume
+end-volume


+#=========================================================================
+
+# **** server2 spec file ****
+volume posix2
+  type storage/posix                # POSIX FS translator
+  option directory /home/export2    # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick2
+  type features/posix-locks
+  option mandatory on               # enables mandatory locking on all files
+  subvolumes posix2
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6997      # Default is 6996
+  subvolumes brick2
+  option auth.addr.brick2.allow *               # Allow access to "brick2" volume
+end-volume


+#=========================================================================
+
+# **** server3 spec file ****
+
+volume posix3
+  type storage/posix                # POSIX FS translator
+  option directory /home/export3    # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick3
+  type features/posix-locks
+  option mandatory on               # enables mandatory locking on all files
+  subvolumes posix3
+end-volume
+
+### Add network serving capability to the above brick.
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6998      # Default is 6996
+  subvolumes brick3
+  option auth.addr.brick3.allow *               # Allow access to "brick3" volume
+end-volume
+
+#=========================================================================
+
+# *** server for namespace ***
+### Export volume "brick-ns" with the contents of the "/home/export-ns" directory.
+volume brick-ns
+  type storage/posix                  # POSIX FS translator
+  option directory /home/export-ns    # Export this directory
+end-volume
+
+volume server
+  type protocol/server
+  option transport-type tcp                     # For TCP/IP transport
+  option transport.socket.listen-port 6999      # Default is 6996
+  subvolumes brick-ns
+  option auth.addr.brick-ns.allow *             # Allow access to "brick-ns" volume
+end-volume


+#=========================================================================
+
+# **** Clustered Client config file ****
+
+### Add client feature and attach to remote subvolume of server1
+volume client1
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+# option transport-type ib-sdp                  # for Infiniband transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6996      # default server port is 6996
+  option remote-subvolume brick1                # name of the remote volume
+end-volume
+
+### Add client feature and attach to remote subvolume of server2
+volume client2
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+# option transport-type ib-sdp                  # for Infiniband transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6997      # default server port is 6996
+  option remote-subvolume brick2                # name of the remote volume
+end-volume
+
+volume client3
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+# option transport-type ib-sdp                  # for Infiniband transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6998      # default server port is 6996
+  option remote-subvolume brick3                # name of the remote volume
+end-volume


+volume client-ns
+  type protocol/client
+  option transport-type tcp                     # for TCP/IP transport
+# option transport-type ib-sdp                  # for Infiniband transport
+  option remote-host 127.0.0.1                  # IP address of the remote brick
+  option transport.socket.remote-port 6999      # default server port is 6996
+  option remote-subvolume brick-ns              # name of the remote volume
+end-volume
+
+### Add unify feature to cluster the servers. Associate an
+### appropriate scheduler that matches your I/O demand.
+volume bricks
+  type cluster/unify
+  option namespace client-ns    # this volume will not be a storage child of unify
+  subvolumes client1 client2 client3
+### ** ALU Scheduler Options **
+  option self-heal background   # the other value is 'foreground'; default is foreground
+  option scheduler alu
+  option alu.limits.min-free-disk 5%            #%
+  option alu.limits.max-open-files 10000
+  option alu.order disk-usage:read-usage:write-usage:open-files-usage:disk-speed-usage
+  option alu.disk-usage.entry-threshold 2GB
+  option alu.disk-usage.exit-threshold 128MB
+  option alu.open-files-usage.entry-threshold 1024
+  option alu.open-files-usage.exit-threshold 32
+  option alu.read-usage.entry-threshold 20      #%
+  option alu.read-usage.exit-threshold 4        #%
+  option alu.write-usage.entry-threshold 20     #%
+  option alu.write-usage.exit-threshold 4       #%
+  option alu.disk-speed-usage.entry-threshold 0 # DO NOT SET IT. SPEED IS CONSTANT!!!
+  option alu.disk-speed-usage.exit-threshold 0  # DO NOT SET IT. SPEED IS CONSTANT!!!
+  option alu.stat-refresh.interval 10sec
+  option alu.stat-refresh.num-file-create 10
+### ** Random Scheduler **
+# option scheduler random
+### ** NUFA Scheduler **
+# option scheduler nufa
+# option nufa.local-volume-name posix1
+### ** Round Robin (RR) Scheduler **
+# option scheduler rr
+# option rr.limits.min-free-disk 5%             #%
+end-volume
+
diff --git a/doc/examples/write-behind.vol b/doc/examples/write-behind.vol
new file mode 100644
index 000000000..9c6bae11c
--- /dev/null
+++ b/doc/examples/write-behind.vol
@@ -0,0 +1,26 @@
+volume client
+  type protocol/client
+  option transport-type tcp          # for TCP/IP transport
+  option remote-host 192.168.1.10    # IP address of the remote brick
+  option remote-subvolume brick      # name of the remote volume
+end-volume
+
+## In a normal clustered storage setup, any of the cluster translators can come here.
+#
+# Definition of other clients
+#
+# Definition of cluster translator (may be unify, replicate, or unify over replicate)
+#


+### The 'write-behind' translator is a performance booster for write operations. It is best used on the client side, as its main intention is to reduce the network latency incurred by each write operation.
+
+volume wb
+  type performance/write-behind
+  subvolumes client                           # 'client' in this example; change it according to your spec file
+  option flush-behind on                      # default value is 'off'
+  option window-size 2MB
+  option aggregate-size 1MB                   # default value is 0
+  option enable_O_SYNC no                     # default is no
+  option disable-for-first-nbytes 128KB       # default is 1
+end-volume
--
cgit
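
Each example file above shows a single translator in isolation. The sketch below (not part of the patch itself) shows one way the pieces are commonly stacked into a complete client spec and server spec. It reuses the same 192.168.1.10 host, "brick" name and /home/export directory from the examples; the volume names and option values are only the illustrative ones already shown above, and the ordering of the performance translators is one reasonable arrangement, not the only valid one. Adjust everything to your own setup.

# --- client-side sketch: protocol/client at the bottom, performance
# --- translators stacked on top of it, topmost volume is what gets mounted

volume client
  type protocol/client
  option transport-type tcp
  option remote-host 192.168.1.10
  option remote-subvolume brick
end-volume

volume ioc
  type performance/io-cache
  subvolumes client
  option cache-size 64MB
end-volume

volume ra
  type performance/read-ahead
  subvolumes ioc
  option page-count 4
end-volume

volume wb
  type performance/write-behind
  subvolumes ra
  option window-size 2MB
end-volume

# --- server-side sketch: storage/posix at the bottom, features in between,
# --- protocol/server exporting the top of the stack as "brick"

volume posix
  type storage/posix
  option directory /home/export
end-volume

volume locks
  type features/posix-locks
  option mandatory on
  subvolumes posix
end-volume

volume brick
  type performance/io-threads
  option thread-count 4
  subvolumes locks
end-volume

volume server
  type protocol/server
  option transport-type tcp
  subvolumes brick
  option auth.addr.brick.allow 192.168.*
end-volume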