Diffstat (limited to 'doc/examples/legacy')
-rw-r--r--  doc/examples/legacy/Makefile.am              8
-rw-r--r--  doc/examples/legacy/README                  13
-rw-r--r--  doc/examples/legacy/filter.vol              23
-rw-r--r--  doc/examples/legacy/io-cache.vol            31
-rw-r--r--  doc/examples/legacy/io-threads.vol          22
-rw-r--r--  doc/examples/legacy/posix-locks.vol         19
-rw-r--r--  doc/examples/legacy/protocol-client.vol     12
-rw-r--r--  doc/examples/legacy/protocol-server.vol     21
-rw-r--r--  doc/examples/legacy/read-ahead.vol          24
-rw-r--r--  doc/examples/legacy/replicate.vol          118
-rw-r--r--  doc/examples/legacy/stripe.vol             120
-rw-r--r--  doc/examples/legacy/trace.vol               21
-rw-r--r--  doc/examples/legacy/trash.vol               20
-rw-r--r--  doc/examples/legacy/write-behind.vol        27
14 files changed, 479 insertions, 0 deletions
diff --git a/doc/examples/legacy/Makefile.am b/doc/examples/legacy/Makefile.am
new file mode 100644
index 000000000..49c9701ef
--- /dev/null
+++ b/doc/examples/legacy/Makefile.am
@@ -0,0 +1,8 @@
+EXTRA = README replicate.vol stripe.vol protocol-client.vol protocol-server.vol posix-locks.vol trash.vol write-behind.vol io-threads.vol io-cache.vol read-ahead.vol filter.vol trace.vol
+EXTRA_DIST = $(EXTRA)
+
+docdir = $(datadir)/doc/$(PACKAGE_NAME)
+Examplesdir = $(docdir)/examples
+Examples_DATA = $(EXTRA)
+
+CLEANFILES =
diff --git a/doc/examples/legacy/README b/doc/examples/legacy/README
new file mode 100644
index 000000000..732751571
--- /dev/null
+++ b/doc/examples/legacy/README
@@ -0,0 +1,13 @@
+GlusterFS's translator feature is very flexible, and there are quite a lot of
+ways one can configure the filesystem to behave.
+
+A volume specification file is how GlusterFS understands how it has to work;
+it behaves according to what is written there.
+
+Going through the following URLs will give you a better idea about all of this.
+
+* http://www.gluster.org/docs/index.php/GlusterFS
+* http://www.gluster.org/docs/index.php/GlusterFS_Volume_Specification
+* http://www.gluster.org/docs/index.php/GlusterFS_Translators
+
+Mail any questions or suggestions to 'gluster-devel(at)nongnu.org'.
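
A minimal usage sketch (assuming the legacy glusterfsd/glusterfs command-line
tools; the file paths and mount point below are only illustrative): a server
spec file is passed to glusterfsd, and a client spec file is passed to
glusterfs together with a mount point.

    # on the server machine
    glusterfsd -f /etc/glusterfs/protocol-server.vol

    # on the client machine
    glusterfs -f /etc/glusterfs/protocol-client.vol /mnt/glusterfs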
diff --git a/doc/examples/legacy/filter.vol b/doc/examples/legacy/filter.vol
new file mode 100644
index 000000000..59bb23ecf
--- /dev/null
+++ b/doc/examples/legacy/filter.vol
@@ -0,0 +1,23 @@
+volume client
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 192.168.1.10 # IP address of the remote brick
+ option remote-subvolume brick # name of the remote volume
+end-volume
+
+## In a normal clustered storage setup, any of the cluster translators can come here.
+#
+# Definition of other clients
+#
+# Definition of cluster translator (may be unify, afr, or unify over afr)
+#
+
+### The 'Filter' translator is used on the client side (or on the server side, according to need). This translator makes all the translators below it (or, say, volumes) read-only. Hence, if one wants a 'read-only' filesystem, using filter as the top-most volume makes it really fast, as the fops are returned from this level itself.
+
+volume filter-ro
+ type features/filter
+ option root-squashing enable
+# option completely-read-only yes
+# translate-uid 1-99=0
+ subvolumes client
+end-volume
diff --git a/doc/examples/legacy/io-cache.vol b/doc/examples/legacy/io-cache.vol
new file mode 100644
index 000000000..a71745017
--- /dev/null
+++ b/doc/examples/legacy/io-cache.vol
@@ -0,0 +1,31 @@
+volume client
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 192.168.1.10 # IP address of the remote brick
+ option remote-subvolume brick # name of the remote volume
+end-volume
+
+## In a normal clustered storage setup, any of the cluster translators can
+# come here.
+#
+# Definition of other clients
+#
+# Definition of cluster translator (may be distribute, replicate, or distribute
+# over replicate)
+#
+
+### The 'IO-Cache' translator is best used on the client side when a filesystem
+# has files which are not modified frequently but are read several times. For
+# example, while compiling a kernel, *.h files are read while compiling every
+# *.c file; in such cases the io-cache translator comes in very handy, as it
+# keeps the whole file content in the cache and serves it from the cache.
+# One can set the priority of the cache entries too.
+
+volume ioc
+ type performance/io-cache
+ subvolumes client # In this example it is 'client'; change it
+ # according to your spec file.
+ option cache-size 64MB # 32MB is the default
+ option force-revalidate-timeout 5 # 1 second is the default
+ option priority *.html:2,*:1 # the default is *:0
+end-volume
diff --git a/doc/examples/legacy/io-threads.vol b/doc/examples/legacy/io-threads.vol
new file mode 100644
index 000000000..236f5b8b1
--- /dev/null
+++ b/doc/examples/legacy/io-threads.vol
@@ -0,0 +1,22 @@
+volume brick
+ type storage/posix # POSIX FS translator
+ option directory /home/export # Export this directory
+end-volume
+
+### The 'IO-threads' translator gives threading behaviour to file I/O calls; all
+# other normal fops keep their default behaviour. Loading this on the server
+# side helps to reduce network contention (which would otherwise be perceived
+# as a GlusterFS hang).
+
+volume iot
+ type performance/io-threads
+ subvolumes brick
+ option thread-count 4 # default value is 1
+end-volume
+
+volume server
+ type protocol/server
+ subvolumes iot
+ option transport-type tcp # For TCP/IP transport
+ option auth.addr.iot.allow 192.168.*
+end-volume
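
A client attaching to this export would name the top-most server-side volume,
"iot", as its remote-subvolume, since that is the only volume allowed by the
auth.addr rule above; a rough sketch (the remote-host address is illustrative):

    volume client
      type protocol/client
      option transport-type tcp
      option remote-host 192.168.1.10  # address of the io-threads server above
      option remote-subvolume iot      # matches auth.addr.iot.allow on the server
    end-volume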
diff --git a/doc/examples/legacy/posix-locks.vol b/doc/examples/legacy/posix-locks.vol
new file mode 100644
index 000000000..673afa3f8
--- /dev/null
+++ b/doc/examples/legacy/posix-locks.vol
@@ -0,0 +1,19 @@
+volume brick
+ type storage/posix # POSIX FS translator
+ option directory /home/export # Export this directory
+end-volume
+
+# The 'Posix-locks' feature should be added on the server side.
+
+volume p-locks
+ type features/posix-locks
+ subvolumes brick
+ option mandatory on
+end-volume
+
+volume server
+ type protocol/server
+ subvolumes p-locks
+ option transport-type tcp
+ option auth.addr.p-locks.allow 192.168.* # Allow access to "p-locks" volume
+end-volume
diff --git a/doc/examples/legacy/protocol-client.vol b/doc/examples/legacy/protocol-client.vol
new file mode 100644
index 000000000..c34ef790d
--- /dev/null
+++ b/doc/examples/legacy/protocol-client.vol
@@ -0,0 +1,12 @@
+volume client
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 192.168.1.10 # IP address of the remote brick
+# option transport.socket.remote-port 24016
+
+# option transport-type rdma # for Infiniband verbs transport
+# option transport.rdma.work-request-send-count 16
+# option transport.rdma.work-request-recv-count 16
+# option transport.rdma.remote-port 24016
+ option remote-subvolume brick # name of the remote volume
+end-volume
diff --git a/doc/examples/legacy/protocol-server.vol b/doc/examples/legacy/protocol-server.vol
new file mode 100644
index 000000000..195e49657
--- /dev/null
+++ b/doc/examples/legacy/protocol-server.vol
@@ -0,0 +1,21 @@
+### Export volume "brick" with the contents of "/home/export" directory.
+volume brick
+ type storage/posix # POSIX FS translator
+ option directory /home/export # Export this directory
+end-volume
+
+### Add network serving capability to above brick.
+volume server
+ type protocol/server
+ option transport-type tcp # For TCP/IP transport
+# option transport.socket.listen-port 24016
+
+# option transport-type rdma
+# option transport.rdma.work-request-send-count 64
+# option transport.rdma.work-request-recv-count 64
+# option transport.rdma.listen-port 24016
+
+# option bind-address 192.168.1.10 # Default is to listen on all interfaces
+ subvolumes brick
+ option auth.addr.brick.allow 192.168.* # Allow access to "brick" volume
+end-volume
diff --git a/doc/examples/legacy/read-ahead.vol b/doc/examples/legacy/read-ahead.vol
new file mode 100644
index 000000000..9e4dba556
--- /dev/null
+++ b/doc/examples/legacy/read-ahead.vol
@@ -0,0 +1,24 @@
+volume client
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 192.168.1.10 # IP address of the remote brick
+ option remote-subvolume brick # name of the remote volume
+end-volume
+
+## In a normal clustered storage setup, any of the cluster translators can come here.
+#
+# Definition of other clients
+#
+# Definition of cluster translator (may be distribute, replicate, or distribute
+# over replicate)
+#
+
+# The 'Read-Ahead' translator is best utilized on the client side, as it
+# prefetches the file contents when the first read() call is issued.
+
+volume ra
+ type performance/read-ahead
+ subvolumes client
+ option page-count 4 # default is 2
+ option force-atime-update no # default is 'no'
+end-volume
diff --git a/doc/examples/legacy/replicate.vol b/doc/examples/legacy/replicate.vol
new file mode 100644
index 000000000..10626d46f
--- /dev/null
+++ b/doc/examples/legacy/replicate.vol
@@ -0,0 +1,118 @@
+### 'NOTE'
+# This file has both the server spec and the client spec, to give an
+# understanding of a replicate spec file. Hence it can't be used as-is as a
+# GlusterFS spec file; one needs to separate the server and client specs to get it working.
+
+#=========================================================================
+
+# **** server1 spec file ****
+
+### Export volume "brick1" with the contents of the "/home/export1" directory.
+volume posix1
+ type storage/posix # POSIX FS translator
+ option directory /home/export1 # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick1
+ type features/posix-locks
+ option mandatory on # enables mandatory locking on all files
+ subvolumes posix1
+end-volume
+
+### Add network serving capability to above brick.
+volume server
+ type protocol/server
+ option transport-type tcp # For TCP/IP transport
+ option transport.socket.listen-port 24016
+ subvolumes brick1
+ option auth.addr.brick1.allow * # Allow access to "brick1" volume
+end-volume
+
+
+#=========================================================================
+
+# **** server2 spec file ****
+volume posix2
+ type storage/posix # POSIX FS translator
+ option directory /home/export2 # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick2
+ type features/posix-locks
+ option mandatory on # enables mandatory locking on all files
+ subvolumes posix2
+end-volume
+
+### Add network serving capability to above brick.
+volume server
+ type protocol/server
+ option transport-type tcp # For TCP/IP transport
+ option transport.socket.listen-port 24017
+ subvolumes brick2
+ option auth.addr.brick2.allow * # Allow access to "brick2" volume
+end-volume
+
+
+#=========================================================================
+
+# **** server3 spec file ****
+
+volume posix3
+ type storage/posix # POSIX FS translator
+ option directory /home/export3 # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick3
+ type features/posix-locks
+ option mandatory on # enables mandatory locking on all files
+ subvolumes posix3
+end-volume
+
+### Add network serving capability to above brick.
+volume server
+ type protocol/server
+ option transport-type tcp # For TCP/IP transport
+ option transport.socket.listen-port 24018
+ subvolumes brick3
+ option auth.addr.brick3.allow * # Allow access to "brick3" volume
+end-volume
+
+
+#=========================================================================
+
+# **** Clustered Client config file ****
+
+### Add client feature and attach to remote subvolume of server1
+volume client1
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 127.0.0.1 # IP address of the remote brick
+ option transport.socket.remote-port 24016
+ option remote-subvolume brick1 # name of the remote volume
+end-volume
+
+### Add client feature and attach to remote subvolume of server2
+volume client2
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 127.0.0.1 # IP address of the remote brick
+ option transport.socket.remote-port 24017
+ option remote-subvolume brick2 # name of the remote volume
+end-volume
+
+volume client3
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 127.0.0.1 # IP address of the remote brick
+ option transport.socket.remote-port 24018
+ option remote-subvolume brick3 # name of the remote volume
+end-volume
+
+## Add replicate feature.
+volume replicate
+ type cluster/replicate
+ subvolumes client1 client2 client3
+end-volume
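
To actually run this example, a minimal sketch (assuming the legacy
glusterfsd/glusterfs binaries; the file names are illustrative) is to copy each
server block above into its own spec file, start one glusterfsd per file, and
mount the client portion:

    glusterfsd -f server1.vol                          # exports brick1 on port 24016
    glusterfsd -f server2.vol                          # exports brick2 on port 24017
    glusterfsd -f server3.vol                          # exports brick3 on port 24018
    glusterfs  -f replicate-client.vol /mnt/glusterfs  # mounts the replicated volume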
diff --git a/doc/examples/legacy/stripe.vol b/doc/examples/legacy/stripe.vol
new file mode 100644
index 000000000..9524e8198
--- /dev/null
+++ b/doc/examples/legacy/stripe.vol
@@ -0,0 +1,120 @@
+
+### 'NOTE'
+# This file has both the server spec and the client spec, to give an
+# understanding of a stripe spec file. Hence it can't be used as-is as a
+# GlusterFS spec file; one needs to separate the server and client specs to get it working.
+
+#=========================================================================
+
+# **** server1 spec file ****
+
+### Export volume "brick1" with the contents of the "/home/export1" directory.
+volume posix1
+ type storage/posix # POSIX FS translator
+ option directory /home/export1 # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick1
+ type features/posix-locks
+ option mandatory on # enables mandatory locking on all files
+ subvolumes posix1
+end-volume
+
+### Add network serving capability to above brick.
+volume server
+ type protocol/server
+ option transport-type tcp # For TCP/IP transport
+ option transport.socket.listen-port 24016
+ subvolumes brick1
+ option auth.addr.brick1.allow * # Allow access to "brick1" volume
+end-volume
+
+
+#=========================================================================
+
+# **** server2 spec file ****
+volume posix2
+ type storage/posix # POSIX FS translator
+ option directory /home/export2 # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick2
+ type features/posix-locks
+ option mandatory on # enables mandatory locking on all files
+ subvolumes posix2
+end-volume
+
+### Add network serving capability to above brick.
+volume server
+ type protocol/server
+ option transport-type tcp # For TCP/IP transport
+ option transport.socket.listen-port 24017
+ subvolumes brick2
+ option auth.addr.brick2.allow * # Allow access to "brick2" volume
+end-volume
+
+
+#=========================================================================
+
+# **** server3 spec file ****
+
+volume posix3
+ type storage/posix # POSIX FS translator
+ option directory /home/export3 # Export this directory
+end-volume
+
+### Add POSIX record locking support to the storage brick
+volume brick3
+ type features/posix-locks
+ option mandatory on # enables mandatory locking on all files
+ subvolumes posix3
+end-volume
+
+### Add network serving capability to above brick.
+volume server
+ type protocol/server
+ option transport-type tcp # For TCP/IP transport
+ option transport.socket.listen-port 24018
+ subvolumes brick3
+ option auth.addr.brick3.allow * # Allow access to "brick3" volume
+end-volume
+
+
+#=========================================================================
+
+# **** Clustered Client config file ****
+
+### Add client feature and attach to remote subvolume of server1
+volume client1
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 127.0.0.1 # IP address of the remote brick
+ option transport.socket.remote-port 24016
+ option remote-subvolume brick1 # name of the remote volume
+end-volume
+
+### Add client feature and attach to remote subvolume of server2
+volume client2
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 127.0.0.1 # IP address of the remote brick
+ option transport.socket.remote-port 24017
+ option remote-subvolume brick2 # name of the remote volume
+end-volume
+
+volume client3
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 127.0.0.1 # IP address of the remote brick
+ option transport.socket.remote-port 24018
+ option remote-subvolume brick3 # name of the remote volume
+end-volume
+
+## Add Stripe Feature.
+volume stripe
+ type cluster/stripe
+ subvolumes client1 client2 client3
+ option block-size 1MB
+end-volume
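
The same split-and-run steps as in the replicate example apply here. Once the
server and client portions are separated, one rough way to see striping in
action (paths are illustrative; all three exports sit on the same host in this
example) is to write a file larger than block-size through the mount and look
at the same path on each export directory, where each brick is expected to
hold only part of the blocks:

    dd if=/dev/zero of=/mnt/glusterfs/bigfile bs=1M count=8
    ls -ls /home/export1/bigfile /home/export2/bigfile /home/export3/bigfile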
diff --git a/doc/examples/legacy/trace.vol b/doc/examples/legacy/trace.vol
new file mode 100644
index 000000000..59830f26a
--- /dev/null
+++ b/doc/examples/legacy/trace.vol
@@ -0,0 +1,21 @@
+volume client
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 192.168.1.10 # IP address of the remote brick
+ option remote-subvolume brick # name of the remote volume
+end-volume
+
+### The 'Trace' translator is a very handy debug tool for GlusterFS, as it can
+# be loaded between any two volumes without changing the behaviour of the
+# filesystem.
+# On the client side it can be the top-most volume in the spec (as it is here),
+# to understand which calls are made on the FUSE filesystem when the mounted
+# filesystem is accessed.
+
+volume trace
+ type debug/trace
+ subvolumes client
+end-volume
+
+# 'NOTE:' With the 'debug/trace' translator loaded, the filesystem will be very
+# slow, as it logs each and every call to the log file.
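
To look at the trace output, the client can be started with an explicit log
file and a verbose log level (a sketch assuming the legacy glusterfs CLI
options -l/--log-file and -L/--log-level; the paths are illustrative):

    glusterfs -f trace.vol -l /var/log/glusterfs/trace.log -L DEBUG /mnt/glusterfs
    tail -f /var/log/glusterfs/trace.log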
diff --git a/doc/examples/legacy/trash.vol b/doc/examples/legacy/trash.vol
new file mode 100644
index 000000000..3fcf315af
--- /dev/null
+++ b/doc/examples/legacy/trash.vol
@@ -0,0 +1,20 @@
+volume brick
+ type storage/posix # POSIX FS translator
+ option directory /home/export # Export this directory
+end-volume
+
+### The 'Trash' translator is best used on the server side, as it just renames
+# a deleted file into 'trash-dir', and it turns one unlink call into 4
+# separate fops.
+volume trashcan
+ type features/trash
+ subvolumes brick
+ option trash-dir /.trashcan
+end-volume
+
+volume server
+ type protocol/server
+ subvolumes trashcan
+ option transport-type tcp # For TCP/IP transport
+ option auth.addr.trashcan.allow 192.168.* # Allow access to "trashcan" volume
+end-volume
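
A quick way to check the trash behaviour (the mount path and file name are
illustrative): remove a file through a client mounted on this server; the file
is expected to reappear under the trash directory on the backend export
instead of being unlinked right away.

    rm /mnt/glusterfs/report.txt
    ls /home/export/.trashcan/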
diff --git a/doc/examples/legacy/write-behind.vol b/doc/examples/legacy/write-behind.vol
new file mode 100644
index 000000000..2b5ed4139
--- /dev/null
+++ b/doc/examples/legacy/write-behind.vol
@@ -0,0 +1,27 @@
+volume client
+ type protocol/client
+ option transport-type tcp # for TCP/IP transport
+ option remote-host 192.168.1.10 # IP address of the remote brick
+ option remote-subvolume brick # name of the remote volume
+end-volume
+
+## In a normal clustered storage setup, any of the cluster translators can come here.
+#
+# Definition of other clients
+#
+# Definition of cluster translator (may be unify, replicate, or unify over replicate)
+#
+
+
+# The 'Write-behind' translator is a performance booster for write operations.
+# It is best used on the client side, as its main intention is to reduce the
+# network latency incurred by each write operation.
+
+volume wb
+ type performance/write-behind
+ subvolumes client
+ option flush-behind on # default value is 'off'
+ option window-size 2MB
+ option enable-O_SYNC no # default is no
+ option disable-for-first-nbytes 128KB # default is 1
+end-volume