From 281c79c3766ca6a912e419d0cde24d1b8c457cbe Mon Sep 17 00:00:00 2001
From: Amar Tumballi
Date: Fri, 1 Jun 2012 10:30:34 +0530
Subject: doc: added release-notes directory

Change-Id: Idd45c0fe6a0615a3204aad216002894746cea03c
Signed-off-by: Amar Tumballi
Reviewed-on: http://review.gluster.com/3501
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 doc/release-notes/en-US/Author_Group.xml          |  17 +++
 doc/release-notes/en-US/Book_Info.xml             |  28 ++++
 doc/release-notes/en-US/Chapter.xml               |  33 +++++
 doc/release-notes/en-US/Download_Install.xml      | 107 ++++++++++++++
 doc/release-notes/en-US/Key_Features.xml          |  72 +++++++++
 doc/release-notes/en-US/Known_Issues.xml          | 164 +++++++++++++++++++++
 doc/release-notes/en-US/Preface.xml               |  24 +++
 doc/release-notes/en-US/Product_Documentation.xml |  12 ++
 doc/release-notes/en-US/Product_Support.xml       |  12 ++
 doc/release-notes/en-US/Release_Notes.ent         |   4 +
 doc/release-notes/en-US/Release_Notes.xml         |  17 +++
 doc/release-notes/en-US/Revision_History.xml      |  27 ++++
 doc/release-notes/en-US/Whats_New.xml             |  90 +++++++++++
 doc/release-notes/en-US/gfs_introduction.xml      |  54 +++++++
 .../images/640px-GlusterFS_3.2_Architecture.png   | Bin 0 -> 97477 bytes
 doc/release-notes/en-US/images/icon.svg           |  19 +++
 doc/release-notes/publican.cfg                    |  12 ++
 17 files changed, 692 insertions(+)
 create mode 100644 doc/release-notes/en-US/Author_Group.xml
 create mode 100644 doc/release-notes/en-US/Book_Info.xml
 create mode 100644 doc/release-notes/en-US/Chapter.xml
 create mode 100644 doc/release-notes/en-US/Download_Install.xml
 create mode 100644 doc/release-notes/en-US/Key_Features.xml
 create mode 100644 doc/release-notes/en-US/Known_Issues.xml
 create mode 100644 doc/release-notes/en-US/Preface.xml
 create mode 100644 doc/release-notes/en-US/Product_Documentation.xml
 create mode 100644 doc/release-notes/en-US/Product_Support.xml
 create mode 100644 doc/release-notes/en-US/Release_Notes.ent
 create mode 100644 doc/release-notes/en-US/Release_Notes.xml
 create mode 100644 doc/release-notes/en-US/Revision_History.xml
 create mode 100644 doc/release-notes/en-US/Whats_New.xml
 create mode 100644 doc/release-notes/en-US/gfs_introduction.xml
 create mode 100644 doc/release-notes/en-US/images/640px-GlusterFS_3.2_Architecture.png
 create mode 100644 doc/release-notes/en-US/images/icon.svg
 create mode 100644 doc/release-notes/publican.cfg

diff --git a/doc/release-notes/en-US/Author_Group.xml b/doc/release-notes/en-US/Author_Group.xml
new file mode 100644
index 000000000..43a491ea9
--- /dev/null
+++ b/doc/release-notes/en-US/Author_Group.xml
@@ -0,0 +1,17 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+
+ GlusterFS
+ Developers
+
+ Red Hat
+ Storage
+
+ gluster-devel@nongnu.org
+
+
diff --git a/doc/release-notes/en-US/Book_Info.xml b/doc/release-notes/en-US/Book_Info.xml
new file mode 100644
index 000000000..d3bbb9f68
--- /dev/null
+++ b/doc/release-notes/en-US/Book_Info.xml
@@ -0,0 +1,28 @@
+
+
+
+%BOOK_ENTITIES;
+]>
+
+ Release Notes
+ Release Notes for GlusterFS 3.3.0
+ GlusterFS
+ 3.3
+ 1
+ 1
+
+
+ These Release Notes introduce GlusterFS 3.3.0 and provide information on its key features and on managing the software.
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/release-notes/en-US/Chapter.xml b/doc/release-notes/en-US/Chapter.xml
new file mode 100644
index 000000000..8a2957971
--- /dev/null
+++ b/doc/release-notes/en-US/Chapter.xml
@@ -0,0 +1,33 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+ Test Chapter
+
+ This is a test paragraph
+
+ Test Section 1 + + This is a test paragraph in a section + +
+ +
+ Test Section 2 + + This is a test paragraph in Section 2 + + + + listitem text + + + + +
+
+
diff --git a/doc/release-notes/en-US/Download_Install.xml b/doc/release-notes/en-US/Download_Install.xml
new file mode 100644
index 000000000..10bc5663b
--- /dev/null
+++ b/doc/release-notes/en-US/Download_Install.xml
@@ -0,0 +1,107 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+ Downloading and Installing GlusterFS
+ You can download and install GlusterFS 3.3.0, or upgrade an existing installation to the latest version.
+
+ Downloading GlusterFS 3.3 + You can download the latest software to each server in your cluster from . + +
+
+ New Installation
+
+The installation process for the GlusterFS server is available at:
+
+
+
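+ As a quick sketch (assuming you have already downloaded the 3.3 packages for an RPM-based distribution; the exact package set varies by distribution), a new installation follows the same pattern as the upgrade commands later in this chapter:
+
+ # rpm -ivh glusterfs*
+ # /etc/init.d/glusterd start
+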
+
+ Compatibility + Release 3.3 of GlusterFS is not compatible with 2.0.x, 3.0.x, 3.1.x, and 3.2.x releases of GlusterFS. + +
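+ To check which release is currently installed before planning an upgrade, you can query the version directly (a standard option of the glusterfs binary):
+
+ # glusterfs --version
+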
+
+ Upgrade
+ Red Hat recommends that you back up your data before upgrading to GlusterFS 3.3.
+
+
+ Configurations generated outside the scope of the gluster CLI are neither recommended nor
+supported by Red Hat.
+
+
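+ As a precaution, you may also want to preserve the trusted-pool configuration before upgrading. A minimal sketch, assuming an existing 3.1.x/3.2.x installation whose working directory is /etc/glusterd (see the Change in Working Directory note in these release notes):
+
+ # cp -a /etc/glusterd /etc/glusterd.backup
+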
+ Upgrade from GlusterFS v3.2.x or 3.1.x to GlusterFS v3.3
+ In an environment with replicated bricks, it is recommended that you upgrade a single storage server,
+confirm stability, and then upgrade its replica.
+
+ Use the same installation method for the upgrade as for the original GlusterFS installation.
+
+ Using RPMs on RHEL, CentOS, Fedora
+
+
+ Download the 3.3 RPMs from the following locations:
+
+ RHEL:
+ Fedora:
+
+
+ Run rpm using the following command:
+
+ # rpm -U glusterfs*
+
+
+
+ Using dpkg on Debian, Ubuntu
+
+
+
+ Download the 3.3 packages from:
+
+ Debian:
+ Ubuntu:
+
+
+ Run dpkg using the following command:
+
+ # dpkg -i glusterfs*
+
+
+ Building from source
+
+
+ Download the 3.3 source code from:
+
+
+
+
+ Unpack and install GlusterFS using the following commands:
+
+ # gunzip glusterfs-3.3.0.tar.gz
+# tar xvf glusterfs-3.3.0.tar
+# cd glusterfs-3.3.0
+# ./configure
+# make
+# make install
+
+
+ Stop GlusterFS using the following commands. Note that this step disconnects Gluster Native
+clients.
+
+ # killall glusterfsd
+ # killall glusterfs
+ # killall glusterd
+
+
+ Start GlusterFS using the following command:
+
+ # /etc/init.d/glusterd start
+
+
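+ After glusterd is restarted, it can be useful to confirm the upgrade before returning the server to service. A suggested check, using standard gluster CLI commands:
+
+ # glusterfs --version
+ # gluster peer status
+ # gluster volume info
+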
+
+
+
diff --git a/doc/release-notes/en-US/Key_Features.xml b/doc/release-notes/en-US/Key_Features.xml
new file mode 100644
index 000000000..4e11bec84
--- /dev/null
+++ b/doc/release-notes/en-US/Key_Features.xml
@@ -0,0 +1,72 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+ Key Features
+ This section describes the key features available in Red Hat Storage. The following is a list of feature highlights of this new version of the Red Hat Storage software:
+
+
+ High Availability
+ Red Hat Storage provides both synchronous and asynchronous n-way file replication to assure data availability:
+
+
+ Synchronous replication provides redundancy and protection within a single data center, or across multiple data centers and availability zones in a region.
+
+
+ Asynchronous geo-replication: Red Hat Storage supports Geo-Rep long-distance replication. Customers can configure storage server nodes and Red Hat Storage to asynchronously replicate data
+over vast geographical distances.
+
+
+
+
+ Deploy in Minutes
+ Red Hat Storage can be deployed in minutes, providing one of the fastest ways to create an on-demand, high-performance, petabyte-scale storage environment.
+
+
+ No Application Rewrites
+ Red Hat Storage Appliance provides full support for the semantics of a normal Linux file system such as ext4, so there is no need to rewrite applications when moving data to the cloud, as there is with cloud-based object storage.
+
+
+ Flexibility
+
+ Runs in userspace, eliminating the need for complex
+kernel patches or dependencies.
+
+
+
+ Scalability
+
+ Elastic volume management enables storage volumes
+to be abstracted from the hardware so data and hardware can be managed independently. Storage can be
+added while data continues to be available, with no
+application interruption. Volumes can grow across
+machines in the system and can be migrated within the
+system to rebalance capacity. Storage server nodes
+can be added on the fly.
+
+
+
+ Simple Management
+
+ Storage is managed with simple, single commands. The software also includes performance monitoring and analysis tools called Top and Profile. Top provides visibility into the workload pattern, and Profile provides performance
+statistics over a user-defined
+time period for metrics including latency and amount of
+data read or written.
+
+
+
+ No Metadata Server
+ Rather than using a centralized or
+distributed metadata server, Red Hat Storage software
+ uses an elastic hashing algorithm to locate
+data in the storage pool, removing a common source
+of I/O bottlenecks and vulnerability to failure. Data
+access is fully parallelized, and performance scales
+linearly.
+
+
+
+
diff --git a/doc/release-notes/en-US/Known_Issues.xml b/doc/release-notes/en-US/Known_Issues.xml
new file mode 100644
index 000000000..834ed5336
--- /dev/null
+++ b/doc/release-notes/en-US/Known_Issues.xml
@@ -0,0 +1,164 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+ Known Issues
+
+ The following are the known issues:
+
+
+
+
+ Issues related to Distributed Replicated Volumes:
+
+
+
+ When a process has done cd into a directory, a stat of a deleted file re-creates it (directory self-
+heal is not triggered).
+
+ In a GlusterFS replicated setup, suppose you are inside a directory (for example, the Test directory) of a
+replicated volume, and from another node a file inside the Test directory is deleted. If you then
+perform a stat operation on the same file name, the file is automatically re-created (that is, a
+proper directory self-heal is not triggered while a process has done cd into that path).
+
+
+
+
+
+ Issues related to Distributed Volumes:
+
+
+
+ Rebalance does not happen if bricks are down.
+
+
+ Currently, while running rebalance, make sure that all the bricks are in a connected, operational state.
+
+
+
+
+
+
+ glusterfsd - the error return code is not proper after daemonizing the process.
+
+ Due to this, scripts that mount glusterfs or start a glusterfs process must not depend on its return
+value.
+
+
+
+ After the # gluster volume replace-brick VOLNAME Brick New-Brick commit command
+is issued, file system operations on that particular volume that are in transit will fail.
+
+
+
+ The # gluster volume replace-brick ... command will fail in an RDMA setup.
+
+
+
+ If files and directories have different GFIDs on different back-ends, the GlusterFS client may hang or
+display errors.
+
+ Workaround: The workaround for this issue is explained at
+.
+
+
+
+ Issues related to Directory Quota:
+
+
+
+ Some writes can appear to pass even though the quota limit is exceeded (the write returns
+success), because they may still be cached in write-behind. However, disk space will
+not exceed the quota limit: when the writes reach the backend, quota does not allow
+them. Hence, applications are advised to check the return value of the close call.
+
+
+
+ If a user has done cd into a directory on which the administrator is setting a limit, the
+ command succeeds, but the new limit value applies to all users
+except those who have done cd into that particular directory. The old limit value
+ remains applicable until such a user has done cd out of that directory.
+
+
+
+ A rename operation (that is, removing oldpath and creating newpath) requires additional disk
+space equal to the file size. This is because, during a rename, quota subtracts the size of oldpath only after the
+rename operation is performed, but it checks whether the quota limit is exceeded on the parents of
+newfile before the rename operation.
+
+
+
+ The Quota feature is not available on striped volumes.
+
+
+
+
+ Issues related to POSIX ACLs:
+
+
+
+ Even though POSIX ACLs are set on a file or directory, the plus (+) sign in the file
+ permissions will not be displayed. This is a performance optimization and will be fixed in a
+ future release.
+
+
+
+
+
+ When glusterfs is mounted with -o acl, directory read performance can degrade. Commands
+like recursive directory listing can be slower than normal.
+
+
+
+
+
+ When POSIX ACLs are set and multiple NFS clients are used, there could be inconsistency in
+the way ACLs are applied due to attribute caching in NFS. For a consistent view of POSIX ACLs
+in a multiple-client setup, use the -o noac option on the NFS mount to switch off attribute caching.
+ This could have a performance impact on operations involving attributes.
+
+
+
+
+
+ If you have enabled Gluster NLM, you cannot mount the kernel NFS client on your storage nodes.
+
+
+
+
+ Error with the lost+found directory while using multiple disks.
+ Workaround: Ensure that each brick directory is a subdirectory of the back-end mount point rather than the mount point itself. For example, if /dev/sda1 is mounted on /export1, use /export1/volume as the GlusterFS export directory.
+
+
+ Due to enhancements in graphs, you may experience excessive memory usage with this release.
+
+
+
+
+ After you restart the NFS server, unlocks within the grace period may fail and previously held locks may not be reclaimed.
+
+
+
+ After rebalancing a volume, if you run the rm -rf command at the mount point to remove all contents of the current working directory recursively without prompting, you may get a "Directory not empty" error message.
+
+
+
+
+ The following is a known missing (minor) feature:
+
+
+
+ locks - mandatory locking is not supported.
+
+
+
+
+
+
diff --git a/doc/release-notes/en-US/Preface.xml b/doc/release-notes/en-US/Preface.xml
new file mode 100644
index 000000000..597dc5da0
--- /dev/null
+++ b/doc/release-notes/en-US/Preface.xml
@@ -0,0 +1,24 @@
+
+
+
+%BOOK_ENTITIES;
+]>
+
+ Preface
+ This guide describes how to configure, operate, and manage Gluster File System (GlusterFS).
+ Audience
+ This guide is intended for system administrators interested in configuring and managing GlusterFS.
+ This guide assumes that you are familiar with the Linux operating system, file system concepts, GlusterFS concepts, and GlusterFS installation.
+
+ License
+ The license information is available at .
+
+
+
+
+
diff --git a/doc/release-notes/en-US/Product_Documentation.xml b/doc/release-notes/en-US/Product_Documentation.xml
new file mode 100644
index 000000000..77329ab01
--- /dev/null
+++ b/doc/release-notes/en-US/Product_Documentation.xml
@@ -0,0 +1,12 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+ Product Documentation
+ Product documentation of GlusterFS
+
+ is available at .
+
diff --git a/doc/release-notes/en-US/Product_Support.xml b/doc/release-notes/en-US/Product_Support.xml
new file mode 100644
index 000000000..ab44e5182
--- /dev/null
+++ b/doc/release-notes/en-US/Product_Support.xml
@@ -0,0 +1,12 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+ Product Support
+
+
+ You can reach support at .
+
diff --git a/doc/release-notes/en-US/Release_Notes.ent b/doc/release-notes/en-US/Release_Notes.ent
new file mode 100644
index 000000000..9275f166d
--- /dev/null
+++ b/doc/release-notes/en-US/Release_Notes.ent
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/doc/release-notes/en-US/Release_Notes.xml b/doc/release-notes/en-US/Release_Notes.xml
new file mode 100644
index 000000000..d23fb4d7f
--- /dev/null
+++ b/doc/release-notes/en-US/Release_Notes.xml
@@ -0,0 +1,17 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/release-notes/en-US/Revision_History.xml b/doc/release-notes/en-US/Revision_History.xml
new file mode 100644
index 000000000..f473d0a29
--- /dev/null
+++ b/doc/release-notes/en-US/Revision_History.xml
@@ -0,0 +1,27 @@
+
+
+
+%BOOK_ENTITIES;
+]>
+
+ Revision History
+
+
+
+ 1-0
+ Mon Apr 9 2012
+
+ Divya
+ Muntimadugu
+ divya@redhat.com
+
+
+
+ Draft
+
+
+
+
+
diff --git a/doc/release-notes/en-US/Whats_New.xml b/doc/release-notes/en-US/Whats_New.xml
new file mode 100644
index 000000000..c320c1aa3
--- /dev/null
+++ b/doc/release-notes/en-US/Whats_New.xml
@@ -0,0 +1,90 @@
+
+
+%BOOK_ENTITIES;
+]>
+
+ What is New in this Release?
+ This section describes the key features available in GlusterFS 3.3. The following is a list of feature highlights of this new version of the GlusterFS software:
+
+
+ Unified File and Object Storage
+ Unified File and Object Storage (UFO) unifies NAS and object storage technology. It provides a system for data storage that enables users to access the same data both as an object and as a file, thus simplifying management and controlling storage costs.
+
+
+ Replicate Improvements (Pro-active Self-heal)
+ Previously, in the replicate module, you had to manually trigger a self-heal after a brick went offline and came back online in order to bring all the replicas back in sync. Now the pro-active self-heal daemon runs in the background, diagnoses issues, and automatically initiates self-healing when a brick comes online. You can view the list of files that need healing, the list of files that were recently healed, and the list of files in split-brain state, and you can manually trigger self-heal on the entire volume or only on the files that need healing.
+
+
+ Network Lock Manager
+
+ GlusterFS 3.3 includes network lock manager (NLM) v4. NLM is a standard extension to NFSv3 that allows NFSv3 clients to lock files across the network. NLM is required for applications running on top of NFSv3 mount points to use the standard fcntl() (POSIX) and flock() (BSD) lock system calls to synchronize access across clients.
+
+
+
+ Volume Statedump
+
+ Statedump is a mechanism through which you can get details of all the internal variables and the state of a glusterfs process at the time the command is issued. You can take statedumps of the brick processes and the NFS server process of a volume using the statedump command. The statedump information is useful while debugging.
+
+
+
+ Volume Status and Brick Information
+
+ You can display status information about a specific volume, a brick, or all volumes, as needed. Volume status information includes memory usage, memory pool details of the bricks, inode tables of the volume, pending calls of the volume, and other statistics. This information can be used to understand the current status of the brick, the NFS processes, and the overall file
+system. Status information can also be used to monitor and debug volumes.
+
+
+ Geo-replication Enhancements
+ Now you can configure a secure slave using SSH so that the master is granted restricted access. You need not specify configuration parameters regarding the slave in the master-side configuration. You can also rotate the log file of a particular master-slave session, of all sessions of a master volume, or of all geo-replication sessions, as needed. You can also set the ignore-deletes option to 1 so that a file deleted on the master does not trigger a delete operation on the slave. Hence, the slave remains a superset of the master and can be used to recover the master in case of a crash and/or an accidental delete.
+
+
+
+ Mount Server Fail-over
+ There is now an option to add a backup volfile server while mounting a FUSE client. When the first volfile server fails, the server specified in the backupvolfile-server option is used as the volfile server to mount the client. You can also specify the number of volfile fetch attempts while mounting the GlusterFS server. This option is useful when you mount a server with multiple IPs.
+
+
+
+ Debugging Locks
+ You can use the statedump command to list the locks held on files. The statedump output also provides information on each lock, such as its range, basename, the PID of the application holding the lock, and so on. You can analyze the output to know which locks are valid and relevant at a point in time. After ensuring that no application is using the file, you can clear the lock using the clear-locks command.
+
+
+
+ Change in Working Directory
+ The working directory of glusterd has changed to /var/lib/glusterd from /etc/glusterd.
+
+
+
+ Hadoop Compatible Storage
+ GlusterFS provides compatibility for Apache Hadoop, using the standard file system APIs available in Hadoop to provide a new storage option for Hadoop deployments. Existing MapReduce-based applications can use GlusterFS seamlessly. This new functionality opens up data within Hadoop deployments to any file-based or object-based application.
+
+
+ Granular Locking for Large Files
+ Enables using GlusterFS as a backing store for large files such as virtual machine images. Granular locking allows internal file operations (like self-heal) to proceed without blocking user-level file operations, so the latency of user I/O is reduced during self-heal operations.
+
+
+
+ Configuration Enhancements
+
+
+ Remove Brick Enhancements
+ Previously, the remove-brick command was used to remove a brick that had become inaccessible due to hardware or network failure, and as a clean-up operation to remove dead server details from the volume configuration. Now the remove-brick command can migrate data to the remaining bricks before deleting the given brick.
+
+
+ Rebalance Enhancements
+ GlusterFS 3.3 supports rebalancing open files and files that have hard links. Rebalance has been enhanced to be more efficient with respect to network usage, completion time, and the amount of data moved, and it starts migrating data immediately without waiting for the directory layout to be fixed.
+
+
+ Dynamic Alteration of Volume Type
+ You can now change the type of a volume from Distributed to Distributed Replicated while performing add-brick and remove-brick operations. You must specify the replica count parameter to increase the number of replicas when changing a volume to Distributed Replicated.
+ Currently, changing the stripe count while changing volume configurations is not supported.
+
+
+
+
+
+ Read-only Volume
+ GlusterFS 3.3 enables you to mount volumes as read-only. While mounting, a client can mount a volume as read-only, and you can also make an entire volume read-only for all clients (including NFS clients) using the volume set option.
+
+
+
+
diff --git a/doc/release-notes/en-US/gfs_introduction.xml b/doc/release-notes/en-US/gfs_introduction.xml
new file mode 100644
index 000000000..5fd887305
--- /dev/null
+++ b/doc/release-notes/en-US/gfs_introduction.xml
@@ -0,0 +1,54 @@
+
+
+
+
+ Introducing Gluster File System
+ GlusterFS is an open source, clustered file system capable of scaling to several petabytes and handling thousands of clients. GlusterFS can be flexibly combined with commodity physical, virtual, and cloud resources to deliver highly available and performant enterprise storage at a fraction of the cost of traditional solutions.
+ GlusterFS clusters together storage building blocks over InfiniBand RDMA and/or TCP/IP interconnects, aggregating disk and memory resources and managing data in a single global namespace. GlusterFS is based on a stackable user-space design, delivering exceptional performance for diverse workloads.
+
+ Virtualized Cloud Environments + + + Virtualized Cloud Environments + + + + + +
+ GlusterFS is designed for today's high-performance, virtualized cloud environments. Unlike traditional data centers, cloud environments require multi-tenancy along with the ability to grow or shrink resources on demand. Enterprises can scale capacity, performance, and availability on demand, with no vendor lock-in, across on-premises, public cloud, and hybrid environments.
+ GlusterFS is in production at thousands of enterprises spanning media, healthcare, government, education, web 2.0, and financial services. The following table lists the commercial offerings and their documentation locations:
+
+
+
+
+
+
+
+ Product
+ Documentation Location
+
+
+
+
+ Red Hat Storage Software Appliance
+
+
+
+
+
+ Red Hat Virtual Storage Appliance
+
+
+
+
+
+ Red Hat Storage
+
+
+
+
+
+
+
diff --git a/doc/release-notes/en-US/images/640px-GlusterFS_3.2_Architecture.png b/doc/release-notes/en-US/images/640px-GlusterFS_3.2_Architecture.png new file mode 100644 index 000000000..95f89ec82 Binary files /dev/null and b/doc/release-notes/en-US/images/640px-GlusterFS_3.2_Architecture.png differ diff --git a/doc/release-notes/en-US/images/icon.svg b/doc/release-notes/en-US/images/icon.svg new file mode 100644 index 000000000..b2f16d0f6 --- /dev/null +++ b/doc/release-notes/en-US/images/icon.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/doc/release-notes/publican.cfg b/doc/release-notes/publican.cfg new file mode 100644 index 000000000..8afb0d03b --- /dev/null +++ b/doc/release-notes/publican.cfg @@ -0,0 +1,12 @@ +# Config::Simple 4.59 +# Thu Apr 5 16:25:43 2012 + +xml_lang: "en-US" +type: Book +brand: Gluster_Brand +prod_url: http://www.gluster.org +doc_url: http://www.gluster.com/community/documentation/index.php/Main_Page +condition: gfs +show_remarks: 1 + + -- cgit