Diffstat (limited to 'ufo/etc')
-rw-r--r--  ufo/etc/account-server/1.conf-gluster        19
-rw-r--r--  ufo/etc/container-server/1.conf-gluster      21
-rw-r--r--  ufo/etc/fs.conf-gluster                      17
-rw-r--r--  ufo/etc/object-server/1.conf-gluster         36
-rw-r--r--  ufo/etc/proxy-server.conf-gluster            69
-rw-r--r--  ufo/etc/swift.conf-gluster                   91
6 files changed, 0 insertions, 253 deletions
diff --git a/ufo/etc/account-server/1.conf-gluster b/ufo/etc/account-server/1.conf-gluster
deleted file mode 100644
index da8f31726..000000000
--- a/ufo/etc/account-server/1.conf-gluster
+++ /dev/null
@@ -1,19 +0,0 @@
-[DEFAULT]
-devices = /mnt/gluster-object
-mount_check = true
-bind_port = 6012
-user = root
-log_facility = LOG_LOCAL2
-
-[pipeline:main]
-pipeline = account-server
-
-[app:account-server]
-use = egg:gluster_swift_ufo#account
-
-[account-replicator]
-vm_test_mode = yes
-
-[account-auditor]
-
-[account-reaper]
diff --git a/ufo/etc/container-server/1.conf-gluster b/ufo/etc/container-server/1.conf-gluster
deleted file mode 100644
index acad62135..000000000
--- a/ufo/etc/container-server/1.conf-gluster
+++ /dev/null
@@ -1,21 +0,0 @@
-[DEFAULT]
-devices = /mnt/gluster-object
-mount_check = true
-bind_port = 6011
-user = root
-log_facility = LOG_LOCAL2
-
-[pipeline:main]
-pipeline = container-server
-
-[app:container-server]
-use = egg:gluster_swift_ufo#container
-
-[container-replicator]
-vm_test_mode = yes
-
-[container-updater]
-
-[container-auditor]
-
-[container-sync]
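
The two nearly identical server configs above (account and container) are plain paste.deploy-style INI files, so they can be inspected with stock tooling. Below is a minimal sketch using Python's configparser to read back the [DEFAULT] values; the install path is hypothetical, and the real services load these files through PasteDeploy rather than configparser.

    import configparser

    # Minimal sketch: read back the [DEFAULT] options from one of the
    # removed *.conf-gluster files. The path below is hypothetical; the
    # actual services load these files via PasteDeploy, not configparser.
    parser = configparser.ConfigParser()
    parser.read("/etc/swift/account-server/1.conf")  # hypothetical install path

    defaults = parser["DEFAULT"]
    print("devices     =", defaults.get("devices"))
    print("mount_check =", defaults.getboolean("mount_check", fallback=False))
    print("bind_port   =", defaults.getint("bind_port", fallback=6012))
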
diff --git a/ufo/etc/fs.conf-gluster b/ufo/etc/fs.conf-gluster
deleted file mode 100644
index 71a9b0313..000000000
--- a/ufo/etc/fs.conf-gluster
+++ /dev/null
@@ -1,17 +0,0 @@
-[DEFAULT]
-# IP address of a GlusterFS volume server member. By default, we assume the
-# local host.
-mount_ip = localhost
-
-# By default it is assumed that the Gluster volumes can be accessed using
-# other methods besides UFO (not object-only), which disables certain caching
-# optimizations in order to stay in sync with file system changes.
-object_only = no
-
-# Performance optimization parameter. When turned off, the filesystem will
-# see a reduced number of stat calls, resulting in substantially faster
-# response time for GET and HEAD container requests on containers with large
-# numbers of objects, at the expense of an accurate count of combined bytes
-# used by all objects in the container. For most installations "off" works
-# fine.
-accurate_size_in_listing = off
\ No newline at end of file
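
The accurate_size_in_listing comment above describes a classic stat-per-object cost. The sketch below is only a model of that trade-off, not the gluster-swift implementation: it shows why "off" is substantially faster on containers with many objects.

    import os

    def container_bytes_used(container_dir, accurate=False):
        """Model of the accurate_size_in_listing trade-off (illustrative only).

        With accurate=True every object is stat()ed, so HEAD/GET on a
        container with N objects costs O(N) filesystem calls.
        """
        if not accurate:
            return 0  # fast path: skip all per-object stat() calls
        total = 0
        for dirpath, _dirs, files in os.walk(container_dir):
            for name in files:
                total += os.stat(os.path.join(dirpath, name)).st_size
        return total
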
diff --git a/ufo/etc/object-server/1.conf-gluster b/ufo/etc/object-server/1.conf-gluster
deleted file mode 100644
index 0d85546cd..000000000
--- a/ufo/etc/object-server/1.conf-gluster
+++ /dev/null
@@ -1,36 +0,0 @@
-[DEFAULT]
-devices = /mnt/gluster-object
-mount_check = true
-bind_port = 6010
-# Setting this value initially to match the number of CPUs is a good
-# starting point for determining the right value.
-workers = 1
-
-[pipeline:main]
-pipeline = object-server
-
-[app:object-server]
-use = egg:gluster_swift_ufo#object
-user = root
-log_facility = LOG_LOCAL2
-# Give up on connection attempts to other servers after 5 seconds.
-conn_timeout = 5
-# For high load situations, once connected to a container server, allow for
-# delays communicating with it.
-node_timeout = 60
-# Adjust this value to match the stripe width of the underlying storage array
-# (not the stripe element size). This will provide a reasonable starting point
-# for tuning this value.
-disk_chunk_size = 65536
-# Adjust this value to match whatever is set for the disk_chunk_size
-# initially. This will provide a reasonable starting point for tuning this
-# value.
-network_chunk_size = 65536
-
-[object-replicator]
-vm_test_mode = yes
-
-[object-updater]
-
-[object-auditor]
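
The chunk-size comments above suggest keeping the disk and network buffers equal; when they differ, each object served has to be re-buffered between the read and the write. A rough sketch of the streaming loop those two values govern (not the actual object-server code):

    DISK_CHUNK_SIZE = 65536     # bytes read from the volume at a time
    NETWORK_CHUNK_SIZE = 65536  # keep equal to DISK_CHUNK_SIZE initially

    def stream_object(src_file, dst_sock):
        """Copy an object from disk to a socket in fixed-size chunks."""
        while True:
            chunk = src_file.read(DISK_CHUNK_SIZE)
            if not chunk:
                break
            dst_sock.sendall(chunk)  # one send per disk read when sizes match
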
diff --git a/ufo/etc/proxy-server.conf-gluster b/ufo/etc/proxy-server.conf-gluster
deleted file mode 100644
index e04efecea..000000000
--- a/ufo/etc/proxy-server.conf-gluster
+++ /dev/null
@@ -1,69 +0,0 @@
-[DEFAULT]
-bind_port = 8080
-user = root
-log_facility = LOG_LOCAL1
-# Consider using 1 worker per CPU
-workers = 1
-
-[pipeline:main]
-pipeline = healthcheck cache tempauth proxy-server
-
-[app:proxy-server]
-use = egg:gluster_swift_ufo#proxy
-log_facility = LOG_LOCAL1
-# The API allows for account creation and deletion, but since Gluster/Swift
-# automounts a Gluster volume for a given account, there is no way to create
-# or delete an account. So leave this off.
-allow_account_management = false
-account_autocreate = true
-# Only need to recheck that the account exists once a day.
-recheck_account_existence = 86400
-# May want to consider bumping this up if containers are created and destroyed
-# infrequently.
-recheck_container_existence = 60
-# Timeout clients that don't read or write to the proxy server after 5
-# seconds.
-client_timeout = 5
-# Give more time to connect to the object, container or account servers in
-# cases of high load.
-conn_timeout = 5
-# For high load situations, once connected to an object, container or account
-# server, allow for delays communicating with them.
-node_timeout = 60
-# May want to consider bumping up this value to 1 - 4 MB depending on how much
-# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
-# stripe width (not stripe element size) of your storage volume is a good
-# starting point. See below for sizing information.
-object_chunk_size = 65536
-# If you do decide to increase the object_chunk_size, then consider lowering
-# this value to one. Up to "put_queue_depth" object_chunk_size'd buffers can
-# be queued to the object server for processing. Given one proxy server worker
-# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
-# * 1,024 bytes of memory in the worst case (default values). Be sure the
-# amount of memory available on the system can accommodate increased values
-# for object_chunk_size.
-put_queue_depth = 10
-
-[filter:tempauth]
-use = egg:swift#tempauth
-# Here you need to add users explicitly. See the OpenStack Swift Deployment
-# Guide for more information. The user and user64 directives take the
-# following form:
-# user_<account>_<username> = <key> [group] [group] [...] [storage_url]
-# user64_<account_b64>_<username_b64> = <key> [group] [group] [...] [storage_url]
-# Use user64, with base64-encoded names, for accounts and/or usernames that
-# include underscores.
-#
-# NOTE (and WARNING): The account name must match the device name specified
-# when building the account, container, and object rings.
-#
-# E.g.
-# user_ufo0_admin = abc123 .admin
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-
-[filter:cache]
-use = egg:swift#memcache
-# Update this line to contain a comma-separated list of memcache servers
-# shared by all nodes running the proxy-server service.
-memcache_servers = localhost:11211
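
The put_queue_depth comment above reasons about worst-case buffer memory, and the arithmetic is easy to check. With the defaults in this file, one fully loaded worker can pin about 640 MiB:

    # Worst-case PUT buffering per proxy worker, using this file's defaults.
    put_queue_depth = 10
    object_chunk_size = 65536       # bytes
    connections_per_worker = 1024   # default connection limit per worker

    worst_case = put_queue_depth * object_chunk_size * connections_per_worker
    print(worst_case, "bytes =", worst_case // 2**20, "MiB")  # 671088640 = 640 MiB
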
diff --git a/ufo/etc/swift.conf-gluster b/ufo/etc/swift.conf-gluster
deleted file mode 100644
index 25c3ca157..000000000
--- a/ufo/etc/swift.conf-gluster
+++ /dev/null
@@ -1,91 +0,0 @@
-[DEFAULT]
-
-
-[swift-hash]
-# random unique string that can never change (DO NOT LOSE)
-swift_hash_path_suffix = gluster
-
-
-# The swift-constraints section sets the basic constraints on data
-# saved in the swift cluster.
-
-[swift-constraints]
-
-# max_file_size is the largest "normal" object that can be saved in
-# the cluster. This is also the limit on the size of each segment of
-# a "large" object when using the large object manifest support.
-# This value is set in bytes. Setting it lower than 1 MiB will cause
-# some tests to fail. It is STRONGLY recommended to leave this value at
-# the default (5 * 2**30 + 2).
-
-# FIXME: Really? Can Gluster handle a 2^64-sized file? And can the fronting
-# web service handle such a size? I think that with UFO we need to stick with
-# the default size from Swift and encourage users to research what size their
-# web services infrastructure can handle.
-
-max_file_size = 18446744073709551616
-
-
-# max_meta_name_length is the max number of bytes in the utf8 encoding
-# of the name portion of a metadata header.
-
-#max_meta_name_length = 128
-
-
-# max_meta_value_length is the max number of bytes in the utf8 encoding
-# of a metadata value
-
-#max_meta_value_length = 256
-
-
-# max_meta_count is the max number of metadata keys that can be stored
-# on a single account, container, or object
-
-#max_meta_count = 90
-
-
-# max_meta_overall_size is the max number of bytes in the utf8 encoding
-# of the metadata (keys + values)
-
-#max_meta_overall_size = 4096
-
-
-# max_object_name_length is the max number of bytes in the utf8 encoding of an
-# object name: Gluster FS can handle much longer file names, but the length of
-# each component between the slashes of the URL is limited separately below.
-# Remember that most web clients can't handle anything greater than 2048, and
-# those that do are rather clumsy.
-
-max_object_name_length = 2048
-
-# max_object_name_component_length (GlusterFS) is the max number of bytes in
-# the utf8 encoding of an object name component (the part between the
-# slashes); this is a limit imposed by the underlying file system (for XFS it
-# is 255 bytes).
-
-max_object_name_component_length = 255
-
-# container_listing_limit is the default (and max) number of items
-# returned for a container listing request
-
-#container_listing_limit = 10000
-
-
-# account_listing_limit is the default (and max) number of items returned
-# for an account listing request
-
-#account_listing_limit = 10000
-
-
-# max_account_name_length is the max number of bytes in the utf8 encoding of
-# an account name. This is a Gluster FS filename limit (XFS limit?), so it
-# must be the same as max_object_name_component_length above.
-
-max_account_name_length = 255
-
-
-# max_container_name_length is the max number of bytes in the utf8 encoding
-# of a container name. This is a Gluster FS filename limit (XFS limit?), so it
-# must be the same as max_object_name_component_length above.
-
-max_container_name_length = 255
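
The max_file_size value above is exactly 2**64 bytes, which is what the FIXME comment questions; the file's own comment gives Swift's stock default as 5 * 2**30 + 2 (5 GiB plus 2 bytes). A quick numeric check:

    # Verify the constraint values set in swift.conf-gluster above.
    max_file_size = 18446744073709551616
    assert max_file_size == 2**64

    swift_default = 5 * 2**30 + 2   # the default the FIXME recommends keeping
    print(max_file_size // swift_default)  # ~3.4 billion times larger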