summaryrefslogtreecommitdiffstats
path: root/test/unit
diff options
context:
space:
mode:
authorPrashanth Pai <ppai@redhat.com>2015-11-02 11:55:17 +0530
committerThiago da Silva <thiago@redhat.com>2016-03-07 10:38:49 -0800
commitea4750a366123f78411d90082733642376dc6afc (patch)
tree5124b5a407791afcd2dd1cfef00a3959cbb26033 /test/unit
parentc5d76cdd2e2e99d4ac65b645b17cf8a43e4ccab4 (diff)
Rebase to stable/kilo
This change ports most of swiftonfile object server fixes and changes into gluster-swift. Storage policy as a feature is not usable here (it doesn't make sense). The hacky way of creating zero byte tracker objects for object expiration has not been ported to this release due to scalability issues and the need to have a separate volume. Change-Id: I17ba27dacea9ac000bdb8934700996e4d17f4251 Signed-off-by: Prashanth Pai <ppai@redhat.com> Reviewed-on: http://review.gluster.org/13269 Reviewed-by: Thiago da Silva <thiago@redhat.com> Tested-by: Thiago da Silva <thiago@redhat.com>
Diffstat (limited to 'test/unit')
-rw-r--r--test/unit/__init__.py602
-rw-r--r--test/unit/obj/test_diskfile.py138
-rw-r--r--test/unit/obj/test_expirer.py385
-rw-r--r--test/unit/proxy/controllers/test_account.py16
-rw-r--r--test/unit/proxy/controllers/test_base.py463
-rw-r--r--test/unit/proxy/controllers/test_container.py93
-rwxr-xr-xtest/unit/proxy/controllers/test_obj.py1412
-rw-r--r--test/unit/proxy/test_server.py3991
8 files changed, 5919 insertions, 1181 deletions
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index a1bfef8..372fb58 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -20,71 +20,288 @@ import copy
import logging
import errno
import sys
-from contextlib import contextmanager
-from collections import defaultdict
+from contextlib import contextmanager, closing
+from collections import defaultdict, Iterable
+import itertools
+from numbers import Number
from tempfile import NamedTemporaryFile
import time
+import eventlet
from eventlet.green import socket
from tempfile import mkdtemp
from shutil import rmtree
+from swift.common.utils import Timestamp
from test import get_config
-from swift.common.utils import config_true_value, LogAdapter
+from swift.common import swob, utils
+from swift.common.ring import Ring, RingData
from hashlib import md5
-from eventlet import sleep, Timeout
import logging.handlers
from httplib import HTTPException
-from numbers import Number
+from swift.common import storage_policy
+from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
+import functools
+import cPickle as pickle
+from gzip import GzipFile
+import mock as mocklib
+import inspect
+
+EMPTY_ETAG = md5().hexdigest()
+
+# try not to import this module from swift
+if not os.path.basename(sys.argv[0]).startswith('swift'):
+ # never patch HASH_PATH_SUFFIX AGAIN!
+ utils.HASH_PATH_SUFFIX = 'endcap'
+
+
+def patch_policies(thing_or_policies=None, legacy_only=False,
+ with_ec_default=False, fake_ring_args=None):
+ if isinstance(thing_or_policies, (
+ Iterable, storage_policy.StoragePolicyCollection)):
+ return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)
+
+ if legacy_only:
+ default_policies = [
+ StoragePolicy(0, name='legacy', is_default=True),
+ ]
+ default_ring_args = [{}]
+ elif with_ec_default:
+ default_policies = [
+ ECStoragePolicy(0, name='ec', is_default=True,
+ ec_type='jerasure_rs_vand', ec_ndata=10,
+ ec_nparity=4, ec_segment_size=4096),
+ StoragePolicy(1, name='unu'),
+ ]
+ default_ring_args = [{'replicas': 14}, {}]
+ else:
+ default_policies = [
+ StoragePolicy(0, name='nulo', is_default=True),
+ StoragePolicy(1, name='unu'),
+ ]
+ default_ring_args = [{}, {}]
+
+ fake_ring_args = fake_ring_args or default_ring_args
+ decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args)
+
+ if not thing_or_policies:
+ return decorator
+ else:
+ # it's a thing, we return the wrapped thing instead of the decorator
+ return decorator(thing_or_policies)
+
+
+class PatchPolicies(object):
+ """
+ Why not mock.patch? In my case, when used as a decorator on the class it
+ seemed to patch setUp at the wrong time (i.e. in setup the global wasn't
+ patched yet)
+ """
+
+ def __init__(self, policies, fake_ring_args=None):
+ if isinstance(policies, storage_policy.StoragePolicyCollection):
+ self.policies = policies
+ else:
+ self.policies = storage_policy.StoragePolicyCollection(policies)
+ self.fake_ring_args = fake_ring_args or [None] * len(self.policies)
+
+ def _setup_rings(self):
+ """
+ Our tests tend to use the policies rings like their own personal
+ playground - which can be a problem in the particular case of a
+ patched TestCase class where the FakeRing objects are scoped in the
+ call to the patch_policies wrapper outside of the TestCase instance
+ which can lead to some bled state.
+
+ To help tests get better isolation without having to think about it,
+ here we're capturing the args required to *build* a new FakeRing
+ instances so we can ensure each test method gets a clean ring setup.
+
+ The TestCase can always "tweak" these fresh rings in setUp - or if
+ they'd prefer to get the same "reset" behavior with custom FakeRing's
+ they can pass in their own fake_ring_args to patch_policies instead of
+ setting the object_ring on the policy definitions.
+ """
+ for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
+ if fake_ring_arg is not None:
+ policy.object_ring = FakeRing(**fake_ring_arg)
+
+ def __call__(self, thing):
+ if isinstance(thing, type):
+ return self._patch_class(thing)
+ else:
+ return self._patch_method(thing)
+
+ def _patch_class(self, cls):
+ """
+ Creating a new class that inherits from decorated class is the more
+ common way I've seen class decorators done - but it seems to cause
+ infinite recursion when super is called from inside methods in the
+ decorated class.
+ """
+
+ orig_setUp = cls.setUp
+ orig_tearDown = cls.tearDown
+
+ def setUp(cls_self):
+ self._orig_POLICIES = storage_policy._POLICIES
+ if not getattr(cls_self, '_policies_patched', False):
+ storage_policy._POLICIES = self.policies
+ self._setup_rings()
+ cls_self._policies_patched = True
+
+ orig_setUp(cls_self)
+
+ def tearDown(cls_self):
+ orig_tearDown(cls_self)
+ storage_policy._POLICIES = self._orig_POLICIES
+
+ cls.setUp = setUp
+ cls.tearDown = tearDown
+
+ return cls
+
+ def _patch_method(self, f):
+ @functools.wraps(f)
+ def mywrapper(*args, **kwargs):
+ self._orig_POLICIES = storage_policy._POLICIES
+ try:
+ storage_policy._POLICIES = self.policies
+ self._setup_rings()
+ return f(*args, **kwargs)
+ finally:
+ storage_policy._POLICIES = self._orig_POLICIES
+ return mywrapper
+
+ def __enter__(self):
+ self._orig_POLICIES = storage_policy._POLICIES
+ storage_policy._POLICIES = self.policies
+ def __exit__(self, *args):
+ storage_policy._POLICIES = self._orig_POLICIES
-class FakeRing(object):
- def __init__(self, replicas=3, max_more_nodes=0):
+class FakeRing(Ring):
+
+ def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
+ base_port=1000):
+ """
+ :param part_power: make part calculation based on the path
+
+ If you set a part_power when you setup your FakeRing the parts you get
+ out of ring methods will actually be based on the path - otherwise we
+ exercise the real ring code, but ignore the result and return 1.
+ """
+ self._base_port = base_port
+ self.max_more_nodes = max_more_nodes
+ self._part_shift = 32 - part_power
# 9 total nodes (6 more past the initial 3) is the cap, no matter if
# this is set higher, or R^2 for R replicas
- self.replicas = replicas
- self.max_more_nodes = max_more_nodes
- self.devs = {}
+ self.set_replicas(replicas)
+ self._reload()
+
+ def _reload(self):
+ self._rtime = time.time()
def set_replicas(self, replicas):
self.replicas = replicas
- self.devs = {}
+ self._devs = []
+ for x in range(self.replicas):
+ ip = '10.0.0.%s' % x
+ port = self._base_port + x
+ self._devs.append({
+ 'ip': ip,
+ 'replication_ip': ip,
+ 'port': port,
+ 'replication_port': port,
+ 'device': 'sd' + (chr(ord('a') + x)),
+ 'zone': x % 3,
+ 'region': x % 2,
+ 'id': x,
+ })
@property
def replica_count(self):
return self.replicas
- def get_part(self, account, container=None, obj=None):
- return 1
-
- def get_nodes(self, account, container=None, obj=None):
- devs = []
- for x in xrange(self.replicas):
- devs.append(self.devs.get(x))
- if devs[x] is None:
- self.devs[x] = devs[x] = \
- {'ip': '10.0.0.%s' % x,
- 'port': 1000 + x,
- 'device': 'sd' + (chr(ord('a') + x)),
- 'zone': x % 3,
- 'region': x % 2,
- 'id': x}
- return 1, devs
-
- def get_part_nodes(self, part):
- return self.get_nodes('blah')[1]
+ def _get_part_nodes(self, part):
+ return [dict(node, index=i) for i, node in enumerate(list(self._devs))]
def get_more_nodes(self, part):
# replicas^2 is the true cap
for x in xrange(self.replicas, min(self.replicas + self.max_more_nodes,
self.replicas * self.replicas)):
yield {'ip': '10.0.0.%s' % x,
- 'port': 1000 + x,
+ 'replication_ip': '10.0.0.%s' % x,
+ 'port': self._base_port + x,
+ 'replication_port': self._base_port + x,
'device': 'sda',
'zone': x % 3,
'region': x % 2,
'id': x}
+def write_fake_ring(path, *devs):
+ """
+ Pretty much just a two node, two replica, 2 part power ring...
+ """
+ dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
+ 'port': 6000}
+ dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
+ 'port': 6000}
+
+ dev1_updates, dev2_updates = devs or ({}, {})
+
+ dev1.update(dev1_updates)
+ dev2.update(dev2_updates)
+
+ replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
+ devs = [dev1, dev2]
+ part_shift = 30
+ with closing(GzipFile(path, 'wb')) as f:
+ pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)
+
+
+class FabricatedRing(Ring):
+ """
+ When a FakeRing just won't do - you can fabricate one to meet
+ your tests needs.
+ """
+
+ def __init__(self, replicas=6, devices=8, nodes=4, port=6000,
+ part_power=4):
+ self.devices = devices
+ self.nodes = nodes
+ self.port = port
+ self.replicas = 6
+ self.part_power = part_power
+ self._part_shift = 32 - self.part_power
+ self._reload()
+
+ def _reload(self, *args, **kwargs):
+ self._rtime = time.time() * 2
+ if hasattr(self, '_replica2part2dev_id'):
+ return
+ self._devs = [{
+ 'region': 1,
+ 'zone': 1,
+ 'weight': 1.0,
+ 'id': i,
+ 'device': 'sda%d' % i,
+ 'ip': '10.0.0.%d' % (i % self.nodes),
+ 'replication_ip': '10.0.0.%d' % (i % self.nodes),
+ 'port': self.port,
+ 'replication_port': self.port,
+ } for i in range(self.devices)]
+
+ self._replica2part2dev_id = [
+ [None] * 2 ** self.part_power
+ for i in range(self.replicas)
+ ]
+ dev_ids = itertools.cycle(range(self.devices))
+ for p in range(2 ** self.part_power):
+ for r in range(self.replicas):
+ self._replica2part2dev_id[r][p] = next(dev_ids)
+
+
class FakeMemcache(object):
def __init__(self):
@@ -152,24 +369,13 @@ def tmpfile(content):
xattr_data = {}
-def _get_inode(fd_or_name):
- try:
- if isinstance(fd_or_name, int):
- fd = fd_or_name
- else:
- try:
- fd = fd_or_name.fileno()
- except AttributeError:
- fd = None
- if fd is None:
- ino = os.stat(fd_or_name).st_ino
- else:
- ino = os.fstat(fd).st_ino
- except OSError as err:
- ioerr = IOError()
- ioerr.errno = err.errno
- raise ioerr
- return ino
+def _get_inode(fd):
+ if not isinstance(fd, int):
+ try:
+ fd = fd.fileno()
+ except AttributeError:
+ return os.stat(fd).st_ino
+ return os.fstat(fd).st_ino
def _setxattr(fd, k, v):
@@ -183,9 +389,7 @@ def _getxattr(fd, k):
inode = _get_inode(fd)
data = xattr_data.get(inode, {}).get(k)
if not data:
- e = IOError("Fake IOError")
- e.errno = errno.ENODATA
- raise e
+ raise IOError(errno.ENODATA, "Fake IOError")
return data
import xattr
@@ -214,6 +418,22 @@ def temptree(files, contents=''):
rmtree(tempdir)
+def with_tempdir(f):
+ """
+ Decorator to give a single test a tempdir as argument to test method.
+ """
+ @functools.wraps(f)
+ def wrapped(*args, **kwargs):
+ tempdir = mkdtemp()
+ args = list(args)
+ args.append(tempdir)
+ try:
+ return f(*args, **kwargs)
+ finally:
+ rmtree(tempdir)
+ return wrapped
+
+
class NullLoggingHandler(logging.Handler):
def emit(self, record):
@@ -239,8 +459,8 @@ class UnmockTimeModule(object):
logging.time = UnmockTimeModule()
-class FakeLogger(logging.Logger):
- # a thread safe logger
+class FakeLogger(logging.Logger, object):
+ # a thread safe fake logger
def __init__(self, *args, **kwargs):
self._clear()
@@ -250,42 +470,57 @@ class FakeLogger(logging.Logger):
self.facility = kwargs['facility']
self.statsd_client = None
self.thread_locals = None
+ self.parent = None
+
+ store_in = {
+ logging.ERROR: 'error',
+ logging.WARNING: 'warning',
+ logging.INFO: 'info',
+ logging.DEBUG: 'debug',
+ logging.CRITICAL: 'critical',
+ }
+
+ def _log(self, level, msg, *args, **kwargs):
+ store_name = self.store_in[level]
+ cargs = [msg]
+ if any(args):
+ cargs.extend(args)
+ captured = dict(kwargs)
+ if 'exc_info' in kwargs and \
+ not isinstance(kwargs['exc_info'], tuple):
+ captured['exc_info'] = sys.exc_info()
+ self.log_dict[store_name].append((tuple(cargs), captured))
+ super(FakeLogger, self)._log(level, msg, *args, **kwargs)
def _clear(self):
self.log_dict = defaultdict(list)
- self.lines_dict = defaultdict(list)
-
- def _store_in(store_name):
- def stub_fn(self, *args, **kwargs):
- self.log_dict[store_name].append((args, kwargs))
- return stub_fn
-
- def _store_and_log_in(store_name):
- def stub_fn(self, *args, **kwargs):
- self.log_dict[store_name].append((args, kwargs))
- self._log(store_name, args[0], args[1:], **kwargs)
- return stub_fn
+ self.lines_dict = {'critical': [], 'error': [], 'info': [],
+ 'warning': [], 'debug': []}
def get_lines_for_level(self, level):
+ if level not in self.lines_dict:
+ raise KeyError(
+ "Invalid log level '%s'; valid levels are %s" %
+ (level,
+ ', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
return self.lines_dict[level]
- error = _store_and_log_in('error')
- info = _store_and_log_in('info')
- warning = _store_and_log_in('warning')
- warn = _store_and_log_in('warning')
- debug = _store_and_log_in('debug')
+ def all_log_lines(self):
+ return dict((level, msgs) for level, msgs in self.lines_dict.items()
+ if len(msgs) > 0)
- def exception(self, *args, **kwargs):
- self.log_dict['exception'].append((args, kwargs,
- str(sys.exc_info()[1])))
- print 'FakeLogger Exception: %s' % self.log_dict
+ def _store_in(store_name):
+ def stub_fn(self, *args, **kwargs):
+ self.log_dict[store_name].append((args, kwargs))
+ return stub_fn
# mock out the StatsD logging methods:
+ update_stats = _store_in('update_stats')
increment = _store_in('increment')
decrement = _store_in('decrement')
timing = _store_in('timing')
timing_since = _store_in('timing_since')
- update_stats = _store_in('update_stats')
+ transfer_rate = _store_in('transfer_rate')
set_statsd_prefix = _store_in('set_statsd_prefix')
def get_increments(self):
@@ -328,7 +563,7 @@ class FakeLogger(logging.Logger):
print 'WARNING: unable to format log message %r %% %r' % (
record.msg, record.args)
raise
- self.lines_dict[record.levelno].append(line)
+ self.lines_dict[record.levelname.lower()].append(line)
def handle(self, record):
self._handle(record)
@@ -345,19 +580,40 @@ class DebugLogger(FakeLogger):
def __init__(self, *args, **kwargs):
FakeLogger.__init__(self, *args, **kwargs)
- self.formatter = logging.Formatter("%(server)s: %(message)s")
+ self.formatter = logging.Formatter(
+ "%(server)s %(levelname)s: %(message)s")
def handle(self, record):
self._handle(record)
print self.formatter.format(record)
- def write(self, *args):
- print args
+
+class DebugLogAdapter(utils.LogAdapter):
+
+ def _send_to_logger(name):
+ def stub_fn(self, *args, **kwargs):
+ return getattr(self.logger, name)(*args, **kwargs)
+ return stub_fn
+
+ # delegate to FakeLogger's mocks
+ update_stats = _send_to_logger('update_stats')
+ increment = _send_to_logger('increment')
+ decrement = _send_to_logger('decrement')
+ timing = _send_to_logger('timing')
+ timing_since = _send_to_logger('timing_since')
+ transfer_rate = _send_to_logger('transfer_rate')
+ set_statsd_prefix = _send_to_logger('set_statsd_prefix')
+
+ def __getattribute__(self, name):
+ try:
+ return object.__getattribute__(self, name)
+ except AttributeError:
+ return getattr(self.__dict__['logger'], name)
def debug_logger(name='test'):
"""get a named adapted debug logger"""
- return LogAdapter(DebugLogger(), name)
+ return DebugLogAdapter(DebugLogger(), name)
original_syslog_handler = logging.handlers.SysLogHandler
@@ -374,7 +630,8 @@ def fake_syslog_handler():
logging.handlers.SysLogHandler = FakeLogger
-if config_true_value(get_config('unit_test').get('fake_syslog', 'False')):
+if utils.config_true_value(
+ get_config('unit_test').get('fake_syslog', 'False')):
fake_syslog_handler()
@@ -447,17 +704,66 @@ def mock(update):
delattr(module, attr)
+class SlowBody(object):
+ """
+ This will work with our fake_http_connect, if you hand in these
+ instead of strings it will make reads take longer by the given
+ amount. It should be a little bit easier to extend than the
+ current slow kwarg - which inserts whitespace in the response.
+ Also it should be easy to detect if you have one of these (or a
+ subclass) for the body inside of FakeConn if we wanted to do
+ something smarter than just duck-type the str/buffer api
+ enough to get by.
+ """
+
+ def __init__(self, body, slowness):
+ self.body = body
+ self.slowness = slowness
+
+ def slowdown(self):
+ eventlet.sleep(self.slowness)
+
+ def __getitem__(self, s):
+ return SlowBody(self.body[s], self.slowness)
+
+ def __len__(self):
+ return len(self.body)
+
+ def __radd__(self, other):
+ self.slowdown()
+ return other + self.body
+
+
def fake_http_connect(*code_iter, **kwargs):
class FakeConn(object):
def __init__(self, status, etag=None, body='', timestamp='1',
- expect_status=None, headers=None):
- self.status = status
- if expect_status is None:
- self.expect_status = self.status
+ headers=None, expect_headers=None, connection_id=None,
+ give_send=None):
+ # connect exception
+ if isinstance(status, (Exception, eventlet.Timeout)):
+ raise status
+ if isinstance(status, tuple):
+ self.expect_status = list(status[:-1])
+ self.status = status[-1]
+ self.explicit_expect_list = True
else:
- self.expect_status = expect_status
+ self.expect_status, self.status = ([], status)
+ self.explicit_expect_list = False
+ if not self.expect_status:
+ # when a swift backend service returns a status before reading
+ # from the body (mostly an error response) eventlet.wsgi will
+ # respond with that status line immediately instead of 100
+ # Continue, even if the client sent the Expect 100 header.
+ # BufferedHttp and the proxy both see these error statuses
+ # when they call getexpect, so our FakeConn tries to act like
+ # our backend services and return certain types of responses
+ # as expect statuses just like a real backend server would do.
+ if self.status in (507, 412, 409):
+ self.expect_status = [status]
+ else:
+ self.expect_status = [100, 100]
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
@@ -466,30 +772,41 @@ def fake_http_connect(*code_iter, **kwargs):
self.etag = etag
self.body = body
self.headers = headers or {}
+ self.expect_headers = expect_headers or {}
self.timestamp = timestamp
+ self.connection_id = connection_id
+ self.give_send = give_send
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
try:
self._next_sleep = kwargs['slow'].pop(0)
except IndexError:
self._next_sleep = None
+ # be nice to trixy bits with node_iter's
+ eventlet.sleep()
def getresponse(self):
- if kwargs.get('raise_exc'):
+ if self.expect_status and self.explicit_expect_list:
+ raise Exception('Test did not consume all fake '
+ 'expect status: %r' % (self.expect_status,))
+ if isinstance(self.status, (Exception, eventlet.Timeout)):
+ raise self.status
+ exc = kwargs.get('raise_exc')
+ if exc:
+ if isinstance(exc, (Exception, eventlet.Timeout)):
+ raise exc
raise Exception('test')
if kwargs.get('raise_timeout_exc'):
- raise Timeout()
+ raise eventlet.Timeout()
return self
def getexpect(self):
- if self.expect_status == -2:
- raise HTTPException()
- if self.expect_status == -3:
- return FakeConn(507)
- if self.expect_status == -4:
- return FakeConn(201)
- if self.expect_status == 412:
- return FakeConn(412)
- return FakeConn(100)
+ expect_status = self.expect_status.pop(0)
+ if isinstance(self.expect_status, (Exception, eventlet.Timeout)):
+ raise self.expect_status
+ headers = dict(self.expect_headers)
+ if expect_status == 409:
+ headers['X-Backend-Timestamp'] = self.timestamp
+ return FakeConn(expect_status, headers=headers)
def getheaders(self):
etag = self.etag
@@ -499,19 +816,23 @@ def fake_http_connect(*code_iter, **kwargs):
else:
etag = '"68b329da9893e34099c7d8ad5cb9c940"'
- headers = {'content-length': len(self.body),
- 'content-type': 'x-application/test',
- 'x-timestamp': self.timestamp,
- 'last-modified': self.timestamp,
- 'x-object-meta-test': 'testing',
- 'x-delete-at': '9876543210',
- 'etag': etag,
- 'x-works': 'yes'}
+ headers = swob.HeaderKeyDict({
+ 'content-length': len(self.body),
+ 'content-type': 'x-application/test',
+ 'x-timestamp': self.timestamp,
+ 'x-backend-timestamp': self.timestamp,
+ 'last-modified': self.timestamp,
+ 'x-object-meta-test': 'testing',
+ 'x-delete-at': '9876543210',
+ 'etag': etag,
+ 'x-works': 'yes',
+ })
if self.status // 100 == 2:
headers['x-account-container-count'] = \
kwargs.get('count', 12345)
if not self.timestamp:
- del headers['x-timestamp']
+ # when timestamp is None, HeaderKeyDict raises KeyError
+ headers.pop('x-timestamp', None)
try:
if container_ts_iter.next() is False:
headers['x-container-timestamp'] = '1'
@@ -538,34 +859,45 @@ def fake_http_connect(*code_iter, **kwargs):
if am_slow:
if self.sent < 4:
self.sent += 1
- sleep(value)
+ eventlet.sleep(value)
return ' '
rv = self.body[:amt]
self.body = self.body[amt:]
return rv
def send(self, amt=None):
+ if self.give_send:
+ self.give_send(self.connection_id, amt)
am_slow, value = self.get_slow()
if am_slow:
if self.received < 4:
self.received += 1
- sleep(value)
+ eventlet.sleep(value)
def getheader(self, name, default=None):
- return dict(self.getheaders()).get(name.lower(), default)
+ return swob.HeaderKeyDict(self.getheaders()).get(name, default)
+
+ def close(self):
+ pass
timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
- if isinstance(kwargs.get('headers'), list):
+ if isinstance(kwargs.get('headers'), (list, tuple)):
headers_iter = iter(kwargs['headers'])
else:
headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
+ if isinstance(kwargs.get('expect_headers'), (list, tuple)):
+ expect_headers_iter = iter(kwargs['expect_headers'])
+ else:
+ expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
+ len(code_iter))
x = kwargs.get('missing_container', [False] * len(code_iter))
if not isinstance(x, (tuple, list)):
x = [x] * len(code_iter)
container_ts_iter = iter(x)
code_iter = iter(code_iter)
+ conn_id_and_code_iter = enumerate(code_iter)
static_body = kwargs.get('body', None)
body_iter = kwargs.get('body_iter', None)
if body_iter:
@@ -573,21 +905,22 @@ def fake_http_connect(*code_iter, **kwargs):
def connect(*args, **ckwargs):
if kwargs.get('slow_connect', False):
- sleep(0.1)
+ eventlet.sleep(0.1)
if 'give_content_type' in kwargs:
if len(args) >= 7 and 'Content-Type' in args[6]:
kwargs['give_content_type'](args[6]['Content-Type'])
else:
kwargs['give_content_type']('')
+ i, status = conn_id_and_code_iter.next()
if 'give_connect' in kwargs:
- kwargs['give_connect'](*args, **ckwargs)
- status = code_iter.next()
- if isinstance(status, tuple):
- status, expect_status = status
- else:
- expect_status = status
+ give_conn_fn = kwargs['give_connect']
+ argspec = inspect.getargspec(give_conn_fn)
+ if argspec.keywords or 'connection_id' in argspec.args:
+ ckwargs['connection_id'] = i
+ give_conn_fn(*args, **ckwargs)
etag = etag_iter.next()
headers = headers_iter.next()
+ expect_headers = expect_headers_iter.next()
timestamp = timestamps_iter.next()
if status <= 0:
@@ -597,8 +930,39 @@ def fake_http_connect(*code_iter, **kwargs):
else:
body = body_iter.next()
return FakeConn(status, etag, body=body, timestamp=timestamp,
- expect_status=expect_status, headers=headers)
+ headers=headers, expect_headers=expect_headers,
+ connection_id=i, give_send=kwargs.get('give_send'))
connect.code_iter = code_iter
return connect
+
+
+@contextmanager
+def mocked_http_conn(*args, **kwargs):
+ requests = []
+
+ def capture_requests(ip, port, method, path, headers, qs, ssl):
+ req = {
+ 'ip': ip,
+ 'port': port,
+ 'method': method,
+ 'path': path,
+ 'headers': headers,
+ 'qs': qs,
+ 'ssl': ssl,
+ }
+ requests.append(req)
+ kwargs.setdefault('give_connect', capture_requests)
+ fake_conn = fake_http_connect(*args, **kwargs)
+ fake_conn.requests = requests
+ with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
+ new=fake_conn):
+ yield fake_conn
+ left_over_status = list(fake_conn.code_iter)
+ if left_over_status:
+ raise AssertionError('left over status %r' % left_over_status)
+
+
+def make_timestamp_iter():
+ return iter(Timestamp(t) for t in itertools.count(int(time.time())))
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 1fe0904..6e2d0b1 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -26,18 +26,18 @@ from eventlet import tpool
from mock import Mock, patch
from hashlib import md5
from copy import deepcopy
+from contextlib import nested
from gluster.swift.common.exceptions import AlreadyExistsAsDir, \
AlreadyExistsAsFile
-from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
- DiskFileNoSpace, DiskFileNotOpen
+from swift.common.exceptions import DiskFileNoSpace, DiskFileNotOpen, \
+ DiskFileNotExist, DiskFileExpired
from swift.common.utils import ThreadPool
-from gluster.swift.common.exceptions import GlusterFileSystemOSError
import gluster.swift.common.utils
from gluster.swift.common.utils import normalize_timestamp
import gluster.swift.obj.diskfile
-from gluster.swift.obj.diskfile import DiskFileWriter, DiskFile, OnDiskManager
-from gluster.swift.common.utils import DEFAULT_UID, DEFAULT_GID, X_TYPE, \
+from gluster.swift.obj.diskfile import DiskFileWriter, DiskFileManager
+from gluster.swift.common.utils import DEFAULT_UID, DEFAULT_GID, \
X_OBJECT_TYPE, DIR_OBJECT
from test.unit.common.test_utils import _initxattr, _destroyxattr
@@ -136,7 +136,7 @@ class TestDiskFile(unittest.TestCase):
self.td = tempfile.mkdtemp()
self.conf = dict(devices=self.td, mb_per_sync=2,
keep_cache_size=(1024 * 1024), mount_check=False)
- self.mgr = OnDiskManager(self.conf, self.lg)
+ self.mgr = DiskFileManager(self.conf, self.lg)
def tearDown(self):
tpool.execute = self._orig_tpool_exc
@@ -150,7 +150,7 @@ class TestDiskFile(unittest.TestCase):
shutil.rmtree(self.td)
def _get_diskfile(self, d, p, a, c, o, **kwargs):
- return self.mgr.get_diskfile(d, a, c, o, **kwargs)
+ return self.mgr.get_diskfile(d, p, a, c, o, **kwargs)
def test_constructor_no_slash(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
@@ -161,18 +161,16 @@ class TestDiskFile(unittest.TestCase):
assert gdf._gid == DEFAULT_GID
assert gdf._obj == "z"
assert gdf._obj_path == ""
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar"), gdf._datadir
- assert gdf._datadir == gdf._put_datadir
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar"), gdf._put_datadir
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
assert gdf._is_dir is False
- assert gdf._logger == self.lg
assert gdf._fd is None
def test_constructor_leadtrail_slash(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "/b/a/z/")
assert gdf._obj == "z"
assert gdf._obj_path == "b/a"
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "b", "a"), gdf._datadir
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "b", "a"), gdf._put_datadir
def test_open_no_metadata(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -323,7 +321,7 @@ class TestDiskFile(unittest.TestCase):
def test_reader_disk_chunk_size(self):
conf = dict(disk_chunk_size=64)
conf.update(self.conf)
- self.mgr = OnDiskManager(conf, self.lg)
+ self.mgr = DiskFileManager(conf, self.lg)
gdf = self._create_and_get_diskfile("vol0", "p57", "ufo47", "bar", "z")
with gdf.open():
reader = gdf.reader()
@@ -669,7 +667,7 @@ class TestDiskFile(unittest.TestCase):
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
- assert gdf._datadir == the_cont
+ assert gdf._put_datadir == the_cont
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
body = '1234\n'
@@ -699,7 +697,7 @@ class TestDiskFile(unittest.TestCase):
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
- assert gdf._datadir == the_cont
+ assert gdf._put_datadir == the_cont
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
body = '1234\n'
@@ -734,7 +732,7 @@ class TestDiskFile(unittest.TestCase):
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
- assert gdf._datadir == the_cont
+ assert gdf._put_datadir == the_cont
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
body = '1234\n'
@@ -760,10 +758,9 @@ class TestDiskFile(unittest.TestCase):
try:
with gdf.create() as dw:
assert dw._tmppath is not None
- tmppath = dw._tmppath
dw.write(body)
dw.put(metadata)
- except GlusterFileSystemOSError:
+ except OSError:
pass
else:
self.fail("Expected exception DiskFileError")
@@ -775,7 +772,7 @@ class TestDiskFile(unittest.TestCase):
assert gdf._obj == "z"
assert gdf._obj_path == the_obj_path
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "b", "a")
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "b", "a")
assert gdf._data_file == os.path.join(
self.td, "vol0", "bar", "b", "a", "z")
@@ -811,8 +808,8 @@ class TestDiskFile(unittest.TestCase):
assert not gdf._is_dir
later = float(gdf.read_metadata()['X-Timestamp']) + 1
gdf.delete(normalize_timestamp(later))
- assert os.path.isdir(gdf._datadir)
- assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert not os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_delete_same_timestamp(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -826,8 +823,8 @@ class TestDiskFile(unittest.TestCase):
assert not gdf._is_dir
now = float(gdf.read_metadata()['X-Timestamp'])
gdf.delete(normalize_timestamp(now))
- assert os.path.isdir(gdf._datadir)
- assert os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_delete_file_not_found(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -845,8 +842,8 @@ class TestDiskFile(unittest.TestCase):
os.unlink(the_file)
gdf.delete(normalize_timestamp(later))
- assert os.path.isdir(gdf._datadir)
- assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert not os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_delete_file_unlink_error(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -879,8 +876,8 @@ class TestDiskFile(unittest.TestCase):
finally:
os.chmod(the_path, stats.st_mode)
- assert os.path.isdir(gdf._datadir)
- assert os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_delete_is_dir(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -890,18 +887,18 @@ class TestDiskFile(unittest.TestCase):
assert gdf._data_file == the_dir
later = float(gdf.read_metadata()['X-Timestamp']) + 1
gdf.delete(normalize_timestamp(later))
- assert os.path.isdir(gdf._datadir)
- assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
+ assert os.path.isdir(gdf._put_datadir)
+ assert not os.path.exists(os.path.join(gdf._put_datadir, gdf._obj))
def test_create(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir/z")
saved_tmppath = ''
saved_fd = None
with gdf.create() as dw:
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "dir")
- assert os.path.isdir(gdf._datadir)
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "dir")
+ assert os.path.isdir(gdf._put_datadir)
saved_tmppath = dw._tmppath
- assert os.path.dirname(saved_tmppath) == gdf._datadir
+ assert os.path.dirname(saved_tmppath) == gdf._put_datadir
assert os.path.basename(saved_tmppath)[:3] == '.z.'
assert os.path.exists(saved_tmppath)
dw.write("123")
@@ -921,10 +918,10 @@ class TestDiskFile(unittest.TestCase):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir/z")
saved_tmppath = ''
with gdf.create() as dw:
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "dir")
- assert os.path.isdir(gdf._datadir)
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "dir")
+ assert os.path.isdir(gdf._put_datadir)
saved_tmppath = dw._tmppath
- assert os.path.dirname(saved_tmppath) == gdf._datadir
+ assert os.path.dirname(saved_tmppath) == gdf._put_datadir
assert os.path.basename(saved_tmppath)[:3] == '.z.'
assert os.path.exists(saved_tmppath)
dw.write("123")
@@ -936,10 +933,10 @@ class TestDiskFile(unittest.TestCase):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir/z")
saved_tmppath = ''
with gdf.create() as dw:
- assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "dir")
- assert os.path.isdir(gdf._datadir)
+ assert gdf._put_datadir == os.path.join(self.td, "vol0", "bar", "dir")
+ assert os.path.isdir(gdf._put_datadir)
saved_tmppath = dw._tmppath
- assert os.path.dirname(saved_tmppath) == gdf._datadir
+ assert os.path.dirname(saved_tmppath) == gdf._put_datadir
assert os.path.basename(saved_tmppath)[:3] == '.z.'
assert os.path.exists(saved_tmppath)
dw.write("123")
@@ -973,3 +970,70 @@ class TestDiskFile(unittest.TestCase):
assert os.path.exists(gdf._data_file) # Real file exists
assert not os.path.exists(tmppath) # Temp file does not exist
+
+ def test_fd_closed_when_diskfile_open_raises_exception_race(self):
+ # do_open() succeeds but read_metadata() fails(GlusterFS)
+ _m_do_open = Mock(return_value=999)
+ _m_do_fstat = Mock(return_value=
+ os.stat_result((33261, 2753735, 2053, 1, 1000,
+ 1000, 6873, 1431415969,
+ 1376895818, 1433139196)))
+ _m_rmd = Mock(side_effect=IOError(errno.ENOENT,
+ os.strerror(errno.ENOENT)))
+ _m_do_close = Mock()
+ _m_log = Mock()
+
+ with nested(
+ patch("gluster.swift.obj.diskfile.do_open", _m_do_open),
+ patch("gluster.swift.obj.diskfile.do_fstat", _m_do_fstat),
+ patch("gluster.swift.obj.diskfile.read_metadata", _m_rmd),
+ patch("gluster.swift.obj.diskfile.do_close", _m_do_close),
+ patch("gluster.swift.obj.diskfile.logging.warn", _m_log)):
+ gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
+ try:
+ with gdf.open():
+ pass
+ except DiskFileNotExist:
+ pass
+ else:
+ self.fail("Expecting DiskFileNotExist")
+ _m_do_fstat.assert_called_once_with(999)
+ _m_rmd.assert_called_once_with(999)
+ _m_do_close.assert_called_once_with(999)
+ self.assertFalse(gdf._fd)
+ # Make sure ENOENT failure is logged
+ self.assertTrue("failed with ENOENT" in _m_log.call_args[0][0])
+
+ def test_fd_closed_when_diskfile_open_raises_DiskFileExpired(self):
+ # A GET/DELETE on an expired object should close fd
+ the_path = os.path.join(self.td, "vol0", "bar")
+ the_file = os.path.join(the_path, "z")
+ os.makedirs(the_path)
+ with open(the_file, "w") as fd:
+ fd.write("1234")
+ md = {
+ 'X-Type': 'Object',
+ 'X-Object-Type': 'file',
+ 'Content-Length': str(os.path.getsize(the_file)),
+ 'ETag': md5("1234").hexdigest(),
+ 'X-Timestamp': os.stat(the_file).st_mtime,
+ 'X-Delete-At': 0, # This is in the past
+ 'Content-Type': 'application/octet-stream'}
+ _metadata[_mapit(the_file)] = md
+
+ _m_do_close = Mock()
+
+ with patch("gluster.swift.obj.diskfile.do_close", _m_do_close):
+ gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
+ try:
+ with gdf.open():
+ pass
+ except DiskFileExpired:
+ # Confirm that original exception is re-raised
+ pass
+ else:
+ self.fail("Expecting DiskFileExpired")
+ self.assertEqual(_m_do_close.call_count, 1)
+ self.assertFalse(gdf._fd)
+ # Close the actual fd, as we had mocked do_close
+ os.close(_m_do_close.call_args[0][0])
diff --git a/test/unit/obj/test_expirer.py b/test/unit/obj/test_expirer.py
index 236775e..9701027 100644
--- a/test/unit/obj/test_expirer.py
+++ b/test/unit/obj/test_expirer.py
@@ -16,12 +16,14 @@
import urllib
from time import time
from unittest import main, TestCase
-from test.unit import FakeLogger
+from test.unit import FakeRing, mocked_http_conn, debug_logger
from copy import deepcopy
+from tempfile import mkdtemp
+from shutil import rmtree
import mock
-from swift.common import internal_client
+from swift.common import internal_client, utils
from swift.obj import expirer
@@ -39,6 +41,7 @@ def not_sleep(seconds):
class TestObjectExpirer(TestCase):
maxDiff = None
+ internal_client = None
def setUp(self):
global not_sleep
@@ -49,9 +52,14 @@ class TestObjectExpirer(TestCase):
internal_client.loadapp = lambda *a, **kw: None
internal_client.sleep = not_sleep
- def teardown(self):
+ self.rcache = mkdtemp()
+ self.conf = {'recon_cache_path': self.rcache}
+ self.logger = debug_logger('test-recon')
+
+ def tearDown(self):
+ rmtree(self.rcache)
internal_client.sleep = self.old_sleep
- internal_client.loadapp = self.loadapp
+ internal_client.loadapp = self.old_loadapp
def test_get_process_values_from_kwargs(self):
x = expirer.ObjectExpirer({})
@@ -59,7 +67,9 @@ class TestObjectExpirer(TestCase):
'processes': 5,
'process': 1,
}
- self.assertEqual((5, 1), x.get_process_values(vals))
+ x.get_process_values(vals)
+ self.assertEqual(x.processes, 5)
+ self.assertEqual(x.process, 1)
def test_get_process_values_from_config(self):
vals = {
@@ -67,7 +77,9 @@ class TestObjectExpirer(TestCase):
'process': 1,
}
x = expirer.ObjectExpirer(vals)
- self.assertEqual((5, 1), x.get_process_values({}))
+ x.get_process_values({})
+ self.assertEqual(x.processes, 5)
+ self.assertEqual(x.process, 1)
def test_get_process_values_negative_process(self):
vals = {
@@ -123,11 +135,13 @@ class TestObjectExpirer(TestCase):
super(ObjectExpirer, self).__init__(conf)
self.processes = 3
self.deleted_objects = {}
+ self.obj_containers_in_order = []
def delete_object(self, actual_obj, timestamp, container, obj):
if container not in self.deleted_objects:
self.deleted_objects[container] = set()
self.deleted_objects[container].add(obj)
+ self.obj_containers_in_order.append(container)
class InternalClient(object):
@@ -139,21 +153,22 @@ class TestObjectExpirer(TestCase):
sum([len(self.containers[x]) for x in self.containers])
def iter_containers(self, *a, **kw):
- return [{'name': x} for x in self.containers.keys()]
+ return [{'name': unicode(x)} for x in self.containers.keys()]
def iter_objects(self, account, container):
- return [{'name': x} for x in self.containers[container]]
+ return [{'name': unicode(x)}
+ for x in self.containers[container]]
def delete_container(*a, **kw):
pass
containers = {
- 0: set('1-one 2-two 3-three'.split()),
- 1: set('2-two 3-three 4-four'.split()),
- 2: set('5-five 6-six'.split()),
- 3: set('7-seven'.split()),
+ '0': set('1-one 2-two 3-three'.split()),
+ '1': set('2-two 3-three 4-four'.split()),
+ '2': set('5-five 6-six'.split()),
+ '3': set(u'7-seven\u2661'.split()),
}
- x = ObjectExpirer({})
+ x = ObjectExpirer(self.conf)
x.swift = InternalClient(containers)
deleted_objects = {}
@@ -162,10 +177,16 @@ class TestObjectExpirer(TestCase):
x.run_once()
self.assertNotEqual(deleted_objects, x.deleted_objects)
deleted_objects = deepcopy(x.deleted_objects)
+ self.assertEqual(containers['3'].pop(),
+ deleted_objects['3'].pop().decode('utf8'))
self.assertEqual(containers, deleted_objects)
+ self.assertEqual(len(set(x.obj_containers_in_order[:4])), 4)
def test_delete_object(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, test, account, container, obj):
self.test = test
self.account = account
@@ -173,12 +194,6 @@ class TestObjectExpirer(TestCase):
self.obj = obj
self.delete_object_called = False
- def delete_object(self, account, container, obj):
- self.test.assertEqual(self.account, account)
- self.test.assertEqual(self.container, container)
- self.test.assertEqual(self.obj, obj)
- self.delete_object_called = True
-
class DeleteActualObject(object):
def __init__(self, test, actual_obj, timestamp):
self.test = test
@@ -196,48 +211,55 @@ class TestObjectExpirer(TestCase):
actual_obj = 'actual_obj'
timestamp = 'timestamp'
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer({}, logger=self.logger)
x.swift = \
InternalClient(self, x.expiring_objects_account, container, obj)
x.delete_actual_object = \
DeleteActualObject(self, actual_obj, timestamp)
+ delete_object_called = []
+
+ def pop_queue(c, o):
+ self.assertEqual(container, c)
+ self.assertEqual(obj, o)
+ delete_object_called[:] = [True]
+
+ x.pop_queue = pop_queue
+
x.delete_object(actual_obj, timestamp, container, obj)
- self.assertTrue(x.swift.delete_object_called)
+ self.assertTrue(delete_object_called)
self.assertTrue(x.delete_actual_object.called)
def test_report(self):
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer({}, logger=self.logger)
x.report()
- self.assertEqual(x.logger.log_dict['info'], [])
+ self.assertEqual(x.logger.get_lines_for_level('info'), [])
x.logger._clear()
x.report(final=True)
- self.assertTrue('completed' in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
- self.assertTrue('so far' not in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
+ self.assertTrue(
+ 'completed' in str(x.logger.get_lines_for_level('info')))
+ self.assertTrue(
+ 'so far' not in str(x.logger.get_lines_for_level('info')))
x.logger._clear()
x.report_last_time = time() - x.report_interval
x.report()
- self.assertTrue('completed' not in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
- self.assertTrue('so far' in x.logger.log_dict['info'][-1][0][0],
- x.logger.log_dict['info'])
+ self.assertTrue(
+ 'completed' not in str(x.logger.get_lines_for_level('info')))
+ self.assertTrue(
+ 'so far' in str(x.logger.get_lines_for_level('info')))
def test_run_once_nothing_to_do(self):
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = 'throw error because a string does not have needed methods'
x.run_once()
- self.assertEqual(x.logger.log_dict['exception'],
- [(("Unhandled exception",), {},
- "'str' object has no attribute "
- "'get_account_info'")])
+ self.assertEqual(x.logger.get_lines_for_level('error'),
+ ["Unhandled exception: "])
+ log_args, log_kwargs = x.logger.log_dict['error'][0]
+ self.assertEqual(str(log_kwargs['exc_info'][1]),
+ "'str' object has no attribute 'get_account_info'")
def test_run_once_calls_report(self):
class InternalClient(object):
@@ -247,15 +269,47 @@ class TestObjectExpirer(TestCase):
def iter_containers(*a, **kw):
return []
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
x.run_once()
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
+
+ def test_run_once_unicode_problem(self):
+ class InternalClient(object):
+
+ container_ring = FakeRing()
+
+ def get_account_info(*a, **kw):
+ return 1, 2
+
+ def iter_containers(*a, **kw):
+ return [{'name': u'1234'}]
+
+ def iter_objects(*a, **kw):
+ return [{'name': u'1234-troms\xf8'}]
+
+ def make_request(*a, **kw):
+ pass
+
+ def delete_container(*a, **kw):
+ pass
+
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
+ x.swift = InternalClient()
+
+ requests = []
+
+ def capture_requests(ipaddr, port, method, path, *args, **kwargs):
+ requests.append((method, path))
+
+ with mocked_http_conn(
+ 200, 200, 200, give_connect=capture_requests):
+ x.run_once()
+ self.assertEqual(len(requests), 3)
def test_container_timestamp_break(self):
class InternalClient(object):
@@ -271,28 +325,28 @@ class TestObjectExpirer(TestCase):
def iter_objects(*a, **kw):
raise Exception('This should not have been called')
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer(self.conf,
+ logger=self.logger)
x.swift = InternalClient([{'name': str(int(time() + 86400))}])
x.run_once()
- for exccall in x.logger.log_dict['exception']:
- self.assertTrue(
- 'This should not have been called' not in exccall[0][0])
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ logs = x.logger.all_log_lines()
+ self.assertEqual(logs['info'], [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
+ self.assertTrue('error' not in logs)
# Reverse test to be sure it still would blow up the way expected.
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
- x.swift = InternalClient([{'name': str(int(time() - 86400))}])
+ fake_swift = InternalClient([{'name': str(int(time() - 86400))}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.run_once()
self.assertEqual(
- x.logger.log_dict['exception'],
- [(('Unhandled exception',), {},
- str(Exception('This should not have been called')))])
+ x.logger.get_lines_for_level('error'), [
+ 'Unhandled exception: '])
+ log_args, log_kwargs = x.logger.log_dict['error'][-1]
+ self.assertEqual(str(log_kwargs['exc_info'][1]),
+ 'This should not have been called')
def test_object_timestamp_break(self):
class InternalClient(object):
@@ -315,41 +369,36 @@ class TestObjectExpirer(TestCase):
def should_not_be_called(*a, **kw):
raise Exception('This should not have been called')
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % int(time() + 86400)}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.run_once()
- for exccall in x.logger.log_dict['exception']:
- self.assertTrue(
- 'This should not have been called' not in exccall[0][0])
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
-
+ self.assertTrue('error' not in x.logger.all_log_lines())
+ self.assertEqual(x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
# Reverse test to be sure it still would blow up the way expected.
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
ts = int(time() - 86400)
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.delete_actual_object = should_not_be_called
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
self.assertEqual(
- excswhiledeleting,
+ x.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj '
- 'This should not have been called' % (ts, ts)])
+ 'This should not have been called: ' % (ts, ts)])
def test_failed_delete_keeps_entry(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
@@ -363,9 +412,6 @@ class TestObjectExpirer(TestCase):
def delete_container(*a, **kw):
pass
- def delete_object(*a, **kw):
- raise Exception('This should not have been called')
-
def iter_objects(self, *a, **kw):
return self.objects
@@ -375,49 +421,48 @@ class TestObjectExpirer(TestCase):
def should_not_get_called(container, obj):
raise Exception('This should not have been called')
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
- x.iter_containers = lambda: [str(int(time() - 86400))]
ts = int(time() - 86400)
- x.delete_actual_object = deliberately_blow_up
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
+ x.iter_containers = lambda: [str(int(time() - 86400))]
+ x.delete_actual_object = deliberately_blow_up
+ x.pop_queue = should_not_get_called
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
+ error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(
- excswhiledeleting,
+ error_lines,
['Exception while deleting object %d %d-actual-obj '
- 'failed to delete actual object' % (ts, ts)])
+ 'failed to delete actual object: ' % (ts, ts)])
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
# Reverse test to be sure it still would blow up the way expected.
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
ts = int(time() - 86400)
- x.delete_actual_object = lambda o, t: None
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
+ self.logger._clear()
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
+ x.delete_actual_object = lambda o, t: None
+ x.pop_queue = should_not_get_called
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
self.assertEqual(
- excswhiledeleting,
+ self.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj This should '
- 'not have been called' % (ts, ts)])
+ 'not have been called: ' % (ts, ts)])
def test_success_gets_counted(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
@@ -437,23 +482,27 @@ class TestObjectExpirer(TestCase):
def iter_objects(self, *a, **kw):
return self.objects
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ fake_swift = InternalClient(
+ [{'name': str(int(time() - 86400))}],
+ [{'name': '%d-acc/c/actual-obj' % int(time() - 86400)}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
x.delete_actual_object = lambda o, t: None
+ x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
- x.swift = InternalClient(
- [{'name': str(int(time() - 86400))}],
- [{'name': '%d-actual-obj' % int(time() - 86400)}])
- x.run_once()
- self.assertEqual(x.report_objects, 1)
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 1 objects expired',), {})])
+ with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0):
+ x.run_once()
+ self.assertEqual(x.report_objects, 1)
+ self.assertEqual(
+ x.logger.get_lines_for_level('info'),
+ ['Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 1 objects expired'])
def test_delete_actual_object_does_not_get_unicode(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
@@ -479,24 +528,28 @@ class TestObjectExpirer(TestCase):
if isinstance(actual_obj, unicode):
got_unicode[0] = True
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
- x.delete_actual_object = delete_actual_object_test_for_unicode
- self.assertEqual(x.report_objects, 0)
- x.swift = InternalClient(
+ fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': u'%d-actual-obj' % int(time() - 86400)}])
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+ swift=fake_swift)
+ x.delete_actual_object = delete_actual_object_test_for_unicode
+ x.pop_queue = lambda c, o: None
+ self.assertEqual(x.report_objects, 0)
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 1 objects expired',), {})])
+ x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 1 objects expired',
+ ])
self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
class InternalClient(object):
+
+ container_ring = None
+
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
@@ -519,8 +572,7 @@ class TestObjectExpirer(TestCase):
def fail_delete_actual_object(actual_obj, timestamp):
raise Exception('failed to delete actual object')
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer(self.conf, logger=self.logger)
cts = int(time() - 86400)
ots = int(time() - 86400)
@@ -538,28 +590,24 @@ class TestObjectExpirer(TestCase):
x.swift = InternalClient(containers, objects)
x.delete_actual_object = fail_delete_actual_object
x.run_once()
- excswhiledeleting = []
- for exccall in x.logger.log_dict['exception']:
- if exccall[0][0].startswith('Exception while deleting '):
- excswhiledeleting.append(exccall[0][0])
- self.assertEqual(sorted(excswhiledeleting), sorted([
+ error_lines = x.logger.get_lines_for_level('error')
+ self.assertEqual(sorted(error_lines), sorted([
'Exception while deleting object %d %d-actual-obj failed to '
- 'delete actual object' % (cts, ots),
+ 'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-next-obj failed to '
- 'delete actual object' % (cts, ots),
+ 'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-actual-obj failed to '
- 'delete actual object' % (cts + 1, ots),
+ 'delete actual object: ' % (cts + 1, ots),
'Exception while deleting object %d %d-next-obj failed to '
- 'delete actual object' % (cts + 1, ots),
+ 'delete actual object: ' % (cts + 1, ots),
'Exception while deleting container %d failed to delete '
- 'container' % (cts,),
+ 'container: ' % (cts,),
'Exception while deleting container %d failed to delete '
- 'container' % (cts + 1,)]))
- self.assertEqual(
- x.logger.log_dict['info'],
- [(('Pass beginning; 1 possible containers; '
- '2 possible objects',), {}),
- (('Pass completed in 0s; 0 objects expired',), {})])
+ 'container: ' % (cts + 1,)]))
+ self.assertEqual(x.logger.get_lines_for_level('info'), [
+ 'Pass beginning; 1 possible containers; 2 possible objects',
+ 'Pass completed in 0s; 0 objects expired',
+ ])
def test_run_forever_initial_sleep_random(self):
global last_not_sleep
@@ -594,8 +642,7 @@ class TestObjectExpirer(TestCase):
raise Exception('exception %d' % raises[0])
raise SystemExit('exiting exception %d' % raises[0])
- x = expirer.ObjectExpirer({})
- x.logger = FakeLogger()
+ x = expirer.ObjectExpirer({}, logger=self.logger)
orig_sleep = expirer.sleep
try:
expirer.sleep = not_sleep
@@ -606,9 +653,11 @@ class TestObjectExpirer(TestCase):
finally:
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'exiting exception 2')
- self.assertEqual(x.logger.log_dict['exception'],
- [(('Unhandled exception',), {},
- 'exception 1')])
+ self.assertEqual(x.logger.get_lines_for_level('error'),
+ ['Unhandled exception: '])
+ log_args, log_kwargs = x.logger.log_dict['error'][0]
+ self.assertEqual(str(log_kwargs['exc_info'][1]),
+ 'exception 1')
def test_delete_actual_object(self):
got_env = [None]
@@ -643,7 +692,7 @@ class TestObjectExpirer(TestCase):
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')
- def test_delete_actual_object_handles_404(self):
+ def test_delete_actual_object_raises_404(self):
def fake_app(env, start_response):
start_response('404 Not Found', [('Content-Length', '0')])
@@ -652,7 +701,8 @@ class TestObjectExpirer(TestCase):
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
- x.delete_actual_object('/path/to/object', '1234')
+ self.assertRaises(internal_client.UnexpectedResponse,
+ x.delete_actual_object, '/path/to/object', '1234')
def test_delete_actual_object_handles_412(self):
@@ -692,10 +742,31 @@ class TestObjectExpirer(TestCase):
x = expirer.ObjectExpirer({})
x.swift.make_request = mock.MagicMock()
x.delete_actual_object(name, timestamp)
- self.assertTrue(x.swift.make_request.called)
+ self.assertEqual(x.swift.make_request.call_count, 1)
self.assertEqual(x.swift.make_request.call_args[0][1],
'/v1/' + urllib.quote(name))
+ def test_pop_queue(self):
+ class InternalClient(object):
+ container_ring = FakeRing()
+ x = expirer.ObjectExpirer({}, logger=self.logger,
+ swift=InternalClient())
+ requests = []
+
+ def capture_requests(ipaddr, port, method, path, *args, **kwargs):
+ requests.append((method, path))
+ with mocked_http_conn(
+ 200, 200, 200, give_connect=capture_requests) as fake_conn:
+ x.pop_queue('c', 'o')
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ for method, path in requests:
+ self.assertEqual(method, 'DELETE')
+ device, part, account, container, obj = utils.split_path(
+ path, 5, 5, True)
+ self.assertEqual(account, '.expiring_objects')
+ self.assertEqual(container, 'c')
+ self.assertEqual(obj, 'o')
+
if __name__ == '__main__':
main()
diff --git a/test/unit/proxy/controllers/test_account.py b/test/unit/proxy/controllers/test_account.py
index 47f76dc..23ad0a1 100644
--- a/test/unit/proxy/controllers/test_account.py
+++ b/test/unit/proxy/controllers/test_account.py
@@ -20,18 +20,21 @@ from swift.common.swob import Request, Response
from swift.common.middleware.acl import format_acl
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_account_info
-from swift.common.constraints import MAX_ACCOUNT_NAME_LENGTH as MAX_ANAME_LEN
+from swift.common import constraints
from test.unit import fake_http_connect, FakeRing, FakeMemcache
+from swift.common.storage_policy import StoragePolicy
from swift.common.request_helpers import get_sys_meta_prefix
import swift.proxy.controllers.base
+from test.unit import patch_policies
+
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
- self.app = proxy_server.Application(None, FakeMemcache(),
- account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ self.app = proxy_server.Application(
+ None, FakeMemcache(),
+ account_ring=FakeRing(), container_ring=FakeRing())
def test_account_info_in_response_env(self):
controller = proxy_server.AccountController(self.app, 'AUTH_bob')
@@ -79,7 +82,8 @@ class TestAccountController(unittest.TestCase):
self.assertEquals(410, resp.status_int)
def test_long_acct_names(self):
- long_acct_name = '%sLongAccountName' % ('Very' * (MAX_ANAME_LEN // 4))
+ long_acct_name = '%sLongAccountName' % (
+ 'Very' * (constraints.MAX_ACCOUNT_NAME_LENGTH // 4))
controller = proxy_server.AccountController(self.app, long_acct_name)
req = Request.blank('/v1/%s' % long_acct_name)
diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py
index 0c94f90..037e28b 100644
--- a/test/unit/proxy/controllers/test_base.py
+++ b/test/unit/proxy/controllers/test_base.py
@@ -13,87 +13,159 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import itertools
+from collections import defaultdict
import unittest
from mock import patch
from swift.proxy.controllers.base import headers_to_container_info, \
headers_to_account_info, headers_to_object_info, get_container_info, \
get_container_memcache_key, get_account_info, get_account_memcache_key, \
- get_object_env_key, _get_cache_key, get_info, get_object_info, \
- Controller, GetOrHeadHandler
-from swift.common.swob import Request, HTTPException, HeaderKeyDict
+ get_object_env_key, get_info, get_object_info, \
+ Controller, GetOrHeadHandler, _set_info_cache, _set_object_info_cache, \
+ bytes_to_skip
+from swift.common.swob import Request, HTTPException, HeaderKeyDict, \
+ RESPONSE_REASONS
+from swift.common import exceptions
from swift.common.utils import split_path
+from swift.common.http import is_success
+from swift.common.storage_policy import StoragePolicy
from test.unit import fake_http_connect, FakeRing, FakeMemcache
from swift.proxy import server as proxy_server
from swift.common.request_helpers import get_sys_meta_prefix
-
-FakeResponse_status_int = 201
+from test.unit import patch_policies
class FakeResponse(object):
- def __init__(self, headers, env, account, container, obj):
- self.headers = headers
- self.status_int = FakeResponse_status_int
- self.environ = env
- if obj:
- env_key = get_object_env_key(account, container, obj)
- else:
- cache_key, env_key = _get_cache_key(account, container)
- if account and container and obj:
- info = headers_to_object_info(headers, FakeResponse_status_int)
- elif account and container:
- info = headers_to_container_info(headers, FakeResponse_status_int)
- else:
- info = headers_to_account_info(headers, FakeResponse_status_int)
- env[env_key] = info
+ base_headers = {}
+ def __init__(self, status_int=200, headers=None, body=''):
+ self.status_int = status_int
+ self._headers = headers or {}
+ self.body = body
+
+ @property
+ def headers(self):
+ if is_success(self.status_int):
+ self._headers.update(self.base_headers)
+ return self._headers
-class FakeRequest(object):
- def __init__(self, env, path, swift_source=None):
- self.environ = env
- (version, account, container, obj) = split_path(path, 2, 4, True)
- self.account = account
- self.container = container
- self.obj = obj
- if obj:
- stype = 'object'
- self.headers = {'content-length': 5555,
- 'content-type': 'text/plain'}
- else:
- stype = container and 'container' or 'account'
- self.headers = {'x-%s-object-count' % (stype): 1000,
- 'x-%s-bytes-used' % (stype): 6666}
- if swift_source:
- meta = 'x-%s-meta-fakerequest-swift-source' % stype
- self.headers[meta] = swift_source
- def get_response(self, app):
- return FakeResponse(self.headers, self.environ, self.account,
- self.container, self.obj)
+class AccountResponse(FakeResponse):
+ base_headers = {
+ 'x-account-container-count': 333,
+ 'x-account-object-count': 1000,
+ 'x-account-bytes-used': 6666,
+ }
-class FakeCache(object):
- def __init__(self, val):
- self.val = val
- def get(self, *args):
- return self.val
+class ContainerResponse(FakeResponse):
+ base_headers = {
+ 'x-container-object-count': 1000,
+ 'x-container-bytes-used': 6666,
+ }
+
+class ObjectResponse(FakeResponse):
+
+ base_headers = {
+ 'content-length': 5555,
+ 'content-type': 'text/plain'
+ }
+
+
+class DynamicResponseFactory(object):
+
+ def __init__(self, *statuses):
+ if statuses:
+ self.statuses = iter(statuses)
+ else:
+ self.statuses = itertools.repeat(200)
+ self.stats = defaultdict(int)
+
+ response_type = {
+ 'obj': ObjectResponse,
+ 'container': ContainerResponse,
+ 'account': AccountResponse,
+ }
+
+ def _get_response(self, type_):
+ self.stats[type_] += 1
+ class_ = self.response_type[type_]
+ return class_(self.statuses.next())
+
+ def get_response(self, environ):
+ (version, account, container, obj) = split_path(
+ environ['PATH_INFO'], 2, 4, True)
+ if obj:
+ resp = self._get_response('obj')
+ elif container:
+ resp = self._get_response('container')
+ else:
+ resp = self._get_response('account')
+ resp.account = account
+ resp.container = container
+ resp.obj = obj
+ return resp
+
+
+class FakeApp(object):
+
+ recheck_container_existence = 30
+ recheck_account_existence = 30
+
+ def __init__(self, response_factory=None, statuses=None):
+ self.responses = response_factory or \
+ DynamicResponseFactory(*statuses or [])
+ self.sources = []
+
+ def __call__(self, environ, start_response):
+ self.sources.append(environ.get('swift.source'))
+ response = self.responses.get_response(environ)
+ reason = RESPONSE_REASONS[response.status_int][0]
+ start_response('%d %s' % (response.status_int, reason),
+ [(k, v) for k, v in response.headers.items()])
+        # It's a bit strange, but the get_info cache stuff relies on the
+ # app setting some keys in the environment as it makes requests
+ # (in particular GETorHEAD_base) - so our fake does the same
+ _set_info_cache(self, environ, response.account,
+ response.container, response)
+ if response.obj:
+ _set_object_info_cache(self, environ, response.account,
+ response.container, response.obj,
+ response)
+ return iter(response.body)
+
+
+class FakeCache(FakeMemcache):
+ def __init__(self, stub=None, **pre_cached):
+ super(FakeCache, self).__init__()
+ if pre_cached:
+ self.store.update(pre_cached)
+ self.stub = stub
+
+ def get(self, key):
+ return self.stub or self.store.get(key)
+
+
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestFuncs(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing)
+ container_ring=FakeRing())
def test_GETorHEAD_base(self):
base = Controller(self.app)
req = Request.blank('/v1/a/c/o/with/slashes')
+ ring = FakeRing()
+ nodes = list(ring.get_part_nodes(0)) + list(ring.get_more_nodes(0))
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'object', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part',
'/a/c/o/with/slashes')
self.assertTrue('swift.object/a/c/o/with/slashes' in resp.environ)
self.assertEqual(
@@ -101,14 +173,14 @@ class TestFuncs(unittest.TestCase):
req = Request.blank('/v1/a/c/o')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'object', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part',
'/a/c/o')
self.assertTrue('swift.object/a/c/o' in resp.environ)
self.assertEqual(resp.environ['swift.object/a/c/o']['status'], 200)
req = Request.blank('/v1/a/c')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'container', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'container', iter(nodes), 'part',
'/a/c')
self.assertTrue('swift.container/a/c' in resp.environ)
self.assertEqual(resp.environ['swift.container/a/c']['status'], 200)
@@ -116,150 +188,166 @@ class TestFuncs(unittest.TestCase):
req = Request.blank('/v1/a')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
- resp = base.GETorHEAD_base(req, 'account', FakeRing(), 'part',
+ resp = base.GETorHEAD_base(req, 'account', iter(nodes), 'part',
'/a')
self.assertTrue('swift.account/a' in resp.environ)
self.assertEqual(resp.environ['swift.account/a']['status'], 200)
def test_get_info(self):
- global FakeResponse_status_int
+ app = FakeApp()
# Do a non cached call to account
env = {}
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- info_a = get_info(None, env, 'a')
+ info_a = get_info(app, env, 'a')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_a['status'], 200)
self.assertEquals(info_a['bytes'], 6666)
self.assertEquals(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
+ # Make sure the app was called
+ self.assertEqual(app.responses.stats['account'], 1)
# Do an env cached call to account
- info_a = get_info(None, env, 'a')
+ info_a = get_info(app, env, 'a')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_a['status'], 200)
self.assertEquals(info_a['bytes'], 6666)
self.assertEquals(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
+ # Make sure the app was NOT called AGAIN
+ self.assertEqual(app.responses.stats['account'], 1)
# This time do env cached call to account and non cached to container
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- info_c = get_info(None, env, 'a', 'c')
+ info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_c['status'], 200)
self.assertEquals(info_c['bytes'], 6666)
self.assertEquals(info_c['object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
self.assertEquals(env.get('swift.container/a/c'), info_c)
+ # Make sure the app was called for container
+ self.assertEqual(app.responses.stats['container'], 1)
# This time do a non cached call to account than non cached to
# container
+ app = FakeApp()
env = {} # abandon previous call to env
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- info_c = get_info(None, env, 'a', 'c')
+ info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_c['status'], 200)
self.assertEquals(info_c['bytes'], 6666)
self.assertEquals(info_c['object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
self.assertEquals(env.get('swift.container/a/c'), info_c)
+ # check app calls both account and container
+ self.assertEqual(app.responses.stats['account'], 1)
+ self.assertEqual(app.responses.stats['container'], 1)
# This time do an env cached call to container while account is not
# cached
del(env['swift.account/a'])
- info_c = get_info(None, env, 'a', 'c')
+ info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
- self.assertEquals(info_a['status'], 201)
+ self.assertEquals(info_a['status'], 200)
self.assertEquals(info_c['bytes'], 6666)
self.assertEquals(info_c['object_count'], 1000)
# Make sure the env cache is set and account still not cached
self.assertEquals(env.get('swift.container/a/c'), info_c)
+ # no additional calls were made
+ self.assertEqual(app.responses.stats['account'], 1)
+ self.assertEqual(app.responses.stats['container'], 1)
# Do a non cached call to account not found with ret_not_found
+ app = FakeApp(statuses=(404,))
env = {}
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- try:
- FakeResponse_status_int = 404
- info_a = get_info(None, env, 'a', ret_not_found=True)
- finally:
- FakeResponse_status_int = 201
+ info_a = get_info(app, env, 'a', ret_not_found=True)
# Check that you got proper info
self.assertEquals(info_a['status'], 404)
- self.assertEquals(info_a['bytes'], 6666)
- self.assertEquals(info_a['total_object_count'], 1000)
+ self.assertEquals(info_a['bytes'], None)
+ self.assertEquals(info_a['total_object_count'], None)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
+ # and account was called
+ self.assertEqual(app.responses.stats['account'], 1)
# Do a cached call to account not found with ret_not_found
- info_a = get_info(None, env, 'a', ret_not_found=True)
+ info_a = get_info(app, env, 'a', ret_not_found=True)
# Check that you got proper info
self.assertEquals(info_a['status'], 404)
- self.assertEquals(info_a['bytes'], 6666)
- self.assertEquals(info_a['total_object_count'], 1000)
+ self.assertEquals(info_a['bytes'], None)
+ self.assertEquals(info_a['total_object_count'], None)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
+        # and account was NOT called AGAIN
+ self.assertEqual(app.responses.stats['account'], 1)
# Do a non cached call to account not found without ret_not_found
+ app = FakeApp(statuses=(404,))
env = {}
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- try:
- FakeResponse_status_int = 404
- info_a = get_info(None, env, 'a')
- finally:
- FakeResponse_status_int = 201
+ info_a = get_info(app, env, 'a')
# Check that you got proper info
self.assertEquals(info_a, None)
self.assertEquals(env['swift.account/a']['status'], 404)
+ # and account was called
+ self.assertEqual(app.responses.stats['account'], 1)
# Do a cached call to account not found without ret_not_found
info_a = get_info(None, env, 'a')
# Check that you got proper info
self.assertEquals(info_a, None)
self.assertEquals(env['swift.account/a']['status'], 404)
+        # and account was NOT called AGAIN
+ self.assertEqual(app.responses.stats['account'], 1)
def test_get_container_info_swift_source(self):
- req = Request.blank("/v1/a/c", environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_container_info(req.environ, 'app', swift_source='MC')
- self.assertEquals(resp['meta']['fakerequest-swift-source'], 'MC')
+ app = FakeApp()
+ req = Request.blank("/v1/a/c", environ={'swift.cache': FakeCache()})
+ get_container_info(req.environ, app, swift_source='MC')
+ self.assertEqual(app.sources, ['GET_INFO', 'MC'])
def test_get_object_info_swift_source(self):
+ app = FakeApp()
req = Request.blank("/v1/a/c/o",
- environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_object_info(req.environ, 'app', swift_source='LU')
- self.assertEquals(resp['meta']['fakerequest-swift-source'], 'LU')
+ environ={'swift.cache': FakeCache()})
+ get_object_info(req.environ, app, swift_source='LU')
+ self.assertEqual(app.sources, ['LU'])
def test_get_container_info_no_cache(self):
req = Request.blank("/v1/AUTH_account/cont",
environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_container_info(req.environ, 'xxx')
+ resp = get_container_info(req.environ, FakeApp())
+ self.assertEquals(resp['storage_policy'], '0')
self.assertEquals(resp['bytes'], 6666)
self.assertEquals(resp['object_count'], 1000)
+ def test_get_container_info_no_account(self):
+ responses = DynamicResponseFactory(404, 200)
+ app = FakeApp(responses)
+ req = Request.blank("/v1/AUTH_does_not_exist/cont")
+ info = get_container_info(req.environ, app)
+ self.assertEqual(info['status'], 0)
+
+ def test_get_container_info_no_auto_account(self):
+ responses = DynamicResponseFactory(404, 200)
+ app = FakeApp(responses)
+ req = Request.blank("/v1/.system_account/cont")
+ info = get_container_info(req.environ, app)
+ self.assertEqual(info['status'], 200)
+ self.assertEquals(info['bytes'], 6666)
+ self.assertEquals(info['object_count'], 1000)
+
def test_get_container_info_cache(self):
- cached = {'status': 404,
- 'bytes': 3333,
- 'object_count': 10,
- # simplejson sometimes hands back strings, sometimes unicodes
- 'versions': u"\u1F4A9"}
+ cache_stub = {
+ 'status': 404, 'bytes': 3333, 'object_count': 10,
+ # simplejson sometimes hands back strings, sometimes unicodes
+ 'versions': u"\u1F4A9"}
req = Request.blank("/v1/account/cont",
- environ={'swift.cache': FakeCache(cached)})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_container_info(req.environ, 'xxx')
+ environ={'swift.cache': FakeCache(cache_stub)})
+ resp = get_container_info(req.environ, FakeApp())
+ self.assertEquals(resp['storage_policy'], '0')
self.assertEquals(resp['bytes'], 3333)
self.assertEquals(resp['object_count'], 10)
self.assertEquals(resp['status'], 404)
@@ -275,18 +363,16 @@ class TestFuncs(unittest.TestCase):
self.assertEquals(resp['bytes'], 3867)
def test_get_account_info_swift_source(self):
- req = Request.blank("/v1/a", environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_account_info(req.environ, 'a', swift_source='MC')
- self.assertEquals(resp['meta']['fakerequest-swift-source'], 'MC')
+ app = FakeApp()
+ req = Request.blank("/v1/a", environ={'swift.cache': FakeCache()})
+ get_account_info(req.environ, app, swift_source='MC')
+ self.assertEqual(app.sources, ['MC'])
def test_get_account_info_no_cache(self):
+ app = FakeApp()
req = Request.blank("/v1/AUTH_account",
environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_account_info(req.environ, 'xxx')
+ resp = get_account_info(req.environ, app)
self.assertEquals(resp['bytes'], 6666)
self.assertEquals(resp['total_object_count'], 1000)
@@ -297,9 +383,7 @@ class TestFuncs(unittest.TestCase):
'total_object_count': 10}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_account_info(req.environ, 'xxx')
+ resp = get_account_info(req.environ, FakeApp())
self.assertEquals(resp['bytes'], 3333)
self.assertEquals(resp['total_object_count'], 10)
self.assertEquals(resp['status'], 404)
@@ -312,9 +396,7 @@ class TestFuncs(unittest.TestCase):
'meta': {}}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_account_info(req.environ, 'xxx')
+ resp = get_account_info(req.environ, FakeApp())
self.assertEquals(resp['status'], 404)
self.assertEquals(resp['bytes'], '3333')
self.assertEquals(resp['container_count'], 234)
@@ -344,11 +426,13 @@ class TestFuncs(unittest.TestCase):
self.assertEquals(resp['type'], 'application/json')
def test_get_object_info_no_env(self):
+ app = FakeApp()
req = Request.blank("/v1/account/cont/obj",
environ={'swift.cache': FakeCache({})})
- with patch('swift.proxy.controllers.base.'
- '_prepare_pre_auth_info_request', FakeRequest):
- resp = get_object_info(req.environ, 'xxx')
+ resp = get_object_info(req.environ, app)
+ self.assertEqual(app.responses.stats['account'], 0)
+ self.assertEqual(app.responses.stats['container'], 0)
+ self.assertEqual(app.responses.stats['obj'], 1)
self.assertEquals(resp['length'], 5555)
self.assertEquals(resp['type'], 'text/plain')
@@ -443,6 +527,15 @@ class TestFuncs(unittest.TestCase):
self.assertEquals(resp['meta']['whatevs'], 14)
self.assertEquals(resp['meta']['somethingelse'], 0)
+ def test_headers_to_object_info_sys_meta(self):
+ prefix = get_sys_meta_prefix('object')
+ headers = {'%sWhatevs' % prefix: 14,
+ '%ssomethingelse' % prefix: 0}
+ resp = headers_to_object_info(headers.items(), 200)
+ self.assertEquals(len(resp['sysmeta']), 2)
+ self.assertEquals(resp['sysmeta']['whatevs'], 14)
+ self.assertEquals(resp['sysmeta']['somethingelse'], 0)
+
def test_headers_to_object_info_values(self):
headers = {
'content-length': '1024',
@@ -457,7 +550,7 @@ class TestFuncs(unittest.TestCase):
resp,
headers_to_object_info(headers.items(), 200))
- def test_have_quorum(self):
+ def test_base_have_quorum(self):
base = Controller(self.app)
# just throw a bunch of test cases at it
self.assertEqual(base.have_quorum([201, 404], 3), False)
@@ -470,6 +563,32 @@ class TestFuncs(unittest.TestCase):
self.assertEqual(base.have_quorum([404, 404], 2), True)
self.assertEqual(base.have_quorum([201, 404, 201, 201], 4), True)
+ def test_best_response_overrides(self):
+ base = Controller(self.app)
+ responses = [
+ (302, 'Found', '', 'The resource has moved temporarily.'),
+ (100, 'Continue', '', ''),
+ (404, 'Not Found', '', 'Custom body'),
+ ]
+ server_type = "Base DELETE"
+ req = Request.blank('/v1/a/c/o', method='DELETE')
+ statuses, reasons, headers, bodies = zip(*responses)
+
+ # First test that you can't make a quorum with only overridden
+ # responses
+ overrides = {302: 204, 100: 204}
+ resp = base.best_response(req, statuses, reasons, bodies, server_type,
+ headers=headers, overrides=overrides)
+ self.assertEqual(resp.status, '503 Internal Server Error')
+
+ # next make a 404 quorum and make sure the last delete (real) 404
+ # status is the one returned.
+ overrides = {100: 404}
+ resp = base.best_response(req, statuses, reasons, bodies, server_type,
+ headers=headers, overrides=overrides)
+ self.assertEqual(resp.status, '404 Not Found')
+ self.assertEqual(resp.body, 'Custom body')
+
def test_range_fast_forward(self):
req = Request.blank('/')
handler = GetOrHeadHandler(None, req, None, None, None, None, {})
@@ -512,7 +631,8 @@ class TestFuncs(unittest.TestCase):
req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(req, transfer=True)
expected_headers = {'x-base-meta-owner': '',
- 'x-base-meta-size': '151M'}
+ 'x-base-meta-size': '151M',
+ 'connection': 'close'}
for k, v in expected_headers.iteritems():
self.assertTrue(k in dst_headers)
self.assertEqual(v, dst_headers[k])
@@ -532,3 +652,88 @@ class TestFuncs(unittest.TestCase):
self.assertEqual(v, dst_headers[k.lower()])
for k, v in bad_hdrs.iteritems():
self.assertFalse(k.lower() in dst_headers)
+
+ def test_client_chunk_size(self):
+
+ class TestSource(object):
+ def __init__(self, chunks):
+ self.chunks = list(chunks)
+
+ def read(self, _read_size):
+ if self.chunks:
+ return self.chunks.pop(0)
+ else:
+ return ''
+
+ source = TestSource((
+ 'abcd', '1234', 'abc', 'd1', '234abcd1234abcd1', '2'))
+ req = Request.blank('/v1/a/c/o')
+ node = {}
+ handler = GetOrHeadHandler(self.app, req, None, None, None, None, {},
+ client_chunk_size=8)
+
+ app_iter = handler._make_app_iter(req, node, source)
+ client_chunks = list(app_iter)
+ self.assertEqual(client_chunks, [
+ 'abcd1234', 'abcd1234', 'abcd1234', 'abcd12'])
+
+ def test_client_chunk_size_resuming(self):
+
+ class TestSource(object):
+ def __init__(self, chunks):
+ self.chunks = list(chunks)
+
+ def read(self, _read_size):
+ if self.chunks:
+ chunk = self.chunks.pop(0)
+ if chunk is None:
+ raise exceptions.ChunkReadTimeout()
+ else:
+ return chunk
+ else:
+ return ''
+
+ node = {'ip': '1.2.3.4', 'port': 6000, 'device': 'sda'}
+
+ source1 = TestSource(['abcd', '1234', 'abc', None])
+ source2 = TestSource(['efgh5678'])
+ req = Request.blank('/v1/a/c/o')
+ handler = GetOrHeadHandler(
+ self.app, req, 'Object', None, None, None, {},
+ client_chunk_size=8)
+
+ app_iter = handler._make_app_iter(req, node, source1)
+ with patch.object(handler, '_get_source_and_node',
+ lambda: (source2, node)):
+ client_chunks = list(app_iter)
+ self.assertEqual(client_chunks, ['abcd1234', 'efgh5678'])
+ self.assertEqual(handler.backend_headers['Range'], 'bytes=8-')
+
+ def test_bytes_to_skip(self):
+ # if you start at the beginning, skip nothing
+ self.assertEqual(bytes_to_skip(1024, 0), 0)
+
+ # missed the first 10 bytes, so we've got 1014 bytes of partial
+ # record
+ self.assertEqual(bytes_to_skip(1024, 10), 1014)
+
+ # skipped some whole records first
+ self.assertEqual(bytes_to_skip(1024, 4106), 1014)
+
+ # landed on a record boundary
+ self.assertEqual(bytes_to_skip(1024, 1024), 0)
+ self.assertEqual(bytes_to_skip(1024, 2048), 0)
+
+ # big numbers
+ self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32), 0)
+ self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 1), 2 ** 20 - 1)
+ self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 2 ** 19), 2 ** 19)
+
+ # odd numbers
+ self.assertEqual(bytes_to_skip(123, 0), 0)
+ self.assertEqual(bytes_to_skip(123, 23), 100)
+ self.assertEqual(bytes_to_skip(123, 247), 122)
+
+ # prime numbers
+ self.assertEqual(bytes_to_skip(11, 7), 4)
+ self.assertEqual(bytes_to_skip(97, 7873823), 55)
diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py
index 7c8ecf7..715cd94 100644
--- a/test/unit/proxy/controllers/test_container.py
+++ b/test/unit/proxy/controllers/test_container.py
@@ -16,19 +16,61 @@
import mock
import unittest
+from eventlet import Timeout
+
from swift.common.swob import Request
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_container_info
from test.unit import fake_http_connect, FakeRing, FakeMemcache
+from swift.common.storage_policy import StoragePolicy
from swift.common.request_helpers import get_sys_meta_prefix
+from swift.common import utils
+
+from test.unit import patch_policies, mocked_http_conn, debug_logger
+from test.unit.proxy.test_server import node_error_count
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestContainerController(unittest.TestCase):
def setUp(self):
+ # SOF
+ self._orig_hash_suffix = utils.HASH_PATH_SUFFIX
+ self._orig_hash_prefix = utils.HASH_PATH_PREFIX
+ utils.HASH_PATH_SUFFIX = 'endcap'
+ utils.HASH_PATH_PREFIX = ''
+ self.logger = debug_logger()
+ self.container_ring = FakeRing(max_more_nodes=9)
self.app = proxy_server.Application(None, FakeMemcache(),
+ logger=self.logger,
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=self.container_ring)
+
+ self.account_info = {
+ 'status': 200,
+ 'container_count': '10',
+ 'total_object_count': '100',
+ 'bytes': '1000',
+ 'meta': {},
+ 'sysmeta': {},
+ }
+
+ class FakeAccountInfoContainerController(
+ proxy_server.ContainerController):
+
+ def account_info(controller, *args, **kwargs):
+ patch_path = 'swift.proxy.controllers.base.get_info'
+ with mock.patch(patch_path) as mock_get_info:
+ mock_get_info.return_value = dict(self.account_info)
+ return super(FakeAccountInfoContainerController,
+ controller).account_info(
+ *args, **kwargs)
+ _orig_get_controller = self.app.get_controller
+
+ def wrapped_get_controller(*args, **kwargs):
+ with mock.patch('swift.proxy.server.ContainerController',
+ new=FakeAccountInfoContainerController):
+ return _orig_get_controller(*args, **kwargs)
+ self.app.get_controller = wrapped_get_controller
def test_container_info_in_response_env(self):
controller = proxy_server.ContainerController(self.app, 'a', 'c')
@@ -118,6 +160,53 @@ class TestContainerController(unittest.TestCase):
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
+ def test_node_errors(self):
+ self.app.sort_nodes = lambda n: n
+
+ for method in ('PUT', 'DELETE', 'POST'):
+ def test_status_map(statuses, expected):
+ self.app._error_limiting = {}
+ req = Request.blank('/v1/a/c', method=method)
+ with mocked_http_conn(*statuses) as fake_conn:
+ print 'a' * 50
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, expected)
+ for req in fake_conn.requests:
+ self.assertEqual(req['method'], method)
+ self.assert_(req['path'].endswith('/a/c'))
+
+ base_status = [201] * 3
+ # test happy path
+ test_status_map(list(base_status), 201)
+ for i in range(3):
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[i]), 0)
+ # single node errors and test isolation
+ for i in range(3):
+ status_list = list(base_status)
+ status_list[i] = 503
+ status_list.append(201)
+ test_status_map(status_list, 201)
+ for j in range(3):
+ expected = 1 if j == i else 0
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[j]), expected)
+ # timeout
+ test_status_map((201, Timeout(), 201, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[1]), 1)
+
+ # exception
+ test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[0]), 1)
+
+ # insufficient storage
+ test_status_map((201, 201, 507, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, self.container_ring.devs[2]),
+ self.app.error_suppression_limit + 1)
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py
index 4942691..a38e753 100755
--- a/test/unit/proxy/controllers/test_obj.py
+++ b/test/unit/proxy/controllers/test_obj.py
@@ -14,185 +14,1397 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import email.parser
+import itertools
+import random
+import time
import unittest
+from collections import defaultdict
from contextlib import contextmanager
+import json
+from hashlib import md5
import mock
+from eventlet import Timeout
import swift
+from swift.common import utils, swob
from swift.proxy import server as proxy_server
-from swift.common.swob import HTTPException
-from test.unit import FakeRing, FakeMemcache, fake_http_connect, debug_logger
+from swift.proxy.controllers import obj
+from swift.proxy.controllers.base import get_info as _real_get_info
+from swift.common.storage_policy import POLICIES, ECDriverError
+
+from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
+ debug_logger, patch_policies, SlowBody
+from test.unit.proxy.test_server import node_error_count
+
+
+def unchunk_body(chunked_body):
+ body = ''
+ remaining = chunked_body
+ while remaining:
+ hex_length, remaining = remaining.split('\r\n', 1)
+ length = int(hex_length, 16)
+ body += remaining[:length]
+ remaining = remaining[length + 2:]
+ return body
@contextmanager
def set_http_connect(*args, **kwargs):
old_connect = swift.proxy.controllers.base.http_connect
new_connect = fake_http_connect(*args, **kwargs)
- swift.proxy.controllers.base.http_connect = new_connect
- swift.proxy.controllers.obj.http_connect = new_connect
- swift.proxy.controllers.account.http_connect = new_connect
- swift.proxy.controllers.container.http_connect = new_connect
- yield new_connect
- swift.proxy.controllers.base.http_connect = old_connect
- swift.proxy.controllers.obj.http_connect = old_connect
- swift.proxy.controllers.account.http_connect = old_connect
- swift.proxy.controllers.container.http_connect = old_connect
-
-
-class TestObjControllerWriteAffinity(unittest.TestCase):
+ try:
+ swift.proxy.controllers.base.http_connect = new_connect
+ swift.proxy.controllers.obj.http_connect = new_connect
+ swift.proxy.controllers.account.http_connect = new_connect
+ swift.proxy.controllers.container.http_connect = new_connect
+ yield new_connect
+ left_over_status = list(new_connect.code_iter)
+ if left_over_status:
+ raise AssertionError('left over status %r' % left_over_status)
+ finally:
+ swift.proxy.controllers.base.http_connect = old_connect
+ swift.proxy.controllers.obj.http_connect = old_connect
+ swift.proxy.controllers.account.http_connect = old_connect
+ swift.proxy.controllers.container.http_connect = old_connect
+
+
+class PatchedObjControllerApp(proxy_server.Application):
+ """
+ This patch is just a hook over the proxy server's __call__ to ensure
+ that calls to get_info will return the stubbed value for
+ container_info if it's a container info call.
+ """
+
+ container_info = {}
+ per_container_info = {}
+
+ def __call__(self, *args, **kwargs):
+
+ def _fake_get_info(app, env, account, container=None, **kwargs):
+ if container:
+ if container in self.per_container_info:
+ return self.per_container_info[container]
+ return self.container_info
+ else:
+ return _real_get_info(app, env, account, container, **kwargs)
+
+ mock_path = 'swift.proxy.controllers.base.get_info'
+ with mock.patch(mock_path, new=_fake_get_info):
+ return super(
+ PatchedObjControllerApp, self).__call__(*args, **kwargs)
+
+
+class BaseObjectControllerMixin(object):
+ container_info = {
+ 'write_acl': None,
+ 'read_acl': None,
+ 'storage_policy': None,
+ 'sync_key': None,
+ 'versions': None,
+ }
+
+ # this needs to be set on the test case
+ controller_cls = None
+
def setUp(self):
- self.app = proxy_server.Application(
+ # setup fake rings with handoffs
+ for policy in POLICIES:
+ policy.object_ring.max_more_nodes = policy.object_ring.replicas
+
+ self.logger = debug_logger('proxy-server')
+ self.logger.thread_locals = ('txn1', '127.0.0.2')
+ self.app = PatchedObjControllerApp(
None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), object_ring=FakeRing(max_more_nodes=9))
- self.app.request_node_count = lambda replicas: 10000000
- self.app.sort_nodes = lambda l: l # stop shuffling the primary nodes
+ container_ring=FakeRing(), logger=self.logger)
+ # you can over-ride the container_info just by setting it on the app
+ self.app.container_info = dict(self.container_info)
+ # default policy and ring references
+ self.policy = POLICIES.default
+ self.obj_ring = self.policy.object_ring
+ self._ts_iter = (utils.Timestamp(t) for t in
+ itertools.count(int(time.time())))
+
+ def ts(self):
+ return self._ts_iter.next()
+
+ def replicas(self, policy=None):
+ policy = policy or POLICIES.default
+ return policy.object_ring.replicas
+
+ def quorum(self, policy=None):
+ policy = policy or POLICIES.default
+ return policy.quorum
def test_iter_nodes_local_first_noops_when_no_affinity(self):
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ # this test needs a stable node order - most don't
+ self.app.sort_nodes = lambda l: l
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = None
-
- all_nodes = self.app.object_ring.get_part_nodes(1)
- all_nodes.extend(self.app.object_ring.get_more_nodes(1))
+ object_ring = self.app.get_object_ring(None)
+ all_nodes = object_ring.get_part_nodes(1)
+ all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
- self.app.object_ring, 1))
+ object_ring, 1))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_local_first_moves_locals_first(self):
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
- self.app.write_affinity_node_count = lambda ring: 4
+ # we'll write to one more than replica count local nodes
+ self.app.write_affinity_node_count = lambda r: r + 1
+
+ object_ring = self.app.get_object_ring(None)
+ # make our fake ring have plenty of nodes, and not get limited
+ # artificially by the proxy max request node count
+ object_ring.max_more_nodes = 100000
+ self.app.request_node_count = lambda r: 100000
- all_nodes = self.app.object_ring.get_part_nodes(1)
- all_nodes.extend(self.app.object_ring.get_more_nodes(1))
+ all_nodes = object_ring.get_part_nodes(1)
+ all_nodes.extend(object_ring.get_more_nodes(1))
+ # FakeRing caps the get_more_nodes iterator at replicas ** 2 entries
+ # so it can safely be materialized as a list; note this cap *can* be
+ # lower than max_more_nodes
+ fake_rings_real_max_more_nodes_value = object_ring.replicas ** 2
+ self.assertEqual(len(all_nodes), fake_rings_real_max_more_nodes_value)
+
+ # make sure we have enough local nodes (sanity)
+ all_local_nodes = [n for n in all_nodes if
+ self.app.write_affinity_is_local_fn(n)]
+ self.assertTrue(len(all_local_nodes) >= self.replicas() + 1)
+
+ # finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
- self.app.object_ring, 1))
+ object_ring, 1))
# the local nodes move up in the ordering
- self.assertEqual([1, 1, 1, 1],
- [node['region'] for node in local_first_nodes[:4]])
+ self.assertEqual([1] * (self.replicas() + 1), [
+ node['region'] for node in local_first_nodes[
+ :self.replicas() + 1]])
# we don't skip any nodes
+ self.assertEqual(len(all_nodes), len(local_first_nodes))
+ self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
+
+ def test_iter_nodes_local_first_best_effort(self):
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
+ self.app.write_affinity_is_local_fn = (
+ lambda node: node['region'] == 1)
+
+ object_ring = self.app.get_object_ring(None)
+ all_nodes = object_ring.get_part_nodes(1)
+ all_nodes.extend(object_ring.get_more_nodes(1))
+
+ local_first_nodes = list(controller.iter_nodes_local_first(
+ object_ring, 1))
+
+ # we won't have quite enough local nodes...
+ self.assertEqual(len(all_nodes), self.replicas() +
+ POLICIES.default.object_ring.max_more_nodes)
+ all_local_nodes = [n for n in all_nodes if
+ self.app.write_affinity_is_local_fn(n)]
+ self.assertEqual(len(all_local_nodes), self.replicas())
+ # but the local nodes we do have are at the front of the local iter
+ first_n_local_first_nodes = local_first_nodes[:len(all_local_nodes)]
+ self.assertEqual(sorted(all_local_nodes),
+ sorted(first_n_local_first_nodes))
+ # but we *still* don't *skip* any nodes
+ self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_connect_put_node_timeout(self):
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
- self.app.conn_timeout = 0.1
- with set_http_connect(200, slow_connect=True):
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
+ self.app.conn_timeout = 0.05
+ with set_http_connect(slow_connect=True):
nodes = [dict(ip='', port='', device='')]
res = controller._connect_put_node(nodes, '', '', {}, ('', ''))
self.assertTrue(res is None)
+ def test_DELETE_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [204] * self.replicas()
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_DELETE_missing_one(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [404] + [204] * (self.replicas() - 1)
+ random.shuffle(codes)
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
-class TestObjController(unittest.TestCase):
- def setUp(self):
- logger = debug_logger('proxy-server')
- logger.thread_locals = ('txn1', '127.0.0.2')
- self.app = proxy_server.Application(
- None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), object_ring=FakeRing(),
- logger=logger)
- self.controller = proxy_server.ObjectController(self.app,
- 'a', 'c', 'o')
- self.controller.container_info = mock.MagicMock(return_value={
- 'partition': 1,
- 'nodes': [
- {'ip': '127.0.0.1', 'port': '1', 'device': 'sda'},
- {'ip': '127.0.0.1', 'port': '2', 'device': 'sda'},
- {'ip': '127.0.0.1', 'port': '3', 'device': 'sda'},
- ],
- 'write_acl': None,
- 'read_acl': None,
- 'sync_key': None,
- 'versions': None})
+ def test_DELETE_not_found(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [404] * (self.replicas() - 1) + [204]
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 404)
+
+ def test_DELETE_mostly_found(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ mostly_204s = [204] * self.quorum()
+ codes = mostly_204s + [404] * (self.replicas() - len(mostly_204s))
+ self.assertEqual(len(codes), self.replicas())
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_DELETE_mostly_not_found(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ mostly_404s = [404] * self.quorum()
+ codes = mostly_404s + [204] * (self.replicas() - len(mostly_404s))
+ self.assertEqual(len(codes), self.replicas())
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 404)
+
+ def test_DELETE_half_not_found_statuses(self):
+ self.obj_ring.set_replicas(4)
+
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ with set_http_connect(404, 204, 404, 204):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_DELETE_half_not_found_headers_and_body(self):
+ # Transformed responses have bogus bodies and headers, so make sure we
+ # send the client headers and body from a real node's response.
+ self.obj_ring.set_replicas(4)
+
+ status_codes = (404, 404, 204, 204)
+ bodies = ('not found', 'not found', '', '')
+ headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
+
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ with set_http_connect(*status_codes, body_iter=bodies,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+ self.assertEquals(resp.headers.get('Pick-Me'), 'yes')
+ self.assertEquals(resp.body, '')
+
+ def test_DELETE_handoff(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
+ codes = [204] * self.replicas()
+ with set_http_connect(507, *codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 204)
+
+ def test_POST_non_int_delete_after(self):
+ t = str(int(time.time() + 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': t})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-After', resp.body)
+
+ def test_PUT_non_int_delete_after(self):
+ t = str(int(time.time() + 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': t})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-After', resp.body)
+
+ def test_POST_negative_delete_after(self):
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': '-60'})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-After in past', resp.body)
+
+ def test_PUT_negative_delete_after(self):
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': '-60'})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-After in past', resp.body)
+
+ def test_POST_delete_at_non_integer(self):
+ t = str(int(time.time() + 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-At', resp.body)
+
+ def test_PUT_delete_at_non_integer(self):
+ t = str(int(time.time() - 100)) + '.1'
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('Non-integer X-Delete-At', resp.body)
+
+ def test_POST_delete_at_in_past(self):
+ t = str(int(time.time() - 100))
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-At in past', resp.body)
+
+ def test_PUT_delete_at_in_past(self):
+ t = str(int(time.time() - 100))
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual('X-Delete-At in past', resp.body)
+
+ def test_HEAD_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
+ with set_http_connect(200):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_HEAD_x_newest(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ with set_http_connect(200, 200, 200):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_HEAD_x_newest_different_timestamps(self):
+ req = swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ timestamps = [next(ts) for i in range(3)]
+ newest_timestamp = timestamps[-1]
+ random.shuffle(timestamps)
+ backend_response_headers = [{
+ 'X-Backend-Timestamp': t.internal,
+ 'X-Timestamp': t.normal
+ } for t in timestamps]
+ with set_http_connect(200, 200, 200,
+ headers=backend_response_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
+
+ def test_HEAD_x_newest_with_two_vector_timestamps(self):
+ req = swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ ts = (utils.Timestamp(time.time(), offset=offset)
+ for offset in itertools.count())
+ timestamps = [next(ts) for i in range(3)]
+ newest_timestamp = timestamps[-1]
+ random.shuffle(timestamps)
+ backend_response_headers = [{
+ 'X-Backend-Timestamp': t.internal,
+ 'X-Timestamp': t.normal
+ } for t in timestamps]
+ with set_http_connect(200, 200, 200,
+ headers=backend_response_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['x-backend-timestamp'],
+ newest_timestamp.internal)
+
+ def test_HEAD_x_newest_with_some_missing(self):
+ req = swob.Request.blank('/v1/a/c/o', method='HEAD',
+ headers={'X-Newest': 'true'})
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ request_count = self.app.request_node_count(self.obj_ring.replicas)
+ backend_response_headers = [{
+ 'x-timestamp': next(ts).normal,
+ } for i in range(request_count)]
+ responses = [404] * (request_count - 1)
+ responses.append(200)
+ request_log = []
+
+ def capture_requests(ip, port, device, part, method, path,
+ headers=None, **kwargs):
+ req = {
+ 'ip': ip,
+ 'port': port,
+ 'device': device,
+ 'part': part,
+ 'method': method,
+ 'path': path,
+ 'headers': headers,
+ }
+ request_log.append(req)
+ with set_http_connect(*responses,
+ headers=backend_response_headers,
+ give_connect=capture_requests):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ for req in request_log:
+ self.assertEqual(req['method'], 'HEAD')
+ self.assertEqual(req['path'], '/a/c/o')
+
+ def test_container_sync_delete(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='DELETE', headers={
+ 'X-Timestamp': ts.next().internal})
+ codes = [409] * self.obj_ring.replicas
+ ts_iter = itertools.repeat(ts.next().internal)
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 409)
+
+ def test_PUT_requires_length(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 411)
+
+# end of BaseObjectControllerMixin
+
+
+@patch_policies()
+class TestReplicatedObjController(BaseObjectControllerMixin,
+ unittest.TestCase):
+
+ controller_cls = obj.ReplicatedObjectController
def test_PUT_simple(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
- resp = self.controller.PUT(req)
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
def test_PUT_if_none_match(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
- resp = self.controller.PUT(req)
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
def test_PUT_if_none_match_denied(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
- with set_http_connect(201, (412, 412), 201):
- resp = self.controller.PUT(req)
+ with set_http_connect(201, 412, 201):
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 412)
def test_PUT_if_none_match_not_star(self):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = 'somethingelse'
req.headers['content-length'] = '0'
- with set_http_connect(201, 201, 201):
- resp = self.controller.PUT(req)
+ with set_http_connect():
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 400)
+ def test_PUT_connect_exceptions(self):
+ object_ring = self.app.get_object_ring(None)
+ self.app.sort_nodes = lambda n: n # disable shuffle
+
+ def test_status_map(statuses, expected):
+ self.app._error_limiting = {}
+ req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
+ body='test body')
+ with set_http_connect(*statuses):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, expected)
+
+ base_status = [201] * 3
+ # test happy path
+ test_status_map(list(base_status), 201)
+ for i in range(3):
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[i]), 0)
+ # single node errors and test isolation
+ for i in range(3):
+ status_list = list(base_status)
+ status_list[i] = 503
+ test_status_map(status_list, 201)
+ for j in range(3):
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[j]), 1 if j == i else 0)
+ # connect errors
+ test_status_map((201, Timeout(), 201, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[1]), 1)
+ test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[0]), 1)
+ # expect errors
+ test_status_map((201, 201, (503, None), 201), 201)
+ self.assertEqual(node_error_count(
+ self.app, object_ring.devs[2]), 1)
+ test_status_map(((507, None), 201, 201, 201), 201)
+ self.assertEqual(
+ node_error_count(self.app, object_ring.devs[0]),
+ self.app.error_suppression_limit + 1)
+ # response errors
+ test_status_map(((100, Timeout()), 201, 201), 201)
+ self.assertEqual(
+ node_error_count(self.app, object_ring.devs[0]), 1)
+ test_status_map((201, 201, (100, Exception())), 201)
+ self.assertEqual(
+ node_error_count(self.app, object_ring.devs[2]), 1)
+ test_status_map((201, (100, 507), 201), 201)
+ self.assertEqual(
+ node_error_count(self.app, object_ring.devs[1]),
+ self.app.error_suppression_limit + 1)
+
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200):
- resp = self.controller.GET(req)
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
- def test_DELETE_simple(self):
+ def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
- with set_http_connect(204, 204, 204):
- resp = self.controller.DELETE(req)
- self.assertEquals(resp.status_int, 204)
+ with set_http_connect(503, 200):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_GET_handoff(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o')
+ codes = [503] * self.obj_ring.replicas + [200]
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
- def test_POST_simple(self):
+ def test_GET_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
- with set_http_connect(200, 200, 200, 201, 201, 201):
- resp = self.controller.POST(req)
+ codes = [404] * (self.obj_ring.replicas +
+ self.obj_ring.max_more_nodes)
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 404)
+
+ def test_POST_as_COPY_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST')
+ head_resp = [200] * self.obj_ring.replicas + \
+ [404] * self.obj_ring.max_more_nodes
+ put_resp = [201] * self.obj_ring.replicas
+ codes = head_resp + put_resp
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 202)
+ def test_POST_delete_at(self):
+ t = str(int(time.time() + 100))
+ req = swob.Request.blank('/v1/a/c/o', method='POST',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ post_headers = []
+
+ def capture_headers(ip, port, device, part, method, path, headers,
+ **kwargs):
+ if method == 'POST':
+ post_headers.append(headers)
+ x_newest_responses = [200] * self.obj_ring.replicas + \
+ [404] * self.obj_ring.max_more_nodes
+ post_resp = [200] * self.obj_ring.replicas
+ codes = x_newest_responses + post_resp
+ with set_http_connect(*codes, give_connect=capture_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+ for given_headers in post_headers:
+ self.assertEquals(given_headers.get('X-Delete-At'), t)
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
+
+ def test_PUT_delete_at(self):
+ t = str(int(time.time() + 100))
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-At': t})
+ put_headers = []
+
+ def capture_headers(ip, port, device, part, method, path, headers,
+ **kwargs):
+ if method == 'PUT':
+ put_headers.append(headers)
+ codes = [201] * self.obj_ring.replicas
+ with set_http_connect(*codes, give_connect=capture_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+ for given_headers in put_headers:
+ self.assertEquals(given_headers.get('X-Delete-At'), t)
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
+
+ def test_PUT_converts_delete_after_to_delete_at(self):
+ req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
+ headers={'Content-Type': 'foo/bar',
+ 'X-Delete-After': '60'})
+ put_headers = []
+
+ def capture_headers(ip, port, device, part, method, path, headers,
+ **kwargs):
+ if method == 'PUT':
+ put_headers.append(headers)
+ codes = [201] * self.obj_ring.replicas
+ t = time.time()
+ with set_http_connect(*codes, give_connect=capture_headers):
+ with mock.patch('time.time', lambda: t):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+ expected_delete_at = str(int(t) + 60)
+ for given_headers in put_headers:
+ self.assertEquals(given_headers.get('X-Delete-At'),
+ expected_delete_at)
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
+
+ def test_container_sync_put_x_timestamp_not_found(self):
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ self.app.container_info['storage_policy'] = policy_index
+ put_timestamp = utils.Timestamp(time.time()).normal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': put_timestamp})
+ codes = [201] * self.obj_ring.replicas
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_container_sync_put_x_timestamp_match(self):
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ self.app.container_info['storage_policy'] = policy_index
+ put_timestamp = utils.Timestamp(time.time()).normal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': put_timestamp})
+ ts_iter = itertools.repeat(put_timestamp)
+ codes = [409] * self.obj_ring.replicas
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_container_sync_put_x_timestamp_older(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ self.app.container_info['storage_policy'] = policy_index
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': ts.next().internal})
+ ts_iter = itertools.repeat(ts.next().internal)
+ codes = [409] * self.obj_ring.replicas
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_container_sync_put_x_timestamp_newer(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ orig_timestamp = ts.next().internal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': ts.next().internal})
+ ts_iter = itertools.repeat(orig_timestamp)
+ codes = [201] * self.obj_ring.replicas
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_put_x_timestamp_conflict(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': ts.next().internal})
+ ts_iter = iter([ts.next().internal, None, None])
+ codes = [409] + [201] * (self.obj_ring.replicas - 1)
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_container_sync_put_x_timestamp_race(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ put_timestamp = ts.next().internal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': put_timestamp})
+
+ # the object nodes respond 409 because another in-flight request
+ # finished and the on-disk timestamp now equals the request's.
+ put_ts = [put_timestamp] * self.obj_ring.replicas
+ codes = [409] * self.obj_ring.replicas
+
+ ts_iter = iter(put_ts)
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_container_sync_put_x_timestamp_unsynced_race(self):
+ ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
+ test_indexes = [None] + [int(p) for p in POLICIES]
+ for policy_index in test_indexes:
+ put_timestamp = ts.next().internal
+ req = swob.Request.blank(
+ '/v1/a/c/o', method='PUT', headers={
+ 'Content-Length': 0,
+ 'X-Timestamp': put_timestamp})
+
+ # only one in-flight request finished
+ put_ts = [None] * (self.obj_ring.replicas - 1)
+ put_resp = [201] * (self.obj_ring.replicas - 1)
+ put_ts += [put_timestamp]
+ put_resp += [409]
+
+ ts_iter = iter(put_ts)
+ codes = put_resp
+ with set_http_connect(*codes, timestamps=ts_iter):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
def test_COPY_simple(self):
+ req = swift.common.swob.Request.blank(
+ '/v1/a/c/o', method='COPY',
+ headers={'Content-Length': 0,
+ 'Destination': 'c/o-copy'})
+ head_resp = [200] * self.obj_ring.replicas + \
+ [404] * self.obj_ring.max_more_nodes
+ put_resp = [201] * self.obj_ring.replicas
+ codes = head_resp + put_resp
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_log_info(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
+ req.headers['x-copy-from'] = 'some/where'
+ req.headers['Content-Length'] = 0
+ # override FakeConn default resp headers to keep log_info clean
+ resp_headers = {'x-delete-at': None}
+ head_resp = [200] * self.obj_ring.replicas + \
+ [404] * self.obj_ring.max_more_nodes
+ put_resp = [201] * self.obj_ring.replicas
+ codes = head_resp + put_resp
+ with set_http_connect(*codes, headers=resp_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+ self.assertEquals(
+ req.environ.get('swift.log_info'), ['x-copy-from:some/where'])
+ # and then check that we don't do that for originating POSTs
req = swift.common.swob.Request.blank('/v1/a/c/o')
- with set_http_connect(200, 200, 200, 201, 201, 201):
- resp = self.controller.POST(req)
- self.assertEquals(resp.status_int, 202)
+ req.method = 'POST'
+ req.headers['x-copy-from'] = 'else/where'
+ with set_http_connect(*codes, headers=resp_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEquals(req.environ.get('swift.log_info'), None)
- def test_HEAD_simple(self):
+
+@patch_policies(legacy_only=True)
+class TestObjControllerLegacyCache(TestReplicatedObjController):
+ """
+    This test pretends that memcache returned a stored value in whatever
+    "old" format was previously in use. It catches KeyErrors you'd get if
+    your code expected some new format during a rolling upgrade.
+ """
+
+ # in this case policy_index is missing
+ container_info = {
+ 'read_acl': None,
+ 'write_acl': None,
+ 'sync_key': None,
+ 'versions': None,
+ }
+
+ def test_invalid_storage_policy_cache(self):
+ self.app.container_info['storage_policy'] = 1
+ for method in ('GET', 'HEAD', 'POST', 'PUT', 'COPY'):
+ req = swob.Request.blank('/v1/a/c/o', method=method)
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 503)
+
+
+@patch_policies(with_ec_default=True)
+class TestECObjController(BaseObjectControllerMixin, unittest.TestCase):
+ container_info = {
+ 'read_acl': None,
+ 'write_acl': None,
+ 'sync_key': None,
+ 'versions': None,
+ 'storage_policy': '0',
+ }
+
+ controller_cls = obj.ECObjectController
+
+ def test_determine_chunk_destinations(self):
+ class FakePutter(object):
+ def __init__(self, index):
+ self.node_index = index
+
+ controller = self.controller_cls(
+ self.app, 'a', 'c', 'o')
+
+ # create a dummy list of putters, check no handoffs
+ putters = []
+ for index in range(0, 4):
+ putters.append(FakePutter(index))
+ got = controller._determine_chunk_destinations(putters)
+ expected = {}
+ for i, p in enumerate(putters):
+ expected[p] = i
+ self.assertEquals(got, expected)
+
+        # now let's make a handoff at the end
+ putters[3].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+ putters[3].node_index = 3
+
+        # now let's make a handoff at the start
+ putters[0].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+ putters[0].node_index = 0
+
+        # now let's make a handoff in the middle
+ putters[2].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+ putters[2].node_index = 0
+
+        # now let's make all of them handoffs
+ for index in range(0, 4):
+ putters[index].node_index = None
+ got = controller._determine_chunk_destinations(putters)
+ self.assertEquals(got, expected)
+
+ def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
- with set_http_connect(200, 200, 200, 201, 201, 201):
- resp = self.controller.POST(req)
- self.assertEquals(resp.status_int, 202)
+ get_resp = [200] * self.policy.ec_ndata
+ with set_http_connect(*get_resp):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
- def test_PUT_log_info(self):
- # mock out enough to get to the area of the code we want to test
- with mock.patch('swift.proxy.controllers.obj.check_object_creation',
- mock.MagicMock(return_value=None)):
- req = swift.common.swob.Request.blank('/v1/a/c/o')
- req.headers['x-copy-from'] = 'somewhere'
- try:
- self.controller.PUT(req)
- except HTTPException:
- pass
- self.assertEquals(
- req.environ.get('swift.log_info'), ['x-copy-from:somewhere'])
- # and then check that we don't do that for originating POSTs
- req = swift.common.swob.Request.blank('/v1/a/c/o')
- req.method = 'POST'
- req.headers['x-copy-from'] = 'elsewhere'
- try:
- self.controller.PUT(req)
- except HTTPException:
- pass
- self.assertEquals(req.environ.get('swift.log_info'), None)
+ def test_GET_simple_x_newest(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o',
+ headers={'X-Newest': 'true'})
+ codes = [200] * self.replicas()
+ codes += [404] * self.obj_ring.max_more_nodes
+ with set_http_connect(*codes):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_GET_error(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o')
+ get_resp = [503] + [200] * self.policy.ec_ndata
+ with set_http_connect(*get_resp):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+
+ def test_GET_with_body(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o')
+ # turn a real body into fragments
+ segment_size = self.policy.ec_segment_size
+ real_body = ('asdf' * segment_size)[:-10]
+ # split it up into chunks
+ chunks = [real_body[x:x + segment_size]
+ for x in range(0, len(real_body), segment_size)]
+ fragment_payloads = []
+ for chunk in chunks:
+ fragments = self.policy.pyeclib_driver.encode(chunk)
+ if not fragments:
+ break
+ fragment_payloads.append(fragments)
+ # sanity
+ sanity_body = ''
+ for fragment_payload in fragment_payloads:
+ sanity_body += self.policy.pyeclib_driver.decode(
+ fragment_payload)
+ self.assertEqual(len(real_body), len(sanity_body))
+ self.assertEqual(real_body, sanity_body)
+
+ node_fragments = zip(*fragment_payloads)
+ self.assertEqual(len(node_fragments), self.replicas()) # sanity
+ responses = [(200, ''.join(node_fragments[i]), {})
+ for i in range(POLICIES.default.ec_ndata)]
+ status_codes, body_iter, headers = zip(*responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 200)
+ self.assertEqual(len(real_body), len(resp.body))
+ self.assertEqual(real_body, resp.body)
+
+ def test_PUT_simple(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_with_explicit_commit_status(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [(100, 100, 201)] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_error(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [503] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 503)
+
+ def test_PUT_mostly_success(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * self.quorum()
+ codes += [503] * (self.replicas() - len(codes))
+ random.shuffle(codes)
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_error_commit(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [(100, 503, Exception('not used'))] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 503)
+
+ def test_PUT_mostly_success_commit(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * self.quorum()
+ codes += [(100, 503, Exception('not used'))] * (
+ self.replicas() - len(codes))
+ random.shuffle(codes)
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_mostly_error_commit(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [(100, 503, Exception('not used'))] * self.quorum()
+ codes += [201] * (self.replicas() - len(codes))
+ random.shuffle(codes)
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 503)
+
+ def test_PUT_commit_timeout(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * (self.replicas() - 1)
+ codes.append((100, Timeout(), Exception('not used')))
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_commit_exception(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ codes = [201] * (self.replicas() - 1)
+ codes.append((100, Exception('kaboom!'), Exception('not used')))
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_PUT_with_body(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
+ segment_size = self.policy.ec_segment_size
+ test_body = ('asdf' * segment_size)[:-10]
+ etag = md5(test_body).hexdigest()
+ size = len(test_body)
+ req.body = test_body
+ codes = [201] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+
+ put_requests = defaultdict(lambda: {'boundary': None, 'chunks': []})
+
+ def capture_body(conn_id, chunk):
+ put_requests[conn_id]['chunks'].append(chunk)
+
+ def capture_headers(ip, port, device, part, method, path, headers,
+ **kwargs):
+ conn_id = kwargs['connection_id']
+ put_requests[conn_id]['boundary'] = headers[
+ 'X-Backend-Obj-Multipart-Mime-Boundary']
+
+ with set_http_connect(*codes, expect_headers=expect_headers,
+ give_send=capture_body,
+ give_connect=capture_headers):
+ resp = req.get_response(self.app)
+
+ self.assertEquals(resp.status_int, 201)
+ frag_archives = []
+ for connection_id, info in put_requests.items():
+ body = unchunk_body(''.join(info['chunks']))
+ self.assertTrue(info['boundary'] is not None,
+ "didn't get boundary for conn %r" % (
+ connection_id,))
+
+ # email.parser.FeedParser doesn't know how to take a multipart
+ # message and boundary together and parse it; it only knows how
+ # to take a string, parse the headers, and figure out the
+ # boundary on its own.
+ parser = email.parser.FeedParser()
+ parser.feed(
+ "Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
+ info['boundary'])
+ parser.feed(body)
+ message = parser.close()
+
+ self.assertTrue(message.is_multipart()) # sanity check
+ mime_parts = message.get_payload()
+ self.assertEqual(len(mime_parts), 3)
+ obj_part, footer_part, commit_part = mime_parts
+
+ # attach the body to frag_archives list
+ self.assertEqual(obj_part['X-Document'], 'object body')
+ frag_archives.append(obj_part.get_payload())
+
+ # validate some footer metadata
+ self.assertEqual(footer_part['X-Document'], 'object metadata')
+ footer_metadata = json.loads(footer_part.get_payload())
+ self.assertTrue(footer_metadata)
+ expected = {
+ 'X-Object-Sysmeta-EC-Content-Length': str(size),
+ 'X-Backend-Container-Update-Override-Size': str(size),
+ 'X-Object-Sysmeta-EC-Etag': etag,
+ 'X-Backend-Container-Update-Override-Etag': etag,
+ 'X-Object-Sysmeta-EC-Segment-Size': str(segment_size),
+ }
+ for header, value in expected.items():
+ self.assertEqual(footer_metadata[header], value)
+
+ # sanity on commit message
+ self.assertEqual(commit_part['X-Document'], 'put commit')
+
+ self.assertEqual(len(frag_archives), self.replicas())
+ fragment_size = self.policy.fragment_size
+ node_payloads = []
+ for fa in frag_archives:
+ payload = [fa[x:x + fragment_size]
+ for x in range(0, len(fa), fragment_size)]
+ node_payloads.append(payload)
+ fragment_payloads = zip(*node_payloads)
+
+ expected_body = ''
+ for fragment_payload in fragment_payloads:
+ self.assertEqual(len(fragment_payload), self.replicas())
+ if True:
+ fragment_payload = list(fragment_payload)
+ expected_body += self.policy.pyeclib_driver.decode(
+ fragment_payload)
+
+ self.assertEqual(len(test_body), len(expected_body))
+ self.assertEqual(test_body, expected_body)
+
+ def test_PUT_old_obj_server(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ responses = [
+            # one server will respond 100-continue but not include the
+            # required expect headers, and the connection will be dropped
+ ((100, Exception('not used')), {}),
+ ] + [
+            # and plenty of successful responses too
+ (201, {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes',
+ }),
+ ] * self.replicas()
+ random.shuffle(responses)
+ if responses[-1][0] != 201:
+ # whoops, stupid random
+ responses = responses[1:] + [responses[0]]
+ codes, expect_headers = zip(*responses)
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 201)
+
+ def test_COPY_cross_policy_type_from_replicated(self):
+ self.app.per_container_info = {
+ 'c1': self.app.container_info.copy(),
+ 'c2': self.app.container_info.copy(),
+ }
+ # make c2 use replicated storage policy 1
+ self.app.per_container_info['c2']['storage_policy'] = '1'
+
+ # a put request with copy from source c2
+ req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT',
+ body='', headers={
+ 'X-Copy-From': 'c2/o'})
+
+ # c2 get
+ codes = [200] * self.replicas(POLICIES[1])
+ codes += [404] * POLICIES[1].object_ring.max_more_nodes
+ # c1 put
+ codes += [201] * self.replicas()
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_COPY_cross_policy_type_to_replicated(self):
+ self.app.per_container_info = {
+ 'c1': self.app.container_info.copy(),
+ 'c2': self.app.container_info.copy(),
+ }
+ # make c1 use replicated storage policy 1
+ self.app.per_container_info['c1']['storage_policy'] = '1'
+
+ # a put request with copy from source c2
+ req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT',
+ body='', headers={
+ 'X-Copy-From': 'c2/o'})
+
+ # c2 get
+ codes = [200] * self.replicas()
+ codes += [404] * self.obj_ring.max_more_nodes
+ headers = {
+ 'X-Object-Sysmeta-Ec-Content-Length': 0,
+ }
+ # c1 put
+ codes += [201] * self.replicas(POLICIES[1])
+ with set_http_connect(*codes, headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 201)
+
+ def test_COPY_cross_policy_type_unknown(self):
+ self.app.per_container_info = {
+ 'c1': self.app.container_info.copy(),
+ 'c2': self.app.container_info.copy(),
+ }
+ # make c1 use some made up storage policy index
+ self.app.per_container_info['c1']['storage_policy'] = '13'
+
+ # a COPY request of c2 with destination in c1
+ req = swift.common.swob.Request.blank('/v1/a/c2/o', method='COPY',
+ body='', headers={
+ 'Destination': 'c1/o'})
+ with set_http_connect():
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 503)
+
+ def _make_ec_archive_bodies(self, test_body, policy=None):
+ policy = policy or self.policy
+ segment_size = policy.ec_segment_size
+ # split up the body into buffers
+ chunks = [test_body[x:x + segment_size]
+ for x in range(0, len(test_body), segment_size)]
+ # encode the buffers into fragment payloads
+ fragment_payloads = []
+ for chunk in chunks:
+ fragments = self.policy.pyeclib_driver.encode(chunk)
+ if not fragments:
+ break
+ fragment_payloads.append(fragments)
+
+ # join up the fragment payloads per node
+ ec_archive_bodies = [''.join(fragments)
+ for fragments in zip(*fragment_payloads)]
+ return ec_archive_bodies
+
+ def test_GET_mismatched_fragment_archives(self):
+ segment_size = self.policy.ec_segment_size
+ test_data1 = ('test' * segment_size)[:-333]
+ # N.B. the object data *length* here is different
+ test_data2 = ('blah1' * segment_size)[:-333]
+
+ etag1 = md5(test_data1).hexdigest()
+ etag2 = md5(test_data2).hexdigest()
+
+ ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1)
+ ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2)
+
+ headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1}
+ # here we're going to *lie* and say the etag here matches
+ headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1}
+
+ responses1 = [(200, body, headers1)
+ for body in ec_archive_bodies1]
+ responses2 = [(200, body, headers2)
+ for body in ec_archive_bodies2]
+
+ req = swob.Request.blank('/v1/a/c/o')
+
+ # sanity check responses1
+ responses = responses1[:self.policy.ec_ndata]
+ status_codes, body_iter, headers = zip(*responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(md5(resp.body).hexdigest(), etag1)
+
+ # sanity check responses2
+ responses = responses2[:self.policy.ec_ndata]
+ status_codes, body_iter, headers = zip(*responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(md5(resp.body).hexdigest(), etag2)
+
+ # now mix the responses a bit
+ mix_index = random.randint(0, self.policy.ec_ndata - 1)
+ mixed_responses = responses1[:self.policy.ec_ndata]
+ mixed_responses[mix_index] = responses2[mix_index]
+
+ status_codes, body_iter, headers = zip(*mixed_responses)
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ try:
+ resp.body
+ except ECDriverError:
+ pass
+ else:
+ self.fail('invalid ec fragment response body did not blow up!')
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(1, len(error_lines))
+ msg = error_lines[0]
+ self.assertTrue('Error decoding fragments' in msg)
+ self.assertTrue('/a/c/o' in msg)
+ log_msg_args, log_msg_kwargs = self.logger.log_dict['error'][0]
+ self.assertEqual(log_msg_kwargs['exc_info'][0], ECDriverError)
+
+ def test_GET_read_timeout(self):
+ segment_size = self.policy.ec_segment_size
+ test_data = ('test' * segment_size)[:-333]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = self._make_ec_archive_bodies(test_data)
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ self.app.recoverable_node_timeout = 0.01
+ responses = [(200, SlowBody(body, 0.1), headers)
+ for body in ec_archive_bodies]
+
+ req = swob.Request.blank('/v1/a/c/o')
+
+ status_codes, body_iter, headers = zip(*responses + [
+ (404, '', {}) for i in range(
+ self.policy.object_ring.max_more_nodes)])
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+        # do this inside the fake http context manager; it'll try to
+        # resume but won't be able to give us all the right bytes
+ self.assertNotEqual(md5(resp.body).hexdigest(), etag)
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(self.replicas(), len(error_lines))
+ nparity = self.policy.ec_nparity
+ for line in error_lines[:nparity]:
+ self.assertTrue('retrying' in line)
+ for line in error_lines[nparity:]:
+ self.assertTrue('ChunkReadTimeout (0.01s)' in line)
+
+ def test_GET_read_timeout_resume(self):
+ segment_size = self.policy.ec_segment_size
+ test_data = ('test' * segment_size)[:-333]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = self._make_ec_archive_bodies(test_data)
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ self.app.recoverable_node_timeout = 0.05
+ # first one is slow
+ responses = [(200, SlowBody(ec_archive_bodies[0], 0.1), headers)]
+ # ... the rest are fine
+ responses += [(200, body, headers)
+ for body in ec_archive_bodies[1:]]
+
+ req = swob.Request.blank('/v1/a/c/o')
+
+ status_codes, body_iter, headers = zip(
+ *responses[:self.policy.ec_ndata + 1])
+ with set_http_connect(*status_codes, body_iter=body_iter,
+ headers=headers):
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ self.assertTrue(md5(resp.body).hexdigest(), etag)
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(1, len(error_lines))
+ self.assertTrue('retrying' in error_lines[0])
if __name__ == '__main__':
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 1a59016..bdce41f 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,55 +15,67 @@
# limitations under the License.
from __future__ import with_statement
-import cPickle as pickle
import logging
+import math
import os
+import pickle
import sys
import unittest
-import urlparse
-from nose import SkipTest
-from contextlib import contextmanager, nested, closing
+from contextlib import closing, contextmanager, nested
from gzip import GzipFile
from shutil import rmtree
+from StringIO import StringIO
import gc
import time
+from textwrap import dedent
from urllib import quote
from hashlib import md5
-from tempfile import mkdtemp
+from pyeclib.ec_iface import ECDriverError
+from tempfile import mkdtemp, NamedTemporaryFile
import weakref
+import operator
+import functools
+from swift.obj import diskfile
import re
+import random
+import urlparse
+from nose import SkipTest
import mock
-from eventlet import sleep, spawn, wsgi, listen
-import simplejson
+from eventlet import sleep, spawn, wsgi, listen, Timeout
+from swift.common.utils import hash_path, json, storage_directory, public
import gluster.swift.common.Glusterfs as gfs
gfs.RUN_DIR = mkdtemp()
-from test.unit import connect_tcp, readuntil2crlfs, FakeLogger, \
- fake_http_connect, FakeRing, FakeMemcache, debug_logger
+from test.unit import (
+ connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing,
+ FakeMemcache, debug_logger, patch_policies, write_fake_ring,
+ mocked_http_conn)
+from swift.proxy.controllers.obj import ReplicatedObjectController
from gluster.swift.proxy import server as proxy_server
from gluster.swift.account import server as account_server
from gluster.swift.container import server as container_server
from gluster.swift.obj import server as object_server
-from swift.common import ring
from swift.common.middleware import proxy_logging
from swift.common.middleware.acl import parse_acl, format_acl
-from swift.common.exceptions import ChunkReadTimeout
-from swift.common.constraints import MAX_META_NAME_LENGTH, \
- MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \
- MAX_FILE_SIZE, MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH, \
- ACCOUNT_LISTING_LIMIT, CONTAINER_LISTING_LIMIT, MAX_OBJECT_NAME_LENGTH
-from swift.common import utils
+from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \
+ APIVersionError
+from swift.common import utils, constraints
+from swift.common.ring import RingData
from swift.common.utils import mkdirs, normalize_timestamp, NullLogger
-from swift.common.wsgi import monkey_patch_mimetools
+from swift.common.wsgi import monkey_patch_mimetools, loadapp
from swift.proxy.controllers import base as proxy_base
from swift.proxy.controllers.base import get_container_memcache_key, \
get_account_memcache_key, cors_validation
import swift.proxy.controllers
-from swift.common.request_helpers import get_sys_meta_prefix
+import swift.proxy.controllers.obj
from swift.common.swob import Request, Response, HTTPUnauthorized, \
- HTTPException
+ HTTPException, HTTPForbidden, HeaderKeyDict
+from swift.common import storage_policy
+from swift.common.storage_policy import StoragePolicy, ECStoragePolicy, \
+ StoragePolicyCollection, POLICIES
+from swift.common.request_helpers import get_sys_meta_prefix
# mocks
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
@@ -70,13 +83,15 @@ logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
STATIC_TIME = time.time()
_test_coros = _test_servers = _test_sockets = _orig_container_listing_limit = \
- _testdir = _orig_SysLogHandler = None
+ _testdir = _orig_SysLogHandler = _orig_POLICIES = _test_POLICIES = None
def do_setup(the_object_server):
utils.HASH_PATH_SUFFIX = 'endcap'
global _testdir, _test_servers, _test_sockets, \
- _orig_container_listing_limit, _test_coros, _orig_SysLogHandler
+ _orig_container_listing_limit, _test_coros, _orig_SysLogHandler, \
+ _orig_POLICIES, _test_POLICIES
+ _orig_POLICIES = storage_policy._POLICIES
_orig_SysLogHandler = utils.SysLogHandler
utils.SysLogHandler = mock.MagicMock()
monkey_patch_mimetools()
@@ -86,12 +101,13 @@ def do_setup(the_object_server):
_testdir = os.path.join(gfs.RUN_DIR, 'swift')
mkdirs(_testdir)
rmtree(_testdir)
- mkdirs(os.path.join(_testdir, 'sda1'))
- mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
- mkdirs(os.path.join(_testdir, 'sdb1'))
- mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
+ for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
+ 'sdf1', 'sdg1', 'sdh1', 'sdi1'):
+ mkdirs(os.path.join(_testdir, drive, 'tmp'))
mkdirs(os.path.join(_testdir, 'a'))
+ mkdirs(os.path.join(_testdir, 'a1'))
mkdirs(os.path.join(_testdir, 'a', 'tmp'))
+ mkdirs(os.path.join(_testdir, 'a1', 'tmp'))
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers':
'content-encoding, x-object-manifest, content-disposition, foo',
@@ -103,43 +119,57 @@ def do_setup(the_object_server):
con2lis = listen(('localhost', 0))
obj1lis = listen(('localhost', 0))
obj2lis = listen(('localhost', 0))
+ obj3lis = listen(('localhost', 0))
+ objsocks = [obj1lis, obj2lis, obj3lis]
_test_sockets = \
- (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)
+ (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
with closing(GzipFile(account_ring_path, 'wb')) as f:
- pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
+ pickle.dump(RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': acc1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': acc2lis.getsockname()[1]},
# Gluster volume mapping to device
- {'id': 1, 'zone': 1, 'device': 'a', 'ip': '127.0.0.1',
+ {'id': 0, 'zone': 0, 'device': 'a', 'ip': '127.0.0.1',
+ 'port': acc1lis.getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'a1', 'ip': '127.0.0.1',
'port': acc2lis.getsockname()[1]}], 30),
f)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
with closing(GzipFile(container_ring_path, 'wb')) as f:
- pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
+ pickle.dump(RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': con1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]},
# Gluster volume mapping to device
- {'id': 1, 'zone': 1, 'device': 'a', 'ip': '127.0.0.1',
+ {'id': 0, 'zone': 0, 'device': 'a', 'ip': '127.0.0.1',
+ 'port': con1lis.getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'a1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]}], 30),
f)
object_ring_path = os.path.join(_testdir, 'object.ring.gz')
with closing(GzipFile(object_ring_path, 'wb')) as f:
- pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
+ pickle.dump(RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]},
# Gluster volume mapping to device
- {'id': 1, 'zone': 1, 'device': 'a', 'ip': '127.0.0.1',
+ {'id': 0, 'zone': 0, 'device': 'a', 'ip': '127.0.0.1',
+ 'port': obj1lis.getsockname()[1]},
+ {'id': 1, 'zone': 1, 'device': 'a1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]}], 30),
f)
+
prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone(),
logger=debug_logger('proxy'))
+ for policy in POLICIES:
+ # make sure all the rings are loaded
+ prosrv.get_object_ring(policy.idx)
+ # don't lose this one!
+ _test_POLICIES = storage_policy._POLICIES
acc1srv = account_server.AccountController(
conf, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
@@ -152,8 +182,10 @@ def do_setup(the_object_server):
conf, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj2'))
+ obj3srv = the_object_server.ObjectController(
+ conf, logger=debug_logger('obj3'))
_test_servers = \
- (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv)
+ (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv)
nl = NullLogger()
logging_prosv = proxy_logging.ProxyLoggingMiddleware(prosrv, conf,
logger=prosrv.logger)
@@ -164,9 +196,10 @@ def do_setup(the_object_server):
con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
+ obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
_test_coros = \
- (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)
- # Gluster: ensure account exists
+ (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa)
+ # Create account
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a')
for node in nodes:
@@ -177,12 +210,24 @@ def do_setup(the_object_server):
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
-
+ assert(resp.status == 202)
+ # Gluster: ensure account exists
+ ts = normalize_timestamp(time.time())
+ partition, nodes = prosrv.account_ring.get_nodes('a1')
+ for node in nodes:
+ conn = swift.proxy.controllers.obj.http_connect(node['ip'],
+ node['port'],
+ node['device'],
+ partition, 'PUT',
+ '/a1',
+ {'X-Timestamp': ts,
+ 'x-trans-id': 'test'})
+ resp = conn.getresponse()
# For GlusterFS the volume should have already been created since
# accounts map to volumes. Expect a 202 instead of a 201 as for
# OpenStack Swift's proxy unit test the account is explicitly created.
assert(resp.status == 202)
- # Create container
+ # Create containers, 1 per test policy
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
@@ -193,6 +238,62 @@ def do_setup(the_object_server):
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
+ # Create container in other account
+ # used for account-to-account tests
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
+ 'Connection: close\r\nX-Auth-Token: t\r\n'
+ 'Content-Length: 0\r\n\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
+ exp, headers[:len(exp)])
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write(
+ 'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
+ #'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
+ # Gluster-Swift: No storage policies
+ 'Connection: close\r\nX-Auth-Token: t\r\n'
+ 'Content-Length: 0\r\n\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ assert headers[:len(exp)] == exp, \
+ "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write(
+ 'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
+ #'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
+ # Gluster-Swift: No storage policies
+ 'Connection: close\r\nX-Auth-Token: t\r\n'
+ 'Content-Length: 0\r\n\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ assert headers[:len(exp)] == exp, \
+ "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
+
+
+def unpatch_policies(f):
+ """
+ This will unset a TestCase level patch_policies to use the module level
+ policies setup for the _test_servers instead.
+
+ N.B. You should NEVER modify the _test_server policies or rings during a
+ test because they persist for the life of the entire module!
+ """
+ raise SkipTest("Storage Policies are not supported")
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ with patch_policies(_test_POLICIES):
+ return f(*args, **kwargs)
+ return wrapper
def setup():
@@ -204,6 +305,7 @@ def teardown():
server.kill()
rmtree(os.path.dirname(_testdir))
utils.SysLogHandler = _orig_SysLogHandler
+ storage_policy._POLICIES = _orig_POLICIES
def sortHeaderNames(headerNames):
@@ -217,6 +319,37 @@ def sortHeaderNames(headerNames):
return ', '.join(headers)
+def parse_headers_string(headers_str):
+ headers_dict = HeaderKeyDict()
+ for line in headers_str.split('\r\n'):
+ if ': ' in line:
+ header, value = line.split(': ', 1)
+ headers_dict[header] = value
+ return headers_dict
+
+
+def node_error_count(proxy_app, ring_node):
+ # Reach into the proxy's internals to get the error count for a
+ # particular node
+ node_key = proxy_app._error_limit_node_key(ring_node)
+ return proxy_app._error_limiting.get(node_key, {}).get('errors', 0)
+
+
+def node_last_error(proxy_app, ring_node):
+ # Reach into the proxy's internals to get the last error for a
+ # particular node
+ node_key = proxy_app._error_limit_node_key(ring_node)
+ return proxy_app._error_limiting.get(node_key, {}).get('last_error')
+
+
+def set_node_errors(proxy_app, ring_node, value, last_error):
+ # Set the node's error count to value
+ node_key = proxy_app._error_limit_node_key(ring_node)
+ stats = proxy_app._error_limiting.setdefault(node_key, {})
+ stats['errors'] = value
+ stats['last_error'] = last_error
+
+
class FakeMemcacheReturnsNone(FakeMemcache):
def get(self, key):
@@ -231,6 +364,9 @@ def save_globals():
None)
orig_account_info = getattr(swift.proxy.controllers.Controller,
'account_info', None)
+ orig_container_info = getattr(swift.proxy.controllers.Controller,
+ 'container_info', None)
+
try:
yield True
finally:
@@ -239,6 +375,7 @@ def save_globals():
swift.proxy.controllers.obj.http_connect = orig_http_connect
swift.proxy.controllers.account.http_connect = orig_http_connect
swift.proxy.controllers.container.http_connect = orig_http_connect
+ swift.proxy.controllers.Controller.container_info = orig_container_info
def set_http_connect(*args, **kwargs):
@@ -250,6 +387,36 @@ def set_http_connect(*args, **kwargs):
return new_connect
+def _make_callback_func(calls):
+ def callback(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None, ssl=False):
+ context = {}
+ context['method'] = method
+ context['path'] = path
+ context['headers'] = headers or {}
+ calls.append(context)
+ return callback
+
+
+def _limit_max_file_size(f):
+ """
+ This will limit constraints.MAX_FILE_SIZE for the duration of the
+ wrapped function, based on whether MAX_FILE_SIZE exceeds the
+ sys.maxsize limit on the system running the tests.
+
+ This allows successful testing on 32 bit systems.
+ """
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ test_max_file_size = constraints.MAX_FILE_SIZE
+ if constraints.MAX_FILE_SIZE >= sys.maxsize:
+ test_max_file_size = (2 ** 30 + 2)
+ with mock.patch.object(constraints, 'MAX_FILE_SIZE',
+ test_max_file_size):
+ return f(*args, **kwargs)
+ return wrapper
+
+
# tests
class TestController(unittest.TestCase):
@@ -257,11 +424,9 @@ class TestController(unittest.TestCase):
self.account_ring = FakeRing()
self.container_ring = FakeRing()
self.memcache = FakeMemcache()
-
app = proxy_server.Application(None, self.memcache,
account_ring=self.account_ring,
- container_ring=self.container_ring,
- object_ring=FakeRing())
+ container_ring=self.container_ring)
self.controller = swift.proxy.controllers.Controller(app)
class FakeReq(object):
@@ -515,17 +680,43 @@ class TestController(unittest.TestCase):
test(503, 503, 503)
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestProxyServer(unittest.TestCase):
+ def test_get_object_ring(self):
+ baseapp = proxy_server.Application({},
+ FakeMemcache(),
+ container_ring=FakeRing(),
+ account_ring=FakeRing())
+ with patch_policies([
+ StoragePolicy(0, 'a', False, object_ring=123),
+ StoragePolicy(1, 'b', True, object_ring=456),
+ StoragePolicy(2, 'd', False, object_ring=789)
+ ]):
+ # None means legacy so always use policy 0
+ ring = baseapp.get_object_ring(None)
+ self.assertEqual(ring, 123)
+ ring = baseapp.get_object_ring('')
+ self.assertEqual(ring, 123)
+ ring = baseapp.get_object_ring('0')
+ self.assertEqual(ring, 123)
+ ring = baseapp.get_object_ring('1')
+ self.assertEqual(ring, 456)
+ ring = baseapp.get_object_ring('2')
+ self.assertEqual(ring, 789)
+ # illegal values
+ self.assertRaises(ValueError, baseapp.get_object_ring, '99')
+ self.assertRaises(ValueError, baseapp.get_object_ring, 'asdf')
+
def test_unhandled_exception(self):
class MyApp(proxy_server.Application):
def get_controller(self, path):
- raise Exception('this shouldnt be caught')
+ raise Exception('this shouldn\'t be caught')
app = MyApp(None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), object_ring=FakeRing())
+ container_ring=FakeRing())
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
app.update_request(req)
resp = app.handle_request(req)
@@ -535,7 +726,6 @@ class TestProxyServer(unittest.TestCase):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
- object_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '__init__'}))
@@ -545,8 +735,7 @@ class TestProxyServer(unittest.TestCase):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
- account_ring=FakeRing(),
- object_ring=FakeRing())
+ account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '!invalid'}))
self.assertEquals(resp.status, '405 Method Not Allowed')
@@ -560,8 +749,7 @@ class TestProxyServer(unittest.TestCase):
set_http_connect(200)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
@@ -576,8 +764,7 @@ class TestProxyServer(unittest.TestCase):
return HTTPUnauthorized(request=req)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
@@ -589,8 +776,7 @@ class TestProxyServer(unittest.TestCase):
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
- FakeRing(), FakeRing(),
- FakeRing())
+ FakeRing(), FakeRing())
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-1'}))
self.assertEquals(resp.status, '400 Bad Request')
@@ -602,15 +788,52 @@ class TestProxyServer(unittest.TestCase):
finally:
rmtree(swift_dir, ignore_errors=True)
+ def test_adds_transaction_id(self):
+ swift_dir = mkdtemp()
+ try:
+ logger = FakeLogger()
+ baseapp = proxy_server.Application({'swift_dir': swift_dir},
+ FakeMemcache(), logger,
+ container_ring=FakeLogger(),
+ account_ring=FakeRing())
+ baseapp.handle_request(
+ Request.blank('/info',
+ environ={'HTTP_X_TRANS_ID_EXTRA': 'sardine',
+ 'REQUEST_METHOD': 'GET'}))
+ # This is kind of a hokey way to get the transaction ID; it'd be
+ # better to examine response headers, but the catch_errors
+ # middleware is what sets the X-Trans-Id header, and we don't have
+ # that available here.
+ self.assertTrue(logger.txn_id.endswith('-sardine'))
+ finally:
+ rmtree(swift_dir, ignore_errors=True)
+
+ def test_adds_transaction_id_length_limit(self):
+ swift_dir = mkdtemp()
+ try:
+ logger = FakeLogger()
+ baseapp = proxy_server.Application({'swift_dir': swift_dir},
+ FakeMemcache(), logger,
+ container_ring=FakeLogger(),
+ account_ring=FakeRing())
+ baseapp.handle_request(
+ Request.blank('/info',
+ environ={'HTTP_X_TRANS_ID_EXTRA': 'a' * 1000,
+ 'REQUEST_METHOD': 'GET'}))
+ self.assertTrue(logger.txn_id.endswith(
+ '-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
+ finally:
+ rmtree(swift_dir, ignore_errors=True)
+
def test_denied_host_header(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir,
'deny_host_headers':
'invalid_host.com'},
- FakeMemcache(), FakeLogger(),
- FakeRing(), FakeRing(),
- FakeRing())
+ FakeMemcache(),
+ container_ring=FakeLogger(),
+ account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a/c/o',
environ={'HTTP_HOST': 'invalid_host.com'}))
@@ -622,7 +845,6 @@ class TestProxyServer(unittest.TestCase):
baseapp = proxy_server.Application({'sorting_method': 'timing'},
FakeMemcache(),
container_ring=FakeRing(),
- object_ring=FakeRing(),
account_ring=FakeRing())
self.assertEquals(baseapp.node_timings, {})
@@ -651,7 +873,6 @@ class TestProxyServer(unittest.TestCase):
'read_affinity': 'r1=1'},
FakeMemcache(),
container_ring=FakeRing(),
- object_ring=FakeRing(),
account_ring=FakeRing())
nodes = [{'region': 2, 'zone': 1, 'ip': '127.0.0.1'},
@@ -665,22 +886,22 @@ class TestProxyServer(unittest.TestCase):
def test_info_defaults(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=FakeRing())
self.assertTrue(app.expose_info)
self.assertTrue(isinstance(app.disallowed_sections, list))
- self.assertEqual(0, len(app.disallowed_sections))
+ self.assertEqual(1, len(app.disallowed_sections))
+ self.assertEqual(['swift.valid_api_versions'],
+ app.disallowed_sections)
self.assertTrue(app.admin_key is None)
def test_get_info_controller(self):
- path = '/info'
+ req = Request.blank('/info')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ container_ring=FakeRing())
- controller, path_parts = app.get_controller(path)
+ controller, path_parts = app.get_controller(req)
self.assertTrue('version' in path_parts)
self.assertTrue(path_parts['version'] is None)
@@ -690,20 +911,253 @@ class TestProxyServer(unittest.TestCase):
self.assertEqual(controller.__name__, 'InfoController')
+ def test_error_limit_methods(self):
+ logger = debug_logger('test')
+ app = proxy_server.Application({}, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing(),
+ logger=logger)
+ node = app.container_ring.get_part_nodes(0)[0]
+ # error occurred
+ app.error_occurred(node, 'test msg')
+ self.assertTrue('test msg' in
+ logger.get_lines_for_level('error')[-1])
+ self.assertEqual(1, node_error_count(app, node))
+
+ # exception occurred
+ try:
+ raise Exception('kaboom1!')
+ except Exception as e1:
+ app.exception_occurred(node, 'test1', 'test1 msg')
+ line = logger.get_lines_for_level('error')[-1]
+ self.assertTrue('test1 server' in line)
+ self.assertTrue('test1 msg' in line)
+ log_args, log_kwargs = logger.log_dict['error'][-1]
+ self.assertTrue(log_kwargs['exc_info'])
+ self.assertEqual(log_kwargs['exc_info'][1], e1)
+ self.assertEqual(2, node_error_count(app, node))
+
+ # warning exception occurred
+ try:
+ raise Exception('kaboom2!')
+ except Exception as e2:
+ app.exception_occurred(node, 'test2', 'test2 msg',
+ level=logging.WARNING)
+ line = logger.get_lines_for_level('warning')[-1]
+ self.assertTrue('test2 server' in line)
+ self.assertTrue('test2 msg' in line)
+ log_args, log_kwargs = logger.log_dict['warning'][-1]
+ self.assertTrue(log_kwargs['exc_info'])
+ self.assertEqual(log_kwargs['exc_info'][1], e2)
+ self.assertEqual(3, node_error_count(app, node))
+
+ # custom exception occurred
+ try:
+ raise Exception('kaboom3!')
+ except Exception as e3:
+ e3_info = sys.exc_info()
+ try:
+ raise Exception('kaboom4!')
+ except Exception:
+ pass
+ app.exception_occurred(node, 'test3', 'test3 msg',
+ level=logging.WARNING, exc_info=e3_info)
+ line = logger.get_lines_for_level('warning')[-1]
+ self.assertTrue('test3 server' in line)
+ self.assertTrue('test3 msg' in line)
+ log_args, log_kwargs = logger.log_dict['warning'][-1]
+ self.assertTrue(log_kwargs['exc_info'])
+ self.assertEqual(log_kwargs['exc_info'][1], e3)
+ self.assertEqual(4, node_error_count(app, node))
+
+ def test_valid_api_version(self):
+ app = proxy_server.Application({}, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
+
+ # The version string is only checked for account, container and object
+ # requests; the raised APIVersionError returns a 404 to the client
+ for path in [
+ '/v2/a',
+ '/v2/a/c',
+ '/v2/a/c/o']:
+ req = Request.blank(path)
+ self.assertRaises(APIVersionError, app.get_controller, req)
+
+ # Default valid API versions are ok
+ for path in [
+ '/v1/a',
+ '/v1/a/c',
+ '/v1/a/c/o',
+ '/v1.0/a',
+ '/v1.0/a/c',
+ '/v1.0/a/c/o']:
+ req = Request.blank(path)
+ controller, path_parts = app.get_controller(req)
+ self.assertTrue(controller is not None)
+
+ # Ensure settings valid API version constraint works
+ for version in ["42", 42]:
+ try:
+ with NamedTemporaryFile() as f:
+ f.write('[swift-constraints]\n')
+ f.write('valid_api_versions = %s\n' % version)
+ f.flush()
+ with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
+ constraints.reload_constraints()
+
+ req = Request.blank('/%s/a' % version)
+ controller, _ = app.get_controller(req)
+ self.assertTrue(controller is not None)
+
+ # In this case v1 is invalid
+ req = Request.blank('/v1/a')
+ self.assertRaises(APIVersionError, app.get_controller, req)
+ finally:
+ constraints.reload_constraints()
+
+ # Check that the valid_api_versions is not exposed by default
+ req = Request.blank('/info')
+ controller, path_parts = app.get_controller(req)
+ self.assertTrue('swift.valid_api_versions' in
+ path_parts.get('disallowed_sections'))
+
+@patch_policies([
+ StoragePolicy(0, 'zero', is_default=True),
+ StoragePolicy(1, 'one'),
+])
+class TestProxyServerLoading(unittest.TestCase):
+
+ def setUp(self):
+ self._orig_hash_suffix = utils.HASH_PATH_SUFFIX
+ utils.HASH_PATH_SUFFIX = 'endcap'
+ self.tempdir = mkdtemp()
+
+ def tearDown(self):
+ rmtree(self.tempdir)
+ utils.HASH_PATH_SUFFIX = self._orig_hash_suffix
+ for policy in POLICIES:
+ policy.object_ring = None
+
+ def test_load_policy_rings(self):
+ for policy in POLICIES:
+ self.assertFalse(policy.object_ring)
+ conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
+ conf_body = """
+ [DEFAULT]
+ swift_dir = %s
+
+ [pipeline:main]
+ pipeline = catch_errors cache proxy-server
+
+ [app:proxy-server]
+ use = egg:swift#proxy
+
+ [filter:cache]
+ use = egg:swift#memcache
+
+ [filter:catch_errors]
+ use = egg:swift#catch_errors
+ """ % self.tempdir
+ with open(conf_path, 'w') as f:
+ f.write(dedent(conf_body))
+ account_ring_path = os.path.join(self.tempdir, 'account.ring.gz')
+ write_fake_ring(account_ring_path)
+ container_ring_path = os.path.join(self.tempdir, 'container.ring.gz')
+ write_fake_ring(container_ring_path)
+ for policy in POLICIES:
+ object_ring_path = os.path.join(self.tempdir,
+ policy.ring_name + '.ring.gz')
+ write_fake_ring(object_ring_path)
+ app = loadapp(conf_path)
+ # find the end of the pipeline
+ while hasattr(app, 'app'):
+ app = app.app
+
+ # validate loaded rings
+ self.assertEqual(app.account_ring.serialized_path,
+ account_ring_path)
+ self.assertEqual(app.container_ring.serialized_path,
+ container_ring_path)
+ for policy in POLICIES:
+ self.assertEqual(policy.object_ring,
+ app.get_object_ring(int(policy)))
+
+ def test_missing_rings(self):
+ conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
+ conf_body = """
+ [DEFAULT]
+ swift_dir = %s
+
+ [pipeline:main]
+ pipeline = catch_errors cache proxy-server
+
+ [app:proxy-server]
+ use = egg:swift#proxy
+
+ [filter:cache]
+ use = egg:swift#memcache
+
+ [filter:catch_errors]
+ use = egg:swift#catch_errors
+ """ % self.tempdir
+ with open(conf_path, 'w') as f:
+ f.write(dedent(conf_body))
+ ring_paths = [
+ os.path.join(self.tempdir, 'account.ring.gz'),
+ os.path.join(self.tempdir, 'container.ring.gz'),
+ ]
+ for policy in POLICIES:
+ self.assertFalse(policy.object_ring)
+ object_ring_path = os.path.join(self.tempdir,
+ policy.ring_name + '.ring.gz')
+ ring_paths.append(object_ring_path)
+ for policy in POLICIES:
+ self.assertFalse(policy.object_ring)
+ for ring_path in ring_paths:
+ self.assertFalse(os.path.exists(ring_path))
+ self.assertRaises(IOError, loadapp, conf_path)
+ write_fake_ring(ring_path)
+ # all rings exist, app should load
+ loadapp(conf_path)
+ for policy in POLICIES:
+ self.assert_(policy.object_ring)
+
+
+@patch_policies([StoragePolicy(0, 'zero', True,
+ object_ring=FakeRing(base_port=3000))])
class TestObjectController(unittest.TestCase):
def setUp(self):
- self.app = proxy_server.Application(None, FakeMemcache(),
- logger=debug_logger('proxy-ut'),
- account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing())
+ self.app = proxy_server.Application(
+ None, FakeMemcache(),
+ logger=debug_logger('proxy-ut'),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
def tearDown(self):
self.app.account_ring.set_replicas(3)
self.app.container_ring.set_replicas(3)
- self.app.object_ring.set_replicas(3)
+ for policy in POLICIES:
+ policy.object_ring = FakeRing(base_port=3000)
+
+ def put_container(self, policy_name, container_name):
+ # Note: only works if called with unpatched policies
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/%s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: 0\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Storage-Policy: %s\r\n'
+ '\r\n' % (container_name, policy_name))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 2'
+ self.assertEqual(headers[:len(exp)], exp)
def assert_status_map(self, method, statuses, expected, raise_exc=False):
with save_globals():
@@ -717,7 +1171,10 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
- res = method(req)
+ try:
+ res = method(req)
+ except HTTPException as res:
+ pass
self.assertEquals(res.status_int, expected)
# repeat test
@@ -727,9 +1184,181 @@ class TestObjectController(unittest.TestCase):
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
- res = method(req)
+ try:
+ res = method(req)
+ except HTTPException as res:
+ pass
self.assertEquals(res.status_int, expected)
+ @unpatch_policies
+ def test_policy_IO(self):
+ def check_file(policy, cont, devs, check_val):
+ partition, nodes = policy.object_ring.get_nodes('a', cont, 'o')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+ for dev in devs:
+ file = df_mgr.get_diskfile(dev, partition, 'a',
+ cont, 'o',
+ policy=policy)
+ if check_val is True:
+ file.open()
+
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+
+ # check policy 0: put file on c, read it back, check loc on disk
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ obj = 'test_object0'
+ path = '/v1/a/c/o'
+ fd.write('PUT %s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Length: %s\r\n'
+ 'Content-Type: text/plain\r\n'
+ '\r\n%s' % (path, str(len(obj)), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+ req = Request.blank(path,
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type':
+ 'text/plain'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.body, obj)
+
+ check_file(POLICIES[0], 'c', ['sda1', 'sdb1'], True)
+ check_file(POLICIES[0], 'c', ['sdc1', 'sdd1', 'sde1', 'sdf1'], False)
+
+ # check policy 1: put file on c1, read it back, check loc on disk
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ path = '/v1/a/c1/o'
+ obj = 'test_object1'
+ fd.write('PUT %s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Length: %s\r\n'
+ 'Content-Type: text/plain\r\n'
+ '\r\n%s' % (path, str(len(obj)), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ self.assertEqual(headers[:len(exp)], exp)
+ req = Request.blank(path,
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type':
+ 'text/plain'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.body, obj)
+
+ check_file(POLICIES[1], 'c1', ['sdc1', 'sdd1'], True)
+ check_file(POLICIES[1], 'c1', ['sda1', 'sdb1', 'sde1', 'sdf1'], False)
+
+ # check policy 2: put file on c2, read it back, check loc on disk
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ path = '/v1/a/c2/o'
+ obj = 'test_object2'
+ fd.write('PUT %s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Length: %s\r\n'
+ 'Content-Type: text/plain\r\n'
+ '\r\n%s' % (path, str(len(obj)), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ self.assertEqual(headers[:len(exp)], exp)
+ req = Request.blank(path,
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type':
+ 'text/plain'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.body, obj)
+
+ check_file(POLICIES[2], 'c2', ['sde1', 'sdf1'], True)
+ check_file(POLICIES[2], 'c2', ['sda1', 'sdb1', 'sdc1', 'sdd1'], False)
+
+ @unpatch_policies
+ def test_policy_IO_override(self):
+ if hasattr(_test_servers[-1], '_filesystem'):
+ # ironically, the _filesystem attribute on the object server means
+ # the in-memory diskfile is in use, so this test does not apply
+ return
+
+ prosrv = _test_servers[0]
+
+ # validate container policy is 1
+ req = Request.blank('/v1/a/c1', method='HEAD')
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 204) # sanity check
+ self.assertEqual(POLICIES[1].name, res.headers['x-storage-policy'])
+
+ # check overrides: put it in policy 2 (not where the container says)
+ req = Request.blank(
+ '/v1/a/c1/wrong-o',
+ environ={'REQUEST_METHOD': 'PUT',
+ 'wsgi.input': StringIO("hello")},
+ headers={'Content-Type': 'text/plain',
+ 'Content-Length': '5',
+ 'X-Backend-Storage-Policy-Index': '2'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 201) # sanity check
+
+ # go to disk to make sure it's there
+ partition, nodes = prosrv.get_object_ring(2).get_nodes(
+ 'a', 'c1', 'wrong-o')
+ node = nodes[0]
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+ df = df_mgr.get_diskfile(node['device'], partition, 'a',
+ 'c1', 'wrong-o', policy=POLICIES[2])
+ with df.open():
+ contents = ''.join(df.reader())
+ self.assertEqual(contents, "hello")
+
+ # can't get it from the normal place
+ req = Request.blank('/v1/a/c1/wrong-o',
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type': 'text/plain'})
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 404) # sanity check
+
+ # but we can get it from policy 2
+ req = Request.blank('/v1/a/c1/wrong-o',
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Content-Type': 'text/plain',
+ 'X-Backend-Storage-Policy-Index': '2'})
+
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.body, 'hello')
+
+ # and we can delete it the same way
+ req = Request.blank('/v1/a/c1/wrong-o',
+ environ={'REQUEST_METHOD': 'DELETE'},
+ headers={'Content-Type': 'text/plain',
+ 'X-Backend-Storage-Policy-Index': '2'})
+
+ res = req.get_response(prosrv)
+ self.assertEqual(res.status_int, 204)
+
+ df = df_mgr.get_diskfile(node['device'], partition, 'a',
+ 'c1', 'wrong-o', policy=POLICIES[2])
+ try:
+ df.open()
+ except DiskFileNotExist as e:
+ self.assert_(float(e.timestamp) > 0)
+ else:
+ self.fail('did not raise DiskFileNotExist')
+
+ @unpatch_policies
def test_GET_newest_large_file(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
@@ -757,6 +1386,619 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
+ @unpatch_policies
+ def test_PUT_ec(self):
+ policy = POLICIES[3]
+ self.put_container("ec", "ec-con")
+
+ obj = 'abCD' * 10 # small, so we don't get multiple EC stripes
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/o1 HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: "%s"\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ ecd = policy.pyeclib_driver
+ expected_pieces = set(ecd.encode(obj))
+
+ # go to disk to make sure it's there and all erasure-coded
+ partition, nodes = policy.object_ring.get_nodes('a', 'ec-con', 'o1')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ got_pieces = set()
+ got_indices = set()
+ got_durable = []
+ for node_index, node in enumerate(nodes):
+ df = df_mgr.get_diskfile(node['device'], partition,
+ 'a', 'ec-con', 'o1',
+ policy=policy)
+ with df.open():
+ meta = df.get_metadata()
+ contents = ''.join(df.reader())
+ got_pieces.add(contents)
+
+ # check presence for a .durable file for the timestamp
+ durable_file = os.path.join(
+ _testdir, node['device'], storage_directory(
+ diskfile.get_data_dir(policy),
+ partition, hash_path('a', 'ec-con', 'o1')),
+ utils.Timestamp(df.timestamp).internal + '.durable')
+
+ if os.path.isfile(durable_file):
+ got_durable.append(True)
+
+ lmeta = dict((k.lower(), v) for k, v in meta.items())
+ got_indices.add(
+ lmeta['x-object-sysmeta-ec-frag-index'])
+
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-etag'],
+ md5(obj).hexdigest())
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-content-length'],
+ str(len(obj)))
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-segment-size'],
+ '4096')
+ self.assertEqual(
+ lmeta['x-object-sysmeta-ec-scheme'],
+ 'jerasure_rs_vand 2+1')
+ self.assertEqual(
+ lmeta['etag'],
+ md5(contents).hexdigest())
+
+ self.assertEqual(expected_pieces, got_pieces)
+ self.assertEqual(set(('0', '1', '2')), got_indices)
+
+ # verify at least 2 puts made it all the way to the end of 2nd
+ # phase, ie at least 2 .durable statuses were written
+ num_durable_puts = sum(d is True for d in got_durable)
+ self.assertTrue(num_durable_puts >= 2)
+
+ @unpatch_policies
+ def test_PUT_ec_multiple_segments(self):
+ ec_policy = POLICIES[3]
+ self.put_container("ec", "ec-con")
+
+ pyeclib_header_size = len(ec_policy.pyeclib_driver.encode("")[0])
+ segment_size = ec_policy.ec_segment_size
+
+ # Big enough to have multiple segments. Also a multiple of the
+ # segment size to get coverage of that path too.
+ obj = 'ABC' * segment_size
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/o2 HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ # it's a 2+1 erasure code, so each fragment archive should be half
+ # the length of the object, plus three inline pyeclib metadata
+ # things (one per segment)
+ expected_length = (len(obj) / 2 + pyeclib_header_size * 3)
+
+ partition, nodes = ec_policy.object_ring.get_nodes(
+ 'a', 'ec-con', 'o2')
+
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ got_durable = []
+ fragment_archives = []
+ for node in nodes:
+ df = df_mgr.get_diskfile(
+ node['device'], partition, 'a',
+ 'ec-con', 'o2', policy=ec_policy)
+ with df.open():
+ contents = ''.join(df.reader())
+ fragment_archives.append(contents)
+ self.assertEqual(len(contents), expected_length)
+
+ # check presence for a .durable file for the timestamp
+ durable_file = os.path.join(
+ _testdir, node['device'], storage_directory(
+ diskfile.get_data_dir(ec_policy),
+ partition, hash_path('a', 'ec-con', 'o2')),
+ utils.Timestamp(df.timestamp).internal + '.durable')
+
+ if os.path.isfile(durable_file):
+ got_durable.append(True)
+
+ # Verify that we can decode each individual fragment and that they
+ # are all the correct size
+ fragment_size = ec_policy.fragment_size
+ nfragments = int(
+ math.ceil(float(len(fragment_archives[0])) / fragment_size))
+
+ for fragment_index in range(nfragments):
+ fragment_start = fragment_index * fragment_size
+ fragment_end = (fragment_index + 1) * fragment_size
+
+ try:
+ frags = [fa[fragment_start:fragment_end]
+ for fa in fragment_archives]
+ seg = ec_policy.pyeclib_driver.decode(frags)
+ except ECDriverError:
+ self.fail("Failed to decode fragments %d; this probably "
+ "means the fragments are not the sizes they "
+ "should be" % fragment_index)
+
+ segment_start = fragment_index * segment_size
+ segment_end = (fragment_index + 1) * segment_size
+
+ self.assertEqual(seg, obj[segment_start:segment_end])
+
+ # verify at least 2 puts made it all the way to the end of 2nd
+ # phase, ie at least 2 .durable statuses were written
+ num_durable_puts = sum(d is True for d in got_durable)
+ self.assertTrue(num_durable_puts >= 2)
+
+ @unpatch_policies
+ def test_PUT_ec_object_etag_mismatch(self):
+ self.put_container("ec", "ec-con")
+
+ obj = '90:6A:02:60:B1:08-96da3e706025537fc42464916427727e'
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/o3 HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: %s\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5('something else').hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 422'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ # nothing should have made it to disk on the object servers
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'o3')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'o3')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ for node in nodes:
+ df = df_mgr.get_diskfile(node['device'], partition,
+ 'a', 'ec-con', 'o3', policy=POLICIES[3])
+ self.assertRaises(DiskFileNotExist, df.open)
+
+ @unpatch_policies
+ def test_PUT_ec_fragment_archive_etag_mismatch(self):
+ self.put_container("ec", "ec-con")
+
+ # Cause a hash mismatch by feeding one particular MD5 hasher some
+ # extra data. The goal here is to get exactly one of the hashers in
+ # an object server.
+ countdown = [1]
+
+ def busted_md5_constructor(initial_str=""):
+ hasher = md5(initial_str)
+ if countdown[0] == 0:
+ hasher.update('wrong')
+ countdown[0] -= 1
+ return hasher
+
+ obj = 'uvarovite-esurience-cerated-symphysic'
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ with mock.patch('swift.obj.server.md5', busted_md5_constructor):
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/pimento HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: %s\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 503' # no quorum
+ self.assertEqual(headers[:len(exp)], exp)
+
+ # 2/3 of the fragment archives should have landed on disk
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'pimento')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+
+ partition, nodes = prosrv.get_object_ring(3).get_nodes(
+ 'a', 'ec-con', 'pimento')
+ conf = {'devices': _testdir, 'mount_check': 'false'}
+
+ df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
+
+ found = 0
+ for node in nodes:
+ df = df_mgr.get_diskfile(node['device'], partition,
+ 'a', 'ec-con', 'pimento',
+ policy=POLICIES[3])
+ try:
+ df.open()
+ found += 1
+ except DiskFileNotExist:
+ pass
+ self.assertEqual(found, 2)
+
+ @unpatch_policies
+ def test_PUT_ec_if_none_match(self):
+ self.put_container("ec", "ec-con")
+
+ obj = 'ananepionic-lepidophyllous-ropewalker-neglectful'
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Etag: "%s"\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'If-None-Match: *\r\n'
+ 'Etag: "%s"\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 412'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ @unpatch_policies
+ def test_GET_ec(self):
+ self.put_container("ec", "ec-con")
+
+ obj = '0123456' * 11 * 17
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Object-Meta-Color: chartreuse\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+ self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
+
+ gotten_obj = ''
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+ self.assertEqual(gotten_obj, obj)
+
+ @unpatch_policies
+ def test_conditional_GET_ec(self):
+ self.put_container("ec", "ec-con")
+
+ obj = 'this object has an etag and is otherwise unimportant'
+ etag = md5(obj).hexdigest()
+ not_etag = md5(obj + "blahblah").hexdigest()
+
+ prolis = _test_sockets[0]
+ prosrv = _test_servers[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/conditionals HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ for verb in ('GET', 'HEAD'):
+ # If-Match
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-Match': etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-Match': not_etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 412)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-Match': "*"})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+
+ # If-None-Match
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-None-Match': etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 304)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-None-Match': not_etag})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+
+ req = Request.blank(
+ '/v1/a/ec-con/conditionals',
+ environ={'REQUEST_METHOD': verb},
+ headers={'If-None-Match': "*"})
+ resp = req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 304)
+
+ @unpatch_policies
+ def test_GET_ec_big(self):
+ self.put_container("ec", "ec-con")
+
+ # our EC segment size is 4 KiB, so this is multiple (3) segments;
+ # we'll verify that with a sanity check
+ obj = 'a moose once bit my sister' * 400
+ self.assertTrue(
+ len(obj) > POLICIES.get_by_name("ec").ec_segment_size * 2,
+ "object is too small for proper testing")
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+
+ gotten_obj = ''
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+ # This may look like a redundant test, but when things fail, this
+ # has a useful failure message while the subsequent one spews piles
+ # of garbage and demolishes your terminal's scrollback buffer.
+ self.assertEqual(len(gotten_obj), len(obj))
+ self.assertEqual(gotten_obj, obj)
+
+ @unpatch_policies
+ def test_GET_ec_failure_handling(self):
+ self.put_container("ec", "ec-con")
+
+ obj = 'look at this object; it is simply amazing ' * 500
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ def explodey_iter(inner_iter):
+ yield next(inner_iter)
+ raise Exception("doom ba doom")
+
+ real_ec_app_iter = swift.proxy.controllers.obj.ECAppIter
+
+ def explodey_ec_app_iter(path, policy, iterators, *a, **kw):
+ # Each thing in `iterators` here is a document-parts iterator,
+ # and we want to fail after getting a little into each part.
+ #
+ # That way, we ensure we've started streaming the response to
+ # the client when things go wrong.
+ return real_ec_app_iter(
+ path, policy,
+ [explodey_iter(i) for i in iterators],
+ *a, **kw)
+
+ with mock.patch("swift.proxy.controllers.obj.ECAppIter",
+ explodey_ec_app_iter):
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+
+ gotten_obj = ''
+ try:
+ with Timeout(300): # don't hang the testrun when this fails
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+ except Timeout:
+ self.fail("GET hung when connection failed")
+
+ # Ensure we failed partway through, otherwise the mocks could
+ # get out of date without anyone noticing
+ self.assertTrue(0 < len(gotten_obj) < len(obj))
+
+ @unpatch_policies
+ def test_HEAD_ec(self):
+ self.put_container("ec", "ec-con")
+
+ obj = '0123456' * 11 * 17
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Object-Meta-Color: chartreuse\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('HEAD /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ headers = parse_headers_string(headers)
+ self.assertEqual(str(len(obj)), headers['Content-Length'])
+ self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
+ self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
+
+ @unpatch_policies
+ def test_GET_ec_404(self):
+ self.put_container("ec", "ec-con")
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 404'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ @unpatch_policies
+ def test_HEAD_ec_404(self):
+ self.put_container("ec", "ec-con")
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('HEAD /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 404'
+ self.assertEqual(headers[:len(exp)], exp)
+
def test_PUT_expect_header_zero_content_length(self):
test_errors = []
@@ -768,13 +2010,22 @@ class TestObjectController(unittest.TestCase):
'server!')
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- # The (201, -4) tuples in there have the effect of letting the
- # initial connect succeed, after which getexpect() gets called and
- # then the -4 makes the response of that actually be 201 instead of
- # 100. Perfectly straightforward.
- set_http_connect(200, 200, (201, -4), (201, -4), (201, -4),
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ # The (201, Exception('test')) tuples in there have the effect of
+ # changing the status of the initial expect response. The default
+ # expect response from FakeConn for 201 is 100.
+ # But the object server won't send a 100 continue line if the
+ # client doesn't send a expect 100 header (as is the case with
+ # zero byte PUTs as validated by this test), nevertheless the
+ # object controller calls getexpect without prejudice. In this
+ # case the status from the response shows up early in getexpect
+ # instead of having to wait until getresponse. The Exception is
+ # in there to ensure that the object controller also *uses* the
+ # result of getexpect instead of calling getresponse in which case
+ # our FakeConn will blow up.
+ success_codes = [(201, Exception('test'))] * 3
+ set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
@@ -795,9 +2046,14 @@ class TestObjectController(unittest.TestCase):
'non-zero byte PUT!')
with save_globals():
- controller = \
- proxy_server.ObjectController(self.app, 'a', 'c', 'o.jpg')
- set_http_connect(200, 200, 201, 201, 201,
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
+ # the (100, 201) tuples in there are just being extra explicit
+ # about the FakeConn returning the 100 Continue status when the
+ # object controller calls getexpect. Which is FakeConn's default
+ # for 201 if no expect_status is specified.
+ success_codes = [(100, 201)] * 3
+ set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
@@ -820,12 +2076,14 @@ class TestObjectController(unittest.TestCase):
def is_r0(node):
return node['region'] == 0
- self.app.object_ring.max_more_nodes = 100
+ object_ring = self.app.get_object_ring(None)
+ object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
- proxy_server.ObjectController(self.app, 'a', 'c', 'o.jpg')
+ ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
set_http_connect(200, 200, 201, 201, 201,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
@@ -854,14 +2112,16 @@ class TestObjectController(unittest.TestCase):
def is_r0(node):
return node['region'] == 0
- self.app.object_ring.max_more_nodes = 100
+ object_ring = self.app.get_object_ring(None)
+ object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
- proxy_server.ObjectController(self.app, 'a', 'c', 'o.jpg')
+ ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
self.app.error_limit(
- self.app.object_ring.get_part_nodes(1)[0], 'test')
+ object_ring.get_part_nodes(1)[0], 'test')
set_http_connect(200, 200, # account, container
201, 201, 201, # 3 working backends
give_connect=test_connect)
@@ -880,6 +2140,28 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(0, written_to[1][1] % 2)
self.assertNotEqual(0, written_to[2][1] % 2)
+ @unpatch_policies
+ def test_PUT_no_etag_fallocate(self):
+ with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ obj = 'hemoleucocytic-surfactant'
+ fd.write('PUT /v1/a/c/o HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+ # one for each obj server; this test has 2
+ self.assertEqual(len(mock_fallocate.mock_calls), 2)
+
+ @unpatch_policies
def test_PUT_message_length_using_content_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -897,6 +2179,7 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_message_length_using_transfer_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -929,6 +2212,7 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_message_length_using_both(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -962,6 +2246,7 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_bad_message_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -995,6 +2280,7 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 400'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_message_length_unsup_xfr_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -1028,9 +2314,9 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 501'
self.assertEqual(headers[:len(exp)], exp)
+ @unpatch_policies
def test_PUT_message_length_too_large(self):
- swift.proxy.controllers.obj.MAX_FILE_SIZE = 10
- try:
+ with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
@@ -1045,9 +2331,8 @@ class TestObjectController(unittest.TestCase):
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 413'
self.assertEqual(headers[:len(exp)], exp)
- finally:
- swift.proxy.controllers.obj.MAX_FILE_SIZE = MAX_FILE_SIZE
+ @unpatch_policies
def test_PUT_last_modified(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -1107,7 +2392,7 @@ class TestObjectController(unittest.TestCase):
if 'x-if-delete-at' in headers or 'X-If-Delete-At' in headers:
test_errors.append('X-If-Delete-At in headers')
- body = simplejson.dumps(
+ body = json.dumps(
[{"name": "001o/1",
"hash": "x",
"bytes": 0,
@@ -1115,11 +2400,12 @@ class TestObjectController(unittest.TestCase):
"last_modified": "1970-01-01T00:00:01.000000"}])
body_iter = ('', '', body, '', '', '', '', '', '', '', '', '', '', '')
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
# HEAD HEAD GET GET HEAD GET GET GET PUT PUT
# PUT DEL DEL DEL
set_http_connect(200, 200, 200, 200, 200, 200, 200, 200, 201, 201,
- 201, 200, 200, 200,
+ 201, 204, 204, 204,
give_connect=test_connect,
body_iter=body_iter,
headers={'x-versions-location': 'foo'})
@@ -1131,10 +2417,133 @@ class TestObjectController(unittest.TestCase):
controller.DELETE(req)
self.assertEquals(test_errors, [])
+ @patch_policies([
+ StoragePolicy(0, 'zero', False, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', True, object_ring=FakeRing())
+ ])
+ def test_DELETE_on_expired_versioned_object(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
+ methods = set()
+ authorize_call_count = [0]
+
+ def test_connect(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ methods.add((method, path))
+
+ def fake_container_info(account, container, req):
+ return {'status': 200, 'sync_key': None,
+ 'meta': {}, 'cors': {'allow_origin': None,
+ 'expose_headers': None,
+ 'max_age': None},
+ 'sysmeta': {}, 'read_acl': None, 'object_count': None,
+ 'write_acl': None, 'versions': 'foo',
+ 'partition': 1, 'bytes': None, 'storage_policy': '1',
+ 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0,
+ 'id': 0, 'device': 'sda', 'port': 1000},
+ {'zone': 1, 'ip': '10.0.0.1', 'region': 1,
+ 'id': 1, 'device': 'sdb', 'port': 1001},
+ {'zone': 2, 'ip': '10.0.0.2', 'region': 0,
+ 'id': 2, 'device': 'sdc', 'port': 1002}]}
+
+ def fake_list_iter(container, prefix, env):
+ object_list = [{'name': '1'}, {'name': '2'}, {'name': '3'}]
+ for obj in object_list:
+ yield obj
+
+ def fake_authorize(req):
+ authorize_call_count[0] += 1
+ return None # allow the request
+
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
+ controller.container_info = fake_container_info
+ controller._listing_iter = fake_list_iter
+ set_http_connect(404, 404, 404, # get for the previous version
+ 200, 200, 200, # get for the pre-previous
+ 201, 201, 201, # put move the pre-previous
+ 204, 204, 204, # delete for the pre-previous
+ give_connect=test_connect)
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'DELETE',
+ 'swift.authorize': fake_authorize})
+
+ self.app.memcache.store = {}
+ self.app.update_request(req)
+ controller.DELETE(req)
+ exp_methods = [('GET', '/a/foo/3'),
+ ('GET', '/a/foo/2'),
+ ('PUT', '/a/c/o'),
+ ('DELETE', '/a/foo/2')]
+ self.assertEquals(set(exp_methods), (methods))
+ self.assertEquals(authorize_call_count[0], 2)
+
+ @patch_policies([
+ StoragePolicy(0, 'zero', False, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', True, object_ring=FakeRing())
+ ])
+ def test_denied_DELETE_of_versioned_object(self):
+ """
+ Verify that a request with read access to a versions container
+ is unable to cause any write operations on the versioned container.
+ """
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
+ methods = set()
+ authorize_call_count = [0]
+
+ def test_connect(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ methods.add((method, path))
+
+ def fake_container_info(account, container, req):
+ return {'status': 200, 'sync_key': None,
+ 'meta': {}, 'cors': {'allow_origin': None,
+ 'expose_headers': None,
+ 'max_age': None},
+ 'sysmeta': {}, 'read_acl': None, 'object_count': None,
+ 'write_acl': None, 'versions': 'foo',
+ 'partition': 1, 'bytes': None, 'storage_policy': '1',
+ 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0,
+ 'id': 0, 'device': 'sda', 'port': 1000},
+ {'zone': 1, 'ip': '10.0.0.1', 'region': 1,
+ 'id': 1, 'device': 'sdb', 'port': 1001},
+ {'zone': 2, 'ip': '10.0.0.2', 'region': 0,
+ 'id': 2, 'device': 'sdc', 'port': 1002}]}
+
+ def fake_list_iter(container, prefix, env):
+ object_list = [{'name': '1'}, {'name': '2'}, {'name': '3'}]
+ for obj in object_list:
+ yield obj
+
+ def fake_authorize(req):
+ # deny write access
+ authorize_call_count[0] += 1
+ return HTTPForbidden(req) # allow the request
+
+ with save_globals():
+ controller = ReplicatedObjectController(self.app, 'a', 'c', 'o')
+ controller.container_info = fake_container_info
+ # patching _listing_iter simulates request being authorized
+ # to list versions container
+ controller._listing_iter = fake_list_iter
+ set_http_connect(give_connect=test_connect)
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'DELETE',
+ 'swift.authorize': fake_authorize})
+
+ self.app.memcache.store = {}
+ self.app.update_request(req)
+ resp = controller.DELETE(req)
+ self.assertEqual(403, resp.status_int)
+ self.assertFalse(methods, methods)
+ self.assertEquals(authorize_call_count[0], 1)
+
def test_PUT_auto_content_type(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_content_type(filename, expected):
# The three responses here are for account_info() (HEAD to
@@ -1170,8 +2579,7 @@ class TestObjectController(unittest.TestCase):
fp.write('foo/bar foo\n')
proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
- FakeRing(), FakeRing(),
- FakeRing())
+ FakeRing(), FakeRing())
self.assertEquals(proxy_server.mimetypes.guess_type('blah.foo')[0],
'foo/bar')
self.assertEquals(proxy_server.mimetypes.guess_type('blah.jpg')[0],
@@ -1181,8 +2589,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
@@ -1197,11 +2605,12 @@ class TestObjectController(unittest.TestCase):
test_status_map((200, 200, 201, 201, 500), 201)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 200, 204, 500, 404), 503)
+ test_status_map((200, 200, 202, 202, 204), 204)
def test_PUT_connect_exceptions(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
@@ -1209,19 +2618,30 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
- res = controller.PUT(req)
+ try:
+ res = controller.PUT(req)
+ except HTTPException as res:
+ pass
expected = str(expected)
self.assertEquals(res.status[:len(expected)], expected)
- test_status_map((200, 200, 201, 201, -1), 201)
- test_status_map((200, 200, 201, 201, -2), 201) # expect timeout
- test_status_map((200, 200, 201, 201, -3), 201) # error limited
- test_status_map((200, 200, 201, -1, -1), 503)
- test_status_map((200, 200, 503, 503, -1), 503)
+ test_status_map((200, 200, 201, 201, -1), 201) # connect exc
+ # connect errors
+ test_status_map((200, 200, Timeout(), 201, 201, ), 201)
+ test_status_map((200, 200, 201, 201, Exception()), 201)
+ # expect errors
+ test_status_map((200, 200, (Timeout(), None), 201, 201), 201)
+ test_status_map((200, 200, (Exception(), None), 201, 201), 201)
+ # response errors
+ test_status_map((200, 200, (100, Timeout()), 201, 201), 201)
+ test_status_map((200, 200, (100, Exception()), 201, 201), 201)
+ test_status_map((200, 200, 507, 201, 201), 201) # error limited
+ test_status_map((200, 200, -1, 201, -1), 503)
+ test_status_map((200, 200, 503, -1, 503), 503)
def test_PUT_send_exceptions(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
@@ -1230,7 +2650,10 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'PUT'},
body='some data')
self.app.update_request(req)
- res = controller.PUT(req)
+ try:
+ res = controller.PUT(req)
+ except HTTPException as res:
+ pass
expected = str(expected)
self.assertEquals(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, -1, 201), 201)
@@ -1240,10 +2663,10 @@ class TestObjectController(unittest.TestCase):
def test_PUT_max_size(self):
with save_globals():
set_http_connect(201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
- 'Content-Length': str(MAX_FILE_SIZE + 1),
+ 'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = controller.PUT(req)
@@ -1252,8 +2675,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_bad_content_type(self):
with save_globals():
set_http_connect(201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': 0, 'Content-Type': 'foo/bar;swift_hey=45'})
self.app.update_request(req)
@@ -1263,8 +2686,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_getresponse_exceptions(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
@@ -1272,7 +2695,10 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
- res = controller.PUT(req)
+ try:
+ res = controller.PUT(req)
+ except HTTPException as res:
+ pass
expected = str(expected)
self.assertEquals(res.status[:len(str(expected))],
str(expected))
@@ -1301,6 +2727,137 @@ class TestObjectController(unittest.TestCase):
test_status_map((200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 404, 404, 404), 404)
+ @patch_policies([
+ StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', object_ring=FakeRing()),
+ ])
+ def test_POST_backend_headers(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
+ self.app.object_post_as_copy = False
+ self.app.sort_nodes = lambda nodes: nodes
+ backend_requests = []
+
+ def capture_requests(ip, port, method, path, headers, *args,
+ **kwargs):
+ backend_requests.append((method, path, headers))
+
+ req = Request.blank('/v1/a/c/o', {}, method='POST',
+ headers={'X-Object-Meta-Color': 'Blue'})
+
+ # we want the container_info response to says a policy index of 1
+ resp_headers = {'X-Backend-Storage-Policy-Index': 1}
+ with mocked_http_conn(
+ 200, 200, 202, 202, 202,
+ headers=resp_headers, give_connect=capture_requests
+ ) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(len(backend_requests), 5)
+
+ def check_request(req, method, path, headers=None):
+ req_method, req_path, req_headers = req
+ self.assertEqual(method, req_method)
+ # caller can ignore leading path parts
+ self.assertTrue(req_path.endswith(path),
+ 'expected path to end with %s, it was %s' % (
+ path, req_path))
+ headers = headers or {}
+ # caller can ignore some headers
+ for k, v in headers.items():
+ self.assertEqual(req_headers[k], v)
+ account_request = backend_requests.pop(0)
+ check_request(account_request, method='HEAD', path='/sda/0/a')
+ container_request = backend_requests.pop(0)
+ check_request(container_request, method='HEAD', path='/sda/0/a/c')
+ # make sure backend requests included expected container headers
+ container_headers = {}
+ for request in backend_requests:
+ req_headers = request[2]
+ device = req_headers['x-container-device']
+ host = req_headers['x-container-host']
+ container_headers[device] = host
+ expectations = {
+ 'method': 'POST',
+ 'path': '/0/a/c/o',
+ 'headers': {
+ 'X-Container-Partition': '0',
+ 'Connection': 'close',
+ 'User-Agent': 'proxy-server %s' % os.getpid(),
+ 'Host': 'localhost:80',
+ 'Referer': 'POST http://localhost/v1/a/c/o',
+ 'X-Object-Meta-Color': 'Blue',
+ 'X-Backend-Storage-Policy-Index': '1'
+ },
+ }
+ check_request(request, **expectations)
+
+ expected = {}
+ for i, device in enumerate(['sda', 'sdb', 'sdc']):
+ expected[device] = '10.0.0.%d:100%d' % (i, i)
+ self.assertEqual(container_headers, expected)
+
+ # and again with policy override
+ self.app.memcache.store = {}
+ backend_requests = []
+ req = Request.blank('/v1/a/c/o', {}, method='POST',
+ headers={'X-Object-Meta-Color': 'Blue',
+ 'X-Backend-Storage-Policy-Index': 0})
+ with mocked_http_conn(
+ 200, 200, 202, 202, 202,
+ headers=resp_headers, give_connect=capture_requests
+ ) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(len(backend_requests), 5)
+ for request in backend_requests[2:]:
+ expectations = {
+ 'method': 'POST',
+ 'path': '/0/a/c/o', # ignore device bit
+ 'headers': {
+ 'X-Object-Meta-Color': 'Blue',
+ 'X-Backend-Storage-Policy-Index': '0',
+ }
+ }
+ check_request(request, **expectations)
+
+ # and this time with post as copy
+ self.app.object_post_as_copy = True
+ self.app.memcache.store = {}
+ backend_requests = []
+ req = Request.blank('/v1/a/c/o', {}, method='POST',
+ headers={'X-Object-Meta-Color': 'Blue',
+ 'X-Backend-Storage-Policy-Index': 0})
+ with mocked_http_conn(
+ 200, 200, 200, 200, 200, 201, 201, 201,
+ headers=resp_headers, give_connect=capture_requests
+ ) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(len(backend_requests), 8)
+ policy0 = {'X-Backend-Storage-Policy-Index': '0'}
+ policy1 = {'X-Backend-Storage-Policy-Index': '1'}
+ expected = [
+ # account info
+ {'method': 'HEAD', 'path': '/0/a'},
+ # container info
+ {'method': 'HEAD', 'path': '/0/a/c'},
+ # x-newests
+ {'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
+ {'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
+ {'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
+ # new writes
+ {'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
+ {'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
+ {'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
+ ]
+ for request, expectations in zip(backend_requests, expected):
+ check_request(request, **expectations)
+
def test_POST_as_copy(self):
with save_globals():
def test_status_map(statuses, expected):
@@ -1333,9 +2890,9 @@ class TestObjectController(unittest.TestCase):
test_status_map((200, 200, 204, 204, 204), 204)
test_status_map((200, 200, 204, 204, 500), 204)
test_status_map((200, 200, 204, 404, 404), 404)
- test_status_map((200, 200, 204, 500, 404), 503)
+ test_status_map((200, 204, 500, 500, 404), 503)
test_status_map((200, 200, 404, 404, 404), 404)
- test_status_map((200, 200, 404, 404, 500), 404)
+ test_status_map((200, 200, 400, 400, 400), 400)
def test_HEAD(self):
with save_globals():
@@ -1388,6 +2945,8 @@ class TestObjectController(unittest.TestCase):
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
+ test_status_map((200, 200, 404, 404, 200), 200, ('0', '0', None,
+ None, '1'), '1')
def test_GET_newest(self):
with save_globals():
@@ -1443,10 +3002,10 @@ class TestObjectController(unittest.TestCase):
def test_POST_meta_val_len(self):
with save_globals():
- limit = MAX_META_VALUE_LENGTH
+ limit = constraints.MAX_META_VALUE_LENGTH
self.app.object_post_as_copy = False
- proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
@@ -1466,7 +3025,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_as_copy_meta_val_len(self):
with save_globals():
- limit = MAX_META_VALUE_LENGTH
+ limit = constraints.MAX_META_VALUE_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
@@ -1486,7 +3045,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_meta_key_len(self):
with save_globals():
- limit = MAX_META_NAME_LENGTH
+ limit = constraints.MAX_META_NAME_LENGTH
self.app.object_post_as_copy = False
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
@@ -1508,7 +3067,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_as_copy_meta_key_len(self):
with save_globals():
- limit = MAX_META_NAME_LENGTH
+ limit = constraints.MAX_META_NAME_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank(
@@ -1529,7 +3088,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_meta_count(self):
with save_globals():
- limit = MAX_META_COUNT
+ limit = constraints.MAX_META_COUNT
headers = dict(
(('X-Object-Meta-' + str(i), 'a') for i in xrange(limit + 1)))
headers.update({'Content-Type': 'foo/bar'})
@@ -1542,7 +3101,7 @@ class TestObjectController(unittest.TestCase):
def test_POST_meta_size(self):
with save_globals():
- limit = MAX_META_OVERALL_SIZE
+ limit = constraints.MAX_META_OVERALL_SIZE
count = limit / 256 # enough to cause the limit to be reached
headers = dict(
(('X-Object-Meta-' + str(i), 'a' * 256)
@@ -1601,15 +3160,16 @@ class TestObjectController(unittest.TestCase):
def test_client_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
- for dev in self.app.container_ring.devs.values():
+ for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
- self.app.object_ring.get_nodes('account')
- for dev in self.app.object_ring.devs.values():
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
@@ -1635,7 +3195,7 @@ class TestObjectController(unittest.TestCase):
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
- self.app.client_timeout = 0.1
+ self.app.client_timeout = 0.05
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
@@ -1650,15 +3210,16 @@ class TestObjectController(unittest.TestCase):
def test_client_disconnect(self):
with save_globals():
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
- for dev in self.app.container_ring.devs.values():
+ for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
- self.app.object_ring.get_nodes('account')
- for dev in self.app.object_ring.devs.values():
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
@@ -1684,15 +3245,16 @@ class TestObjectController(unittest.TestCase):
def test_node_read_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
- for dev in self.app.container_ring.devs.values():
+ for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
- self.app.object_ring.get_nodes('account')
- for dev in self.app.object_ring.devs.values():
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
@@ -1718,6 +3280,19 @@ class TestObjectController(unittest.TestCase):
def test_node_read_timeout_retry(self):
with save_globals():
+ self.app.account_ring.get_nodes('account')
+ for dev in self.app.account_ring.devs:
+ dev['ip'] = '127.0.0.1'
+ dev['port'] = 1
+ self.app.container_ring.get_nodes('account')
+ for dev in self.app.container_ring.devs:
+ dev['ip'] = '127.0.0.1'
+ dev['port'] = 1
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
+ dev['ip'] = '127.0.0.1'
+ dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
@@ -1775,15 +3350,16 @@ class TestObjectController(unittest.TestCase):
def test_node_write_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
- for dev in self.app.container_ring.devs.values():
+ for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
- self.app.object_ring.get_nodes('account')
- for dev in self.app.object_ring.devs.values():
+ object_ring = self.app.get_object_ring(None)
+ object_ring.get_nodes('account')
+ for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o',
@@ -1806,87 +3382,149 @@ class TestObjectController(unittest.TestCase):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 503)
+ def test_node_request_setting(self):
+ baseapp = proxy_server.Application({'request_node_count': '3'},
+ FakeMemcache(),
+ container_ring=FakeRing(),
+ account_ring=FakeRing())
+ self.assertEquals(baseapp.request_node_count(3), 3)
+
def test_iter_nodes(self):
with save_globals():
try:
- self.app.object_ring.max_more_nodes = 2
- partition, nodes = self.app.object_ring.get_nodes('account',
- 'container',
- 'object')
+ object_ring = self.app.get_object_ring(None)
+ object_ring.max_more_nodes = 2
+ partition, nodes = object_ring.get_nodes('account',
+ 'container',
+ 'object')
collected_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring,
+ for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEquals(len(collected_nodes), 5)
- self.app.object_ring.max_more_nodes = 20
+ object_ring.max_more_nodes = 20
self.app.request_node_count = lambda r: 20
- partition, nodes = self.app.object_ring.get_nodes('account',
- 'container',
- 'object')
+ partition, nodes = object_ring.get_nodes('account',
+ 'container',
+ 'object')
collected_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring,
+ for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEquals(len(collected_nodes), 9)
+ # zero error-limited primary nodes -> no handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
- self.app.object_ring.max_more_nodes = 2
- partition, nodes = self.app.object_ring.get_nodes('account',
- 'container',
- 'object')
+ self.app.request_node_count = lambda r: 7
+ object_ring.max_more_nodes = 20
+ partition, nodes = object_ring.get_nodes('account',
+ 'container',
+ 'object')
collected_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring,
- partition):
+ for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
- self.assertEquals(len(collected_nodes), 5)
- self.assertEquals(
- self.app.logger.log_dict['warning'],
- [(('Handoff requested (1)',), {}),
- (('Handoff requested (2)',), {})])
+ self.assertEquals(len(collected_nodes), 7)
+ self.assertEquals(self.app.logger.log_dict['warning'], [])
+ self.assertEquals(self.app.logger.get_increments(), [])
- self.app.log_handoffs = False
+ # one error-limited primary node -> one handoff warning
+ self.app.log_handoffs = True
self.app.logger = FakeLogger()
- self.app.object_ring.max_more_nodes = 2
- partition, nodes = self.app.object_ring.get_nodes('account',
- 'container',
- 'object')
+ self.app.request_node_count = lambda r: 7
+ self.app._error_limiting = {} # clear out errors
+ set_node_errors(self.app, object_ring._devs[0], 999,
+ last_error=(2 ** 63 - 1))
+
collected_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring,
- partition):
+ for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
- self.assertEquals(len(collected_nodes), 5)
- self.assertEquals(self.app.logger.log_dict['warning'], [])
+ self.assertEquals(len(collected_nodes), 7)
+ self.assertEquals(self.app.logger.log_dict['warning'], [
+ (('Handoff requested (5)',), {})])
+ self.assertEquals(self.app.logger.get_increments(),
+ ['handoff_count'])
+
+ # two error-limited primary nodes -> two handoff warnings
+ self.app.log_handoffs = True
+ self.app.logger = FakeLogger()
+ self.app.request_node_count = lambda r: 7
+ self.app._error_limiting = {} # clear out errors
+ for i in range(2):
+ set_node_errors(self.app, object_ring._devs[i], 999,
+ last_error=(2 ** 63 - 1))
+
+ collected_nodes = []
+ for node in self.app.iter_nodes(object_ring, partition):
+ collected_nodes.append(node)
+ self.assertEquals(len(collected_nodes), 7)
+ self.assertEquals(self.app.logger.log_dict['warning'], [
+ (('Handoff requested (5)',), {}),
+ (('Handoff requested (6)',), {})])
+ self.assertEquals(self.app.logger.get_increments(),
+ ['handoff_count',
+ 'handoff_count'])
+
+ # all error-limited primary nodes -> four handoff warnings,
+ # plus a handoff-all metric
+ self.app.log_handoffs = True
+ self.app.logger = FakeLogger()
+ self.app.request_node_count = lambda r: 10
+ object_ring.set_replicas(4) # otherwise we run out of handoffs
+ self.app._error_limiting = {} # clear out errors
+ for i in range(4):
+ set_node_errors(self.app, object_ring._devs[i], 999,
+ last_error=(2 ** 63 - 1))
+
+ collected_nodes = []
+ for node in self.app.iter_nodes(object_ring, partition):
+ collected_nodes.append(node)
+ self.assertEquals(len(collected_nodes), 10)
+ self.assertEquals(self.app.logger.log_dict['warning'], [
+ (('Handoff requested (7)',), {}),
+ (('Handoff requested (8)',), {}),
+ (('Handoff requested (9)',), {}),
+ (('Handoff requested (10)',), {})])
+ self.assertEquals(self.app.logger.get_increments(),
+ ['handoff_count',
+ 'handoff_count',
+ 'handoff_count',
+ 'handoff_count',
+ 'handoff_all_count'])
+
finally:
- self.app.object_ring.max_more_nodes = 0
+ object_ring.max_more_nodes = 0
def test_iter_nodes_calls_sort_nodes(self):
with mock.patch.object(self.app, 'sort_nodes') as sort_nodes:
- for node in self.app.iter_nodes(self.app.object_ring, 0):
+ object_ring = self.app.get_object_ring(None)
+ for node in self.app.iter_nodes(object_ring, 0):
pass
sort_nodes.assert_called_once_with(
- self.app.object_ring.get_part_nodes(0))
+ object_ring.get_part_nodes(0))
def test_iter_nodes_skips_error_limited(self):
with mock.patch.object(self.app, 'sort_nodes', lambda n: n):
- first_nodes = list(self.app.iter_nodes(self.app.object_ring, 0))
- second_nodes = list(self.app.iter_nodes(self.app.object_ring, 0))
+ object_ring = self.app.get_object_ring(None)
+ first_nodes = list(self.app.iter_nodes(object_ring, 0))
+ second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] in second_nodes)
self.app.error_limit(first_nodes[0], 'test')
- second_nodes = list(self.app.iter_nodes(self.app.object_ring, 0))
+ second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] not in second_nodes)
def test_iter_nodes_gives_extra_if_error_limited_inline(self):
+ object_ring = self.app.get_object_ring(None)
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 6),
- mock.patch.object(self.app.object_ring, 'max_more_nodes', 99)):
- first_nodes = list(self.app.iter_nodes(self.app.object_ring, 0))
+ mock.patch.object(object_ring, 'max_more_nodes', 99)):
+ first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = []
- for node in self.app.iter_nodes(self.app.object_ring, 0):
+ for node in self.app.iter_nodes(object_ring, 0):
if not second_nodes:
self.app.error_limit(node, 'test')
second_nodes.append(node)
@@ -1894,12 +3532,14 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(len(second_nodes), 7)
def test_iter_nodes_with_custom_node_iter(self):
- node_list = [dict(id=n) for n in xrange(10)]
+ object_ring = self.app.get_object_ring(None)
+ node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D')
+ for n in xrange(10)]
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 3)):
- got_nodes = list(self.app.iter_nodes(self.app.object_ring, 0,
+ got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list[:3], got_nodes)
@@ -1907,13 +3547,13 @@ class TestObjectController(unittest.TestCase):
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 1000000)):
- got_nodes = list(self.app.iter_nodes(self.app.object_ring, 0,
+ got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list, got_nodes)
def test_best_response_sets_headers(self):
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object', headers=[{'X-Test': '1'},
@@ -1922,8 +3562,8 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers['X-Test'], '1')
def test_best_response_sets_etag(self):
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object')
@@ -1956,8 +3596,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertEquals(resp.status_int, 200)
@@ -1969,21 +3609,26 @@ class TestObjectController(unittest.TestCase):
def test_error_limiting(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
+ object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
- self.assertEquals(controller.app.object_ring.devs[0]['errors'], 2)
- self.assert_('last_error' in controller.app.object_ring.devs[0])
+ self.assertEquals(
+ node_error_count(controller.app, object_ring.devs[0]), 2)
+ self.assert_(node_last_error(controller.app, object_ring.devs[0])
+ is not None)
for _junk in xrange(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
- self.assertEquals(controller.app.object_ring.devs[0]['errors'],
- self.app.error_suppression_limit + 1)
+ self.assertEquals(
+ node_error_count(controller.app, object_ring.devs[0]),
+ self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
- self.assert_('last_error' in controller.app.object_ring.devs[0])
+ self.assert_(node_last_error(controller.app, object_ring.devs[0])
+ is not None)
self.assert_status_map(controller.PUT, (200, 200, 200, 201, 201,
201), 503)
self.assert_status_map(controller.POST,
@@ -1999,17 +3644,78 @@ class TestObjectController(unittest.TestCase):
(200, 200, 200, 204, 204, 204), 503,
raise_exc=True)
+ def test_error_limiting_survives_ring_reload(self):
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ controller.app.sort_nodes = lambda l: l
+ object_ring = controller.app.get_object_ring(None)
+ self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
+ 200)
+ self.assertEquals(
+ node_error_count(controller.app, object_ring.devs[0]), 2)
+ self.assert_(node_last_error(controller.app, object_ring.devs[0])
+ is not None)
+ for _junk in xrange(self.app.error_suppression_limit):
+ self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
+ 503), 503)
+ self.assertEquals(
+ node_error_count(controller.app, object_ring.devs[0]),
+ self.app.error_suppression_limit + 1)
+
+ # wipe out any state in the ring
+ for policy in POLICIES:
+ policy.object_ring = FakeRing(base_port=3000)
+
+ # and we still get an error, which proves that the
+ # error-limiting info survived a ring reload
+ self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
+ 503)
+
+ def test_PUT_error_limiting(self):
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ controller.app.sort_nodes = lambda l: l
+ object_ring = controller.app.get_object_ring(None)
+ # acc con obj obj obj
+ self.assert_status_map(controller.PUT, (200, 200, 503, 200, 200),
+ 200)
+
+ # 2, not 1, because assert_status_map() calls the method twice
+ odevs = object_ring.devs
+ self.assertEquals(node_error_count(controller.app, odevs[0]), 2)
+ self.assertEquals(node_error_count(controller.app, odevs[1]), 0)
+ self.assertEquals(node_error_count(controller.app, odevs[2]), 0)
+ self.assert_(node_last_error(controller.app, odevs[0]) is not None)
+ self.assert_(node_last_error(controller.app, odevs[1]) is None)
+ self.assert_(node_last_error(controller.app, odevs[2]) is None)
+
+ def test_PUT_error_limiting_last_node(self):
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ controller.app.sort_nodes = lambda l: l
+ object_ring = controller.app.get_object_ring(None)
+ # acc con obj obj obj
+ self.assert_status_map(controller.PUT, (200, 200, 200, 200, 503),
+ 200)
+
+ # 2, not 1, because assert_status_map() calls the method twice
+ odevs = object_ring.devs
+ self.assertEquals(node_error_count(controller.app, odevs[0]), 0)
+ self.assertEquals(node_error_count(controller.app, odevs[1]), 0)
+ self.assertEquals(node_error_count(controller.app, odevs[2]), 2)
+ self.assert_(node_last_error(controller.app, odevs[0]) is None)
+ self.assert_(node_last_error(controller.app, odevs[1]) is None)
+ self.assert_(node_last_error(controller.app, odevs[2]) is not None)
+
def test_acc_or_con_missing_returns_404(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
- for dev in self.app.account_ring.devs.values():
- del dev['errors']
- del dev['last_error']
- for dev in self.app.container_ring.devs.values():
- del dev['errors']
- del dev['last_error']
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ self.app._error_limiting = {}
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
@@ -2073,9 +3779,10 @@ class TestObjectController(unittest.TestCase):
resp = getattr(controller, 'DELETE')(req)
self.assertEquals(resp.status_int, 404)
- for dev in self.app.account_ring.devs.values():
- dev['errors'] = self.app.error_suppression_limit + 1
- dev['last_error'] = time.time()
+ for dev in self.app.account_ring.devs:
+ set_node_errors(
+ self.app, dev, self.app.error_suppression_limit + 1,
+ time.time())
set_http_connect(200)
# acct [isn't actually called since everything
# is error limited]
@@ -2085,11 +3792,12 @@ class TestObjectController(unittest.TestCase):
resp = getattr(controller, 'DELETE')(req)
self.assertEquals(resp.status_int, 404)
- for dev in self.app.account_ring.devs.values():
- dev['errors'] = 0
- for dev in self.app.container_ring.devs.values():
- dev['errors'] = self.app.error_suppression_limit + 1
- dev['last_error'] = time.time()
+ for dev in self.app.account_ring.devs:
+ set_node_errors(self.app, dev, 0, last_error=None)
+ for dev in self.app.container_ring.devs:
+ set_node_errors(self.app, dev,
+ self.app.error_suppression_limit + 1,
+ time.time())
set_http_connect(200, 200)
# acct cont [isn't actually called since
# everything is error limited]
@@ -2103,8 +3811,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
self.app.object_post_as_copy = False
self.app.memcache = FakeMemcacheReturnsNone()
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
@@ -2124,8 +3832,8 @@ class TestObjectController(unittest.TestCase):
def test_PUT_POST_as_copy_requires_container_exist(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
@@ -2142,8 +3850,8 @@ class TestObjectController(unittest.TestCase):
def test_bad_metadata(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@@ -2153,18 +3861,21 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
- headers={'Content-Length': '0',
- 'X-Object-Meta-' + ('a' *
- MAX_META_NAME_LENGTH): 'v'})
+ req = Request.blank(
+ '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Object-Meta-' + (
+ 'a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
- headers={'Content-Length': '0',
- 'X-Object-Meta-' + ('a' *
- (MAX_META_NAME_LENGTH + 1)): 'v'})
+ req = Request.blank(
+ '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={
+ 'Content-Length': '0',
+ 'X-Object-Meta-' + (
+ 'a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 400)
@@ -2173,22 +3884,23 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
- MAX_META_VALUE_LENGTH})
+ constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
- headers={'Content-Length': '0',
- 'X-Object-Meta-Too-Long': 'a' *
- (MAX_META_VALUE_LENGTH + 1)})
+ req = Request.blank(
+ '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Object-Meta-Too-Long': 'a' *
+ (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
- for x in xrange(MAX_META_COUNT):
+ for x in xrange(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
@@ -2197,7 +3909,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
- for x in xrange(MAX_META_COUNT + 1):
+ for x in xrange(constraints.MAX_META_COUNT + 1):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
@@ -2207,17 +3919,17 @@ class TestObjectController(unittest.TestCase):
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
- header_value = 'a' * MAX_META_VALUE_LENGTH
+ header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
- while size < MAX_META_OVERALL_SIZE - 4 - \
- MAX_META_VALUE_LENGTH:
- size += 4 + MAX_META_VALUE_LENGTH
+ while size < constraints.MAX_META_OVERALL_SIZE - 4 - \
+ constraints.MAX_META_VALUE_LENGTH:
+ size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Object-Meta-%04d' % x] = header_value
x += 1
- if MAX_META_OVERALL_SIZE - size > 1:
+ if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Object-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size - 1)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
@@ -2225,7 +3937,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Object-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
@@ -2235,8 +3947,8 @@ class TestObjectController(unittest.TestCase):
@contextmanager
def controller_context(self, req, *args, **kwargs):
_v, account, container, obj = utils.split_path(req.path, 4, 4, True)
- controller = proxy_server.ObjectController(self.app, account,
- container, obj)
+ controller = ReplicatedObjectController(
+ self.app, account, container, obj)
self.app.update_request(req)
self.app.memcache.store = {}
with save_globals():
@@ -2263,6 +3975,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ def test_basic_put_with_x_copy_from_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': 'c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_basic_put_with_x_copy_from_across_container(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2274,6 +3999,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c2/o')
+ def test_basic_put_with_x_copy_from_across_container_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': 'c2/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c2/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_non_zero_content_length(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5',
@@ -2284,6 +4022,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 400)
+ def test_copy_non_zero_content_length_with_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '5',
+ 'X-Copy-From': 'c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200)
+ # acct cont
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 400)
+
def test_copy_with_slashes_in_x_copy_from(self):
# extra source path parsing
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@@ -2296,6 +4045,20 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ def test_copy_with_slashes_in_x_copy_from_and_account(self):
+ # extra source path parsing
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': 'c/o/o2',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_with_spaces_in_x_copy_from(self):
# space in soure path
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@@ -2308,6 +4071,20 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o%20o2')
+ def test_copy_with_spaces_in_x_copy_from_and_account(self):
+ # space in source path
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': 'c/o%20o2',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o%20o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_with_leading_slash_in_x_copy_from(self):
# repeat tests with leading /
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
@@ -2320,6 +4097,20 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ def test_copy_with_leading_slash_in_x_copy_from_and_account(self):
+ # repeat tests with leading /
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2331,6 +4122,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o/o2',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acc1 con1 objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_copy_with_no_object_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2346,6 +4150,22 @@ class TestObjectController(unittest.TestCase):
raise self.fail('Invalid X-Copy-From did not raise '
'client error')
+ def test_copy_with_no_object_in_x_copy_from_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200)
+ # acct cont
+ with self.controller_context(req, *status_list) as controller:
+ try:
+ controller.PUT(req)
+ except HTTPException as resp:
+ self.assertEquals(resp.status_int // 100, 4) # client error
+ else:
+ raise self.fail('Invalid X-Copy-From did not raise '
+ 'client error')
+
def test_copy_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2356,6 +4176,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 503)
+ def test_copy_server_error_reading_source_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 503, 503, 503)
+ # acct cont acct cont objc objc objc
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 503)
+
def test_copy_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2367,6 +4198,18 @@ class TestObjectController(unittest.TestCase):
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 404)
+ def test_copy_not_found_reading_source_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Copy-From-Account': 'a'})
+ # not found
+ status_list = (200, 200, 200, 200, 404, 404, 404)
+ # acct cont acct cont objc objc objc
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 404)
+
def test_copy_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2377,6 +4220,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 201)
+ def test_copy_with_some_missing_sources_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Copy-From-Account': 'a'})
+ status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+
def test_copy_with_object_metadata(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
@@ -2392,16 +4246,33 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEquals(resp.headers.get('x-delete-at'), '9876543210')
+ def test_copy_with_object_metadata_and_account(self):
+ req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0',
+ 'X-Copy-From': '/c/o',
+ 'X-Object-Meta-Ours': 'okay',
+ 'X-Copy-From-Account': 'a'})
+ # test object metadata
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.PUT(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers.get('x-object-meta-test'), 'testing')
+ self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
+ self.assertEquals(resp.headers.get('x-delete-at'), '9876543210')
+
+ @_limit_max_file_size
def test_copy_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
-
# copy-from object is too large to fit in target object
+
class LargeResponseBody(object):
def __len__(self):
- return MAX_FILE_SIZE + 1
+ return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
@@ -2415,7 +4286,10 @@ class TestObjectController(unittest.TestCase):
self.app.update_request(req)
self.app.memcache.store = {}
- resp = controller.PUT(req)
+ try:
+ resp = controller.PUT(req)
+ except HTTPException as resp:
+ pass
self.assertEquals(resp.status_int, 413)
def test_basic_COPY(self):
@@ -2429,6 +4303,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ def test_basic_COPY_account(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': 'c1/o2',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_COPY_across_containers(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2451,6 +4338,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ def test_COPY_account_source_with_slashes_in_name(self):
+ req = Request.blank('/v1/a/c/o/o2',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': 'c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_COPY_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2462,6 +4362,19 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ def test_COPY_account_destination_leading_slash(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_COPY_source_with_slashes_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2473,14 +4386,35 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ def test_COPY_account_source_with_slashes_destination_leading_slash(self):
+ req = Request.blank('/v1/a/c/o/o2',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
+ self.assertEquals(resp.headers['x-copied-from-account'], 'a')
+
def test_COPY_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o'})
status_list = [] # no requests needed
with self.controller_context(req, *status_list) as controller:
- resp = controller.COPY(req)
- self.assertEquals(resp.status_int, 412)
+ self.assertRaises(HTTPException, controller.COPY, req)
+
+ def test_COPY_account_no_object_in_destination(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': 'c_o',
+ 'Destination-Account': 'a1'})
+ status_list = [] # no requests needed
+ with self.controller_context(req, *status_list) as controller:
+ self.assertRaises(HTTPException, controller.COPY, req)
def test_COPY_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o',
@@ -2492,6 +4426,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.COPY(req)
self.assertEquals(resp.status_int, 503)
+ def test_COPY_account_server_error_reading_source(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 503, 503, 503)
+ # acct cont acct cont objc objc objc
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 503)
+
def test_COPY_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2502,6 +4447,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.COPY(req)
self.assertEquals(resp.status_int, 404)
+ def test_COPY_account_not_found_reading_source(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 404, 404, 404)
+ # acct cont acct cont objc objc objc
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 404)
+
def test_COPY_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2512,6 +4468,17 @@ class TestObjectController(unittest.TestCase):
resp = controller.COPY(req)
self.assertEquals(resp.status_int, 201)
+ def test_COPY_account_with_some_missing_sources(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+
def test_COPY_with_metadata(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2527,6 +4494,23 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEquals(resp.headers.get('x-delete-at'), '9876543210')
+ def test_COPY_account_with_metadata(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'X-Object-Meta-Ours': 'okay',
+ 'Destination-Account': 'a1'})
+ status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
+ # acct cont acct cont objc objc objc obj obj obj
+ with self.controller_context(req, *status_list) as controller:
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers.get('x-object-meta-test'),
+ 'testing')
+ self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
+ self.assertEquals(resp.headers.get('x-delete-at'), '9876543210')
+
+ @_limit_max_file_size
def test_COPY_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
@@ -2535,7 +4519,7 @@ class TestObjectController(unittest.TestCase):
class LargeResponseBody(object):
def __len__(self):
- return MAX_FILE_SIZE + 1
+ return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
@@ -2546,12 +4530,43 @@ class TestObjectController(unittest.TestCase):
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
- resp = controller.COPY(req)
+ try:
+ resp = controller.COPY(req)
+ except HTTPException as resp:
+ pass
+ self.assertEquals(resp.status_int, 413)
+
+ @_limit_max_file_size
+ def test_COPY_account_source_larger_than_max_file_size(self):
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+
+ class LargeResponseBody(object):
+
+ def __len__(self):
+ return constraints.MAX_FILE_SIZE + 1
+
+ def __getitem__(self, key):
+ return ''
+
+ copy_from_obj_body = LargeResponseBody()
+ status_list = (200, 200, 200, 200, 200)
+ # acct cont objc objc objc
+ kwargs = dict(body=copy_from_obj_body)
+ with self.controller_context(req, *status_list,
+ **kwargs) as controller:
+ try:
+ resp = controller.COPY(req)
+ except HTTPException as resp:
+ pass
self.assertEquals(resp.status_int, 413)
def test_COPY_newest(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
@@ -2567,30 +4582,86 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers['x-copied-from-last-modified'],
'3')
+ def test_COPY_account_newest(self):
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+ req.account = 'a'
+ controller.object_name = 'o'
+ set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
+ #act cont acct cont objc objc objc obj obj obj
+ timestamps=('1', '1', '1', '1', '3', '2', '1',
+ '4', '4', '4'))
+ self.app.memcache.store = {}
+ resp = controller.COPY(req)
+ self.assertEquals(resp.status_int, 201)
+ self.assertEquals(resp.headers['x-copied-from-last-modified'],
+ '3')
+
def test_COPY_delete_at(self):
with save_globals():
- given_headers = {}
+ backend_requests = []
- def fake_connect_put_node(nodes, part, path, headers,
- logger_thread_locals):
- given_headers.update(headers)
+ def capture_requests(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ backend_requests.append((method, path, headers))
- controller = proxy_server.ObjectController(self.app, 'a',
- 'c', 'o')
- controller._connect_put_node = fake_connect_put_node
- set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
+ set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
+ give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
self.app.update_request(req)
- controller.COPY(req)
- self.assertEquals(given_headers.get('X-Delete-At'), '9876543210')
- self.assertTrue('X-Delete-At-Host' in given_headers)
- self.assertTrue('X-Delete-At-Device' in given_headers)
- self.assertTrue('X-Delete-At-Partition' in given_headers)
- self.assertTrue('X-Delete-At-Container' in given_headers)
+ resp = controller.COPY(req)
+ self.assertEqual(201, resp.status_int) # sanity
+ for method, path, given_headers in backend_requests:
+ if method != 'PUT':
+ continue
+ self.assertEquals(given_headers.get('X-Delete-At'),
+ '9876543210')
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
+
+ def test_COPY_account_delete_at(self):
+ with save_globals():
+ backend_requests = []
+
+ def capture_requests(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ backend_requests.append((method, path, headers))
+
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
+ set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
+ give_connect=capture_requests)
+ self.app.memcache.store = {}
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'COPY'},
+ headers={'Destination': '/c1/o',
+ 'Destination-Account': 'a1'})
+
+ self.app.update_request(req)
+ resp = controller.COPY(req)
+ self.assertEqual(201, resp.status_int) # sanity
+ for method, path, given_headers in backend_requests:
+ if method != 'PUT':
+ continue
+ self.assertEquals(given_headers.get('X-Delete-At'),
+ '9876543210')
+ self.assertTrue('X-Delete-At-Host' in given_headers)
+ self.assertTrue('X-Delete-At-Device' in given_headers)
+ self.assertTrue('X-Delete-At-Partition' in given_headers)
+ self.assertTrue('X-Delete-At-Container' in given_headers)
def test_chunked_put(self):
@@ -2615,8 +4686,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
set_http_connect(201, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Transfer-Encoding': 'chunked',
@@ -2637,17 +4708,16 @@ class TestObjectController(unittest.TestCase):
req.body_file = ChunkedFile(11)
self.app.memcache.store = {}
self.app.update_request(req)
- try:
- swift.proxy.controllers.obj.MAX_FILE_SIZE = 10
+
+ with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
res = controller.PUT(req)
self.assertEquals(res.status_int, 413)
- finally:
- swift.proxy.controllers.obj.MAX_FILE_SIZE = MAX_FILE_SIZE
+ @unpatch_policies
def test_chunked_put_bad_version(self):
# Check bad version
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n'
@@ -2657,10 +4727,11 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 412'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_bad_path(self):
# Check bad path
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n'
@@ -2670,10 +4741,11 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 404'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_bad_utf8(self):
# Check invalid utf-8
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n'
@@ -2684,10 +4756,11 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 412'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_bad_path_no_controller(self):
# Check bad path, no controller
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n'
@@ -2698,10 +4771,11 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 412'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_bad_method(self):
# Check bad method
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n'
@@ -2712,12 +4786,13 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 405'
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_chunked_put_unhandled_exception(self):
# Check unhandled exception
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
- obj2srv) = _test_servers
+ obj2srv, obj3srv) = _test_servers
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
orig_update_request = prosrv.update_request
def broken_update_request(*args, **kwargs):
@@ -2735,12 +4810,13 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(headers[:len(exp)], exp)
prosrv.update_request = orig_update_request
+ @unpatch_policies
def test_chunked_put_head_account(self):
# Head account, just a double check and really is here to test
# the part Application.log_request that 'enforces' a
# content_length on the response.
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
@@ -2752,6 +4828,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(headers[:len(exp)], exp)
self.assert_('\r\nContent-Length: 0\r\n' in headers)
+ @unpatch_policies
def test_chunked_put_utf8_all_the_way_down(self):
# Test UTF-8 Unicode all the way through the system
ustr = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xba \xe1\xbc\xb0\xce' \
@@ -2763,7 +4840,7 @@ class TestObjectController(unittest.TestCase):
ustr_short = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest'
# Create ustr container
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
@@ -2795,7 +4872,7 @@ class TestObjectController(unittest.TestCase):
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEquals(headers[:len(exp)], exp)
- listing = simplejson.loads(fd.read())
+ listing = json.loads(fd.read())
self.assert_(ustr.decode('utf8') in [l['name'] for l in listing])
# List account with ustr container (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -2843,7 +4920,7 @@ class TestObjectController(unittest.TestCase):
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEquals(headers[:len(exp)], exp)
- listing = simplejson.loads(fd.read())
+ listing = json.loads(fd.read())
self.assertEquals(listing[0]['name'], ustr.decode('utf8'))
# List ustr container with ustr object (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -2871,10 +4948,11 @@ class TestObjectController(unittest.TestCase):
self.assert_('\r\nX-Object-Meta-%s: %s\r\n' %
(quote(ustr_short).lower(), quote(ustr)) in headers)
+ @unpatch_policies
def test_chunked_put_chunked_put(self):
# Do chunked object put
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# Also happens to assert that x-storage-token is taken as a
@@ -2900,11 +4978,12 @@ class TestObjectController(unittest.TestCase):
body = fd.read()
self.assertEquals(body, 'oh hai123456789abcdef')
+ @unpatch_policies
def test_version_manifest(self, oc='versions', vc='vers', o='name'):
versions_to_create = 3
# Create a container for our versioned object testing
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
- obj2lis) = _test_sockets
+ obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
pre = quote('%03x' % len(o))
@@ -3108,18 +5187,22 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 404'
self.assertEquals(headers[:len(exp)], exp)
- # make sure manifest files don't get versioned
- sock = connect_tcp(('localhost', prolis.getsockname()[1]))
- fd = sock.makefile()
- fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
- 'localhost\r\nConnection: close\r\nX-Storage-Token: '
- 't\r\nContent-Length: 0\r\nContent-Type: text/jibberish0\r\n'
- 'Foo: barbaz\r\nX-Object-Manifest: %s/foo_\r\n\r\n'
- % (oc, vc, o))
- fd.flush()
- headers = readuntil2crlfs(fd)
- exp = 'HTTP/1.1 201'
- self.assertEquals(headers[:len(exp)], exp)
+ # make sure dlo manifest files don't get versioned
+ for _junk in xrange(1, versions_to_create):
+ sleep(.01) # guarantee that the timestamp changes
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
+ 'localhost\r\nConnection: close\r\nX-Storage-Token: '
+ 't\r\nContent-Length: 0\r\n'
+ 'Content-Type: text/jibberish0\r\n'
+ 'Foo: barbaz\r\nX-Object-Manifest: %s/%s/\r\n\r\n'
+ % (oc, o, oc, o))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ self.assertEquals(headers[:len(exp)], exp)
+
# Ensure we have no saved versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
@@ -3233,51 +5316,59 @@ class TestObjectController(unittest.TestCase):
exp = 'HTTP/1.1 2' # 2xx response
self.assertEquals(headers[:len(exp)], exp)
+ @unpatch_policies
def test_version_manifest_utf8(self):
oc = '0_oc_non_ascii\xc2\xa3'
vc = '0_vc_non_ascii\xc2\xa3'
o = '0_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_container(self):
oc = '1_oc_non_ascii\xc2\xa3'
vc = '1_vc_ascii'
o = '1_o_ascii'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_version_container(self):
oc = '2_oc_ascii'
vc = '2_vc_non_ascii\xc2\xa3'
o = '2_o_ascii'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_containers(self):
oc = '3_oc_non_ascii\xc2\xa3'
vc = '3_vc_non_ascii\xc2\xa3'
o = '3_o_ascii'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_object(self):
oc = '4_oc_ascii'
vc = '4_vc_ascii'
o = '4_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_version_container_utf_object(self):
oc = '5_oc_ascii'
vc = '5_vc_non_ascii\xc2\xa3'
o = '5_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_version_manifest_utf8_container_utf_object(self):
oc = '6_oc_non_ascii\xc2\xa3'
vc = '6_vc_ascii'
o = '6_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
+ @unpatch_policies
def test_conditional_range_get(self):
- (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis) = \
- _test_sockets
+ (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis,
+ obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# make a container
@@ -3325,8 +5416,8 @@ class TestObjectController(unittest.TestCase):
def test_mismatched_etags(self):
with save_globals():
# no etag supplied, object servers return success w/ diff values
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
@@ -3357,8 +5448,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.GET(req)
self.assert_('accept-ranges' in resp.headers)
@@ -3369,8 +5460,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assert_('accept-ranges' in resp.headers)
@@ -3384,8 +5475,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
@@ -3400,8 +5491,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
@@ -3417,8 +5508,8 @@ class TestObjectController(unittest.TestCase):
with save_globals():
self.app.object_post_as_copy = False
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
@@ -3435,8 +5526,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
@@ -3453,8 +5544,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
@@ -3470,8 +5561,8 @@ class TestObjectController(unittest.TestCase):
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
@@ -3482,9 +5573,10 @@ class TestObjectController(unittest.TestCase):
def test_POST_converts_delete_after_to_delete_at(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
+ self.app.object_post_as_copy = False
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ set_http_connect(200, 200, 202, 202, 202)
self.app.memcache.store = {}
orig_time = time.time
try:
@@ -3498,194 +5590,133 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(res.status, '202 Fake')
self.assertEquals(req.headers.get('x-delete-at'),
str(int(t + 60)))
-
- self.app.object_post_as_copy = False
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container',
- 'object')
- set_http_connect(200, 200, 202, 202, 202)
- self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': '60'})
- self.app.update_request(req)
- res = controller.POST(req)
- self.assertEquals(res.status, '202 Fake')
- self.assertEquals(req.headers.get('x-delete-at'),
- str(int(t + 60)))
finally:
time.time = orig_time
- def test_POST_non_int_delete_after(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
- self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': '60.1'})
- self.app.update_request(req)
- res = controller.POST(req)
- self.assertEquals(res.status, '400 Bad Request')
- self.assertTrue('Non-integer X-Delete-After' in res.body)
-
- def test_POST_negative_delete_after(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
- self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-After': '-60'})
- self.app.update_request(req)
- res = controller.POST(req)
- self.assertEquals(res.status, '400 Bad Request')
- self.assertTrue('X-Delete-At in past' in res.body)
-
- def test_POST_delete_at(self):
- with save_globals():
- given_headers = {}
-
- def fake_make_requests(req, ring, part, method, path, headers,
- query_string=''):
- given_headers.update(headers[0])
-
- self.app.object_post_as_copy = False
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- controller.make_requests = fake_make_requests
- set_http_connect(200, 200)
- self.app.memcache.store = {}
- t = str(int(time.time() + 100))
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- controller.POST(req)
- self.assertEquals(given_headers.get('X-Delete-At'), t)
- self.assertTrue('X-Delete-At-Host' in given_headers)
- self.assertTrue('X-Delete-At-Device' in given_headers)
- self.assertTrue('X-Delete-At-Partition' in given_headers)
- self.assertTrue('X-Delete-At-Container' in given_headers)
-
- t = str(int(time.time() + 100)) + '.1'
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- resp = controller.POST(req)
- self.assertEquals(resp.status_int, 400)
- self.assertTrue('Non-integer X-Delete-At' in resp.body)
+ @patch_policies([
+ StoragePolicy(0, 'zero', False, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', True, object_ring=FakeRing())
+ ])
+ def test_PUT_versioning_with_nonzero_default_policy(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
- t = str(int(time.time() - 100))
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
+ def test_connect(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ if method == "HEAD":
+ self.assertEquals(path, '/a/c/o.jpg')
+ self.assertNotEquals(None,
+ headers['X-Backend-Storage-Policy-Index'])
+ self.assertEquals(1, int(headers
+ ['X-Backend-Storage-Policy-Index']))
+
+ def fake_container_info(account, container, req):
+ return {'status': 200, 'sync_key': None, 'storage_policy': '1',
+ 'meta': {}, 'cors': {'allow_origin': None,
+ 'expose_headers': None,
+ 'max_age': None},
+ 'sysmeta': {}, 'read_acl': None, 'object_count': None,
+ 'write_acl': None, 'versions': 'c-versions',
+ 'partition': 1, 'bytes': None,
+ 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0,
+ 'id': 0, 'device': 'sda', 'port': 1000},
+ {'zone': 1, 'ip': '10.0.0.1', 'region': 1,
+ 'id': 1, 'device': 'sdb', 'port': 1001},
+ {'zone': 2, 'ip': '10.0.0.2', 'region': 0,
+ 'id': 2, 'device': 'sdc', 'port': 1002}]}
+ with save_globals():
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
+
+ controller.container_info = fake_container_info
+ set_http_connect(200, 200, 200, # head: for the last version
+ 200, 200, 200, # get: for the last version
+ 201, 201, 201, # put: move the current version
+ 201, 201, 201, # put: save the new version
+ give_connect=test_connect)
+ req = Request.blank('/v1/a/c/o.jpg',
+ environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Content-Length': '0'})
self.app.update_request(req)
- resp = controller.POST(req)
- self.assertEquals(resp.status_int, 400)
- self.assertTrue('X-Delete-At in past' in resp.body)
-
- def test_PUT_converts_delete_after_to_delete_at(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 201, 201, 201)
- self.app.memcache.store = {}
- orig_time = time.time
- try:
- t = time.time()
- time.time = lambda: t
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-After': '60'})
- self.app.update_request(req)
- res = controller.PUT(req)
- self.assertEquals(res.status, '201 Fake')
- self.assertEquals(req.headers.get('x-delete-at'),
- str(int(t + 60)))
- finally:
- time.time = orig_time
-
- def test_PUT_non_int_delete_after(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 201, 201, 201)
self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-After': '60.1'})
- self.app.update_request(req)
res = controller.PUT(req)
- self.assertEquals(res.status, '400 Bad Request')
- self.assertTrue('Non-integer X-Delete-After' in res.body)
-
- def test_PUT_negative_delete_after(self):
- with save_globals():
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- set_http_connect(200, 200, 201, 201, 201)
- self.app.memcache.store = {}
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-After': '-60'})
+ self.assertEquals(201, res.status_int)
+
+ @patch_policies([
+ StoragePolicy(0, 'zero', False, object_ring=FakeRing()),
+ StoragePolicy(1, 'one', True, object_ring=FakeRing())
+ ])
+ def test_cross_policy_DELETE_versioning(self):
+ # reset the router post patch_policies
+ self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
+ requests = []
+
+ def capture_requests(ipaddr, port, device, partition, method, path,
+ headers=None, query_string=None):
+ requests.append((method, path, headers))
+
+ def fake_container_info(app, env, account, container, **kwargs):
+ info = {'status': 200, 'sync_key': None, 'storage_policy': None,
+ 'meta': {}, 'cors': {'allow_origin': None,
+ 'expose_headers': None,
+ 'max_age': None},
+ 'sysmeta': {}, 'read_acl': None, 'object_count': None,
+ 'write_acl': None, 'versions': None,
+ 'partition': 1, 'bytes': None,
+ 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0,
+ 'id': 0, 'device': 'sda', 'port': 1000},
+ {'zone': 1, 'ip': '10.0.0.1', 'region': 1,
+ 'id': 1, 'device': 'sdb', 'port': 1001},
+ {'zone': 2, 'ip': '10.0.0.2', 'region': 0,
+ 'id': 2, 'device': 'sdc', 'port': 1002}]}
+ if container == 'c':
+ info['storage_policy'] = '1'
+ info['versions'] = 'c-versions'
+ elif container == 'c-versions':
+ info['storage_policy'] = '0'
+ else:
+ self.fail('Unexpected call to get_info for %r' % container)
+ return info
+ container_listing = json.dumps([{'name': 'old_version'}])
+ with save_globals():
+ resp_status = (
+ 200, 200, # listings for versions container
+ 200, 200, 200, # get: for the last version
+ 201, 201, 201, # put: move the last version
+ 200, 200, 200, # delete: for the last version
+ )
+ body_iter = iter([container_listing] + [
+ '' for x in range(len(resp_status) - 1)])
+ set_http_connect(*resp_status, body_iter=body_iter,
+ give_connect=capture_requests)
+ req = Request.blank('/v1/a/c/current_version', method='DELETE')
self.app.update_request(req)
- res = controller.PUT(req)
- self.assertEquals(res.status, '400 Bad Request')
- self.assertTrue('X-Delete-At in past' in res.body)
-
- def test_PUT_delete_at(self):
- with save_globals():
- given_headers = {}
-
- def fake_connect_put_node(nodes, part, path, headers,
- logger_thread_locals):
- given_headers.update(headers)
-
- controller = proxy_server.ObjectController(self.app, 'account',
- 'container', 'object')
- controller._connect_put_node = fake_connect_put_node
- set_http_connect(200, 200)
self.app.memcache.store = {}
- t = str(int(time.time() + 100))
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- controller.PUT(req)
- self.assertEquals(given_headers.get('X-Delete-At'), t)
- self.assertTrue('X-Delete-At-Host' in given_headers)
- self.assertTrue('X-Delete-At-Device' in given_headers)
- self.assertTrue('X-Delete-At-Partition' in given_headers)
- self.assertTrue('X-Delete-At-Container' in given_headers)
-
- t = str(int(time.time() + 100)) + '.1'
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- resp = controller.PUT(req)
- self.assertEquals(resp.status_int, 400)
- self.assertTrue('Non-integer X-Delete-At' in resp.body)
-
- t = str(int(time.time() - 100))
- req = Request.blank('/v1/a/c/o', {},
- headers={'Content-Length': '0',
- 'Content-Type': 'foo/bar',
- 'X-Delete-At': t})
- self.app.update_request(req)
- resp = controller.PUT(req)
- self.assertEquals(resp.status_int, 400)
- self.assertTrue('X-Delete-At in past' in resp.body)
-
+ with mock.patch('swift.proxy.controllers.base.get_info',
+ fake_container_info):
+ resp = self.app.handle_request(req)
+ self.assertEquals(200, resp.status_int)
+ expected = [('GET', '/a/c-versions')] * 2 + \
+ [('GET', '/a/c-versions/old_version')] * 3 + \
+ [('PUT', '/a/c/current_version')] * 3 + \
+ [('DELETE', '/a/c-versions/old_version')] * 3
+ self.assertEqual(expected, [(m, p) for m, p, h in requests])
+ for method, path, headers in requests:
+ if 'current_version' in path:
+ expected_storage_policy = 1
+ elif 'old_version' in path:
+ expected_storage_policy = 0
+ else:
+ continue
+ storage_policy_index = \
+ int(headers['X-Backend-Storage-Policy-Index'])
+ self.assertEqual(
+ expected_storage_policy, storage_policy_index,
+ 'Unexpected %s request for %s '
+ 'with storage policy index %s' % (
+ method, path, storage_policy_index))
+
+ @unpatch_policies
def test_leak_1(self):
_request_instances = weakref.WeakKeyDictionary()
_orig_init = Request.__init__
@@ -3747,8 +5778,8 @@ class TestObjectController(unittest.TestCase):
def test_OPTIONS(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a',
- 'c', 'o.jpg')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o.jpg')
def my_empty_container_info(*args):
return {}
@@ -3855,7 +5886,8 @@ class TestObjectController(unittest.TestCase):
def test_CORS_valid(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
@@ -3908,7 +5940,8 @@ class TestObjectController(unittest.TestCase):
def test_CORS_valid_with_obj_headers(self):
with save_globals():
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
@@ -3969,20 +6002,21 @@ class TestObjectController(unittest.TestCase):
def test_PUT_x_container_headers_with_equal_replicas(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_PUT_x_container_headers_with_fewer_container_replicas(self):
@@ -3990,7 +6024,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
@@ -3998,10 +6033,10 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': None,
'X-Container-Partition': None,
@@ -4012,7 +6047,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
@@ -4020,13 +6056,13 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_POST_x_container_headers_with_more_container_replicas(self):
@@ -4036,7 +6072,8 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/stuff'})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.POST, req,
200, 200, 200, 200, 200) # HEAD HEAD POST POST POST
@@ -4044,13 +6081,13 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_DELETE_x_container_headers_with_more_container_replicas(self):
@@ -4059,20 +6096,21 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'application/stuff'})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.DELETE, req,
200, 200, 200, 200, 200) # HEAD HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
- 'X-Container-Partition': '1',
+ 'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}
])
@@ -4081,15 +6119,15 @@ class TestObjectController(unittest.TestCase):
self.app.container_ring.set_replicas(2)
delete_at_timestamp = int(time.time()) + 100000
- delete_at_container = str(
- delete_at_timestamp /
- self.app.expiring_objects_container_divisor *
- self.app.expiring_objects_container_divisor)
+ delete_at_container = utils.get_expirer_container(
+ delete_at_timestamp, self.app.expiring_objects_container_divisor,
+ 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': '0',
'X-Delete-At': str(delete_at_timestamp)})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
@@ -4099,11 +6137,11 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': None,
'X-Delete-At-Container': None,
@@ -4118,15 +6156,15 @@ class TestObjectController(unittest.TestCase):
self.app.expiring_objects_container_divisor = 60
delete_at_timestamp = int(time.time()) + 100000
- delete_at_container = str(
- delete_at_timestamp /
- self.app.expiring_objects_container_divisor *
- self.app.expiring_objects_container_divisor)
+ delete_at_container = utils.get_expirer_container(
+ delete_at_timestamp, self.app.expiring_objects_container_divisor,
+ 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': 0,
'X-Delete-At': str(delete_at_timestamp)})
- controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
+ controller = ReplicatedObjectController(
+ self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
@@ -4135,38 +6173,483 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda,sdd'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': '10.0.0.2:1002',
'X-Delete-At-Container': delete_at_container,
- 'X-Delete-At-Partition': '1',
+ 'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdc'}
])
+class TestECMismatchedFA(unittest.TestCase):
+ def tearDown(self):
+ prosrv = _test_servers[0]
+ # don't leak error limits and poison other tests
+ prosrv._error_limiting = {}
+
+ def test_mixing_different_objects_fragment_archives(self):
+ (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
+ obj2srv, obj3srv) = _test_servers
+ ec_policy = POLICIES[3]
+
+ @public
+ def bad_disk(req):
+ return Response(status=507, body="borken")
+
+ ensure_container = Request.blank(
+ "/v1/a/ec-crazytown",
+ environ={"REQUEST_METHOD": "PUT"},
+ headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
+ resp = ensure_container.get_response(prosrv)
+ self.assertTrue(resp.status_int in (201, 202))
+
+ obj1 = "first version..."
+ put_req1 = Request.blank(
+ "/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "PUT"},
+ headers={"X-Auth-Token": "t"})
+ put_req1.body = obj1
+
+ obj2 = u"versión segundo".encode("utf-8")
+ put_req2 = Request.blank(
+ "/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "PUT"},
+ headers={"X-Auth-Token": "t"})
+ put_req2.body = obj2
+
+ # pyeclib has checks for unequal-length; we don't want to trip those
+ self.assertEqual(len(obj1), len(obj2))
+
+ # Servers obj1 and obj2 will have the first version of the object
+ prosrv._error_limiting = {}
+ with nested(
+ mock.patch.object(obj3srv, 'PUT', bad_disk),
+ mock.patch(
+ 'swift.common.storage_policy.ECStoragePolicy.quorum')):
+ type(ec_policy).quorum = mock.PropertyMock(return_value=2)
+ resp = put_req1.get_response(prosrv)
+ self.assertEqual(resp.status_int, 201)
+
+ # Server obj3 (and, in real life, some handoffs) will have the
+ # second version of the object.
+ prosrv._error_limiting = {}
+ with nested(
+ mock.patch.object(obj1srv, 'PUT', bad_disk),
+ mock.patch.object(obj2srv, 'PUT', bad_disk),
+ mock.patch(
+ 'swift.common.storage_policy.ECStoragePolicy.quorum'),
+ mock.patch(
+ 'swift.proxy.controllers.base.Controller._quorum_size',
+ lambda *a, **kw: 1)):
+ type(ec_policy).quorum = mock.PropertyMock(return_value=1)
+ resp = put_req2.get_response(prosrv)
+ self.assertEqual(resp.status_int, 201)
+
+ # A GET that only sees 1 fragment archive should fail
+ get_req = Request.blank("/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "GET"},
+ headers={"X-Auth-Token": "t"})
+ prosrv._error_limiting = {}
+ with nested(
+ mock.patch.object(obj1srv, 'GET', bad_disk),
+ mock.patch.object(obj2srv, 'GET', bad_disk)):
+ resp = get_req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 503)
+
+ # A GET that sees 2 matching FAs will work
+ get_req = Request.blank("/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "GET"},
+ headers={"X-Auth-Token": "t"})
+ prosrv._error_limiting = {}
+ with mock.patch.object(obj3srv, 'GET', bad_disk):
+ resp = get_req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.body, obj1)
+
+ # A GET that sees 2 mismatching FAs will fail
+ get_req = Request.blank("/v1/a/ec-crazytown/obj",
+ environ={"REQUEST_METHOD": "GET"},
+ headers={"X-Auth-Token": "t"})
+ prosrv._error_limiting = {}
+ with mock.patch.object(obj2srv, 'GET', bad_disk):
+ resp = get_req.get_response(prosrv)
+ self.assertEqual(resp.status_int, 503)
+
+
+class TestObjectECRangedGET(unittest.TestCase):
+ def setUp(self):
+ self.app = proxy_server.Application(
+ None, FakeMemcache(),
+ logger=debug_logger('proxy-ut'),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
+
+ @classmethod
+ def setUpClass(cls):
+ cls.obj_name = 'range-get-test'
+ cls.tiny_obj_name = 'range-get-test-tiny'
+ cls.aligned_obj_name = 'range-get-test-aligned'
+
+ # Note: only works if called with unpatched policies
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: 0\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Storage-Policy: ec\r\n'
+ '\r\n')
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 2'
+ assert headers[:len(exp)] == exp, "container PUT failed"
+
+ seg_size = POLICIES.get_by_name("ec").ec_segment_size
+ cls.seg_size = seg_size
+ # EC segment size is 4 KiB, hence this gives 4 segments, which we
+ # then verify with a quick sanity check
+ cls.obj = ' my hovercraft is full of eels '.join(
+ str(s) for s in range(431))
+ assert seg_size * 4 > len(cls.obj) > seg_size * 3, \
+ "object is wrong number of segments"
+
+ cls.tiny_obj = 'tiny, tiny object'
+ assert len(cls.tiny_obj) < seg_size, "tiny_obj too large"
+
+ cls.aligned_obj = "".join(
+ "abcdEFGHijkl%04d" % x for x in range(512))
+ assert len(cls.aligned_obj) % seg_size == 0, "aligned obj not aligned"
+
+ for obj_name, obj in ((cls.obj_name, cls.obj),
+ (cls.tiny_obj_name, cls.tiny_obj),
+ (cls.aligned_obj_name, cls.aligned_obj)):
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('PUT /v1/a/ec-con/%s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n%s' % (obj_name, len(obj), obj))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = 'HTTP/1.1 201'
+ assert headers[:len(exp)] == exp, \
+ "object PUT failed %s" % obj_name
+
+ def _get_obj(self, range_value, obj_name=None):
+ if obj_name is None:
+ obj_name = self.obj_name
+
+ prolis = _test_sockets[0]
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile()
+ fd.write('GET /v1/a/ec-con/%s HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Connection: close\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'Range: %s\r\n'
+ '\r\n' % (obj_name, range_value))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ # e.g. "HTTP/1.1 206 Partial Content\r\n..."
+ status_code = int(headers[9:12])
+ headers = parse_headers_string(headers)
+
+ gotten_obj = ''
+ while True:
+ buf = fd.read(64)
+ if not buf:
+ break
+ gotten_obj += buf
+
+ return (status_code, headers, gotten_obj)
+
+ def test_unaligned(self):
+ # One segment's worth of data, but straddling two segment boundaries
+ # (so it has data from three segments)
+ status, headers, gotten_obj = self._get_obj("bytes=3783-7878")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "4096")
+ self.assertEqual(headers['Content-Range'], "bytes 3783-7878/14513")
+ self.assertEqual(len(gotten_obj), 4096)
+ self.assertEqual(gotten_obj, self.obj[3783:7879])
+
+ def test_aligned_left(self):
+ # First byte is aligned to a segment boundary, last byte is not
+ status, headers, gotten_obj = self._get_obj("bytes=0-5500")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "5501")
+ self.assertEqual(headers['Content-Range'], "bytes 0-5500/14513")
+ self.assertEqual(len(gotten_obj), 5501)
+ self.assertEqual(gotten_obj, self.obj[:5501])
+
+ def test_aligned_range(self):
+ # Ranged GET that wants exactly one segment
+ status, headers, gotten_obj = self._get_obj("bytes=4096-8191")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "4096")
+ self.assertEqual(headers['Content-Range'], "bytes 4096-8191/14513")
+ self.assertEqual(len(gotten_obj), 4096)
+ self.assertEqual(gotten_obj, self.obj[4096:8192])
+
+ def test_aligned_range_end(self):
+ # Ranged GET that wants exactly the last segment
+ status, headers, gotten_obj = self._get_obj("bytes=12288-14512")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "2225")
+ self.assertEqual(headers['Content-Range'], "bytes 12288-14512/14513")
+ self.assertEqual(len(gotten_obj), 2225)
+ self.assertEqual(gotten_obj, self.obj[12288:])
+
+ def test_aligned_range_aligned_obj(self):
+ # Ranged GET that wants exactly the last segment, which is full-size
+ status, headers, gotten_obj = self._get_obj("bytes=4096-8191",
+ self.aligned_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "4096")
+ self.assertEqual(headers['Content-Range'], "bytes 4096-8191/8192")
+ self.assertEqual(len(gotten_obj), 4096)
+ self.assertEqual(gotten_obj, self.aligned_obj[4096:8192])
+
+ def test_byte_0(self):
+ # Just the first byte, but it's index 0, so that's easy to get wrong
+ status, headers, gotten_obj = self._get_obj("bytes=0-0")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], "1")
+ self.assertEqual(headers['Content-Range'], "bytes 0-0/14513")
+ self.assertEqual(gotten_obj, self.obj[0])
+
+ def test_unsatisfiable(self):
+ # Goes just one byte too far off the end of the object, so it's
+ # unsatisfiable
+ status, _junk, _junk = self._get_obj(
+ "bytes=%d-%d" % (len(self.obj), len(self.obj) + 100))
+ self.assertEqual(status, 416)
+
+ def test_off_end(self):
+ # Ranged GET that's mostly off the end of the object, but overlaps
+ # it in just the last byte
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) + 100))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '1')
+ self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[-1])
+
+ def test_aligned_off_end(self):
+ # Ranged GET that starts on a segment boundary but asks for a whole lot
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=%d-%d" % (8192, len(self.obj) + 100))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '6321')
+ self.assertEqual(headers['Content-Range'], 'bytes 8192-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[8192:])
+
+ def test_way_off_end(self):
+ # Ranged GET that's mostly off the end of the object, but overlaps
+ # it in just the last byte, and wants multiple segments' worth off
+ # the end
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) * 1000))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '1')
+ self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[-1])
+
+ def test_boundaries(self):
+ # Wants the last byte of segment 1 + the first byte of segment 2
+ status, headers, gotten_obj = self._get_obj("bytes=4095-4096")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '2')
+ self.assertEqual(headers['Content-Range'], 'bytes 4095-4096/14513')
+ self.assertEqual(gotten_obj, self.obj[4095:4097])
+
+ def test_until_end(self):
+ # Wants the last byte of segment 1 + the rest
+ status, headers, gotten_obj = self._get_obj("bytes=4095-")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '10418')
+ self.assertEqual(headers['Content-Range'], 'bytes 4095-14512/14513')
+ self.assertEqual(gotten_obj, self.obj[4095:])
+
+ def test_small_suffix(self):
+ # Small range-suffix GET: the last 100 bytes (less than one segment)
+ status, headers, gotten_obj = self._get_obj("bytes=-100")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '100')
+ self.assertEqual(headers['Content-Range'], 'bytes 14413-14512/14513')
+ self.assertEqual(len(gotten_obj), 100)
+ self.assertEqual(gotten_obj, self.obj[-100:])
+
+ def test_small_suffix_aligned(self):
+ # Small range-suffix GET: the last 100 bytes, last segment is
+ # full-size
+ status, headers, gotten_obj = self._get_obj("bytes=-100",
+ self.aligned_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '100')
+ self.assertEqual(headers['Content-Range'], 'bytes 8092-8191/8192')
+ self.assertEqual(len(gotten_obj), 100)
+
+ def test_suffix_two_segs(self):
+ # Ask for enough data that we need the last two segments. The last
+ # segment is short, though, so this ensures we compensate for that.
+ #
+ # Note that the total range size is less than one full-size segment.
+ suffix_len = len(self.obj) % self.seg_size + 1
+
+ status, headers, gotten_obj = self._get_obj("bytes=-%d" % suffix_len)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], str(suffix_len))
+ self.assertEqual(headers['Content-Range'],
+ 'bytes %d-%d/%d' % (len(self.obj) - suffix_len,
+ len(self.obj) - 1,
+ len(self.obj)))
+ self.assertEqual(len(gotten_obj), suffix_len)
+
+ def test_large_suffix(self):
+ # Large range-suffix GET: the last 5000 bytes (more than one segment)
+ status, headers, gotten_obj = self._get_obj("bytes=-5000")
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '5000')
+ self.assertEqual(headers['Content-Range'], 'bytes 9513-14512/14513')
+ self.assertEqual(len(gotten_obj), 5000)
+ self.assertEqual(gotten_obj, self.obj[-5000:])
+
+ def test_overlarge_suffix(self):
+ # The last N+1 bytes of an N-byte object
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=-%d" % (len(self.obj) + 1))
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '14513')
+ self.assertEqual(headers['Content-Range'], 'bytes 0-14512/14513')
+ self.assertEqual(len(gotten_obj), len(self.obj))
+ self.assertEqual(gotten_obj, self.obj)
+
+ def test_small_suffix_tiny_object(self):
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=-5", self.tiny_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '5')
+ self.assertEqual(headers['Content-Range'], 'bytes 12-16/17')
+ self.assertEqual(gotten_obj, self.tiny_obj[12:])
+
+ def test_overlarge_suffix_tiny_object(self):
+ status, headers, gotten_obj = self._get_obj(
+ "bytes=-1234567890", self.tiny_obj_name)
+ self.assertEqual(status, 206)
+ self.assertEqual(headers['Content-Length'], '17')
+ self.assertEqual(headers['Content-Range'], 'bytes 0-16/17')
+ self.assertEqual(len(gotten_obj), len(self.tiny_obj))
+ self.assertEqual(gotten_obj, self.tiny_obj)
+
+
+@patch_policies([
+ StoragePolicy(0, 'zero', True, object_ring=FakeRing(base_port=3000)),
+ StoragePolicy(1, 'one', False, object_ring=FakeRing(base_port=3000)),
+ StoragePolicy(2, 'two', False, True, object_ring=FakeRing(base_port=3000))
+])
class TestContainerController(unittest.TestCase):
"Test swift.proxy_server.ContainerController"
def setUp(self):
- self.app = proxy_server.Application(None, FakeMemcache(),
- account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing(),
- logger=FakeLogger())
+ self.app = proxy_server.Application(
+ None, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing(base_port=2000),
+ logger=debug_logger())
+
+ def test_convert_policy_to_index(self):
+ controller = swift.proxy.controllers.ContainerController(self.app,
+ 'a', 'c')
+ expected = {
+ 'zero': 0,
+ 'ZeRo': 0,
+ 'one': 1,
+ 'OnE': 1,
+ }
+ for name, index in expected.items():
+ req = Request.blank('/a/c', headers={'Content-Length': '0',
+ 'Content-Type': 'text/plain',
+ 'X-Storage-Policy': name})
+ self.assertEqual(controller._convert_policy_to_index(req), index)
+ # default test
+ req = Request.blank('/a/c', headers={'Content-Length': '0',
+ 'Content-Type': 'text/plain'})
+ self.assertEqual(controller._convert_policy_to_index(req), None)
+ # negative test
+ req = Request.blank('/a/c', headers={'Content-Length': '0',
+ 'Content-Type': 'text/plain',
+ 'X-Storage-Policy': 'nada'})
+ self.assertRaises(HTTPException, controller._convert_policy_to_index,
+ req)
+ # storage policy two is deprecated
+ req = Request.blank('/a/c', headers={'Content-Length': '0',
+ 'Content-Type': 'text/plain',
+ 'X-Storage-Policy': 'two'})
+ self.assertRaises(HTTPException, controller._convert_policy_to_index,
+ req)
+
+ def test_convert_index_to_name(self):
+ policy = random.choice(list(POLICIES))
+ req = Request.blank('/v1/a/c')
+ with mocked_http_conn(
+ 200, 200,
+ headers={'X-Backend-Storage-Policy-Index': int(policy)},
+ ) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['X-Storage-Policy'], policy.name)
+
+ def test_no_convert_index_to_name_when_container_not_found(self):
+ policy = random.choice(list(POLICIES))
+ req = Request.blank('/v1/a/c')
+ with mocked_http_conn(
+ 200, 404, 404, 404,
+ headers={'X-Backend-Storage-Policy-Index':
+ int(policy)}) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(resp.headers['X-Storage-Policy'], None)
+
+ def test_error_convert_index_to_name(self):
+ req = Request.blank('/v1/a/c')
+ with mocked_http_conn(
+ 200, 200,
+ headers={'X-Backend-Storage-Policy-Index': '-1'}) as fake_conn:
+ resp = req.get_response(self.app)
+ self.assertRaises(StopIteration, fake_conn.code_iter.next)
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['X-Storage-Policy'], None)
+ error_lines = self.app.logger.get_lines_for_level('error')
+ self.assertEqual(2, len(error_lines))
+ for msg in error_lines:
+ expected = "Could not translate " \
+ "X-Backend-Storage-Policy-Index ('-1')"
+ self.assertTrue(expected in msg)
def test_transfer_headers(self):
src_headers = {'x-remove-versions-location': 'x',
- 'x-container-read': '*:user'}
+ 'x-container-read': '*:user',
+ 'x-remove-container-sync-key': 'x'}
dst_headers = {'x-versions-location': 'backup'}
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-versions-location': '',
- 'x-container-read': '*:user'}
+ 'x-container-read': '*:user',
+ 'x-container-sync-key': ''}
self.assertEqual(dst_headers, expected_headers)
def assert_status_map(self, method, statuses, expected,
@@ -4249,22 +6732,78 @@ class TestContainerController(unittest.TestCase):
# return 200 and cache 200 for and container
test_status_map((200, 200, 404, 404), 200, 200, 200)
test_status_map((200, 200, 500, 404), 200, 200, 200)
- # return 304 dont cache container
+ # return 304 don't cache container
test_status_map((200, 304, 500, 404), 304, None, 200)
# return 404 and cache 404 for container
test_status_map((200, 404, 404, 404), 404, 404, 200)
test_status_map((200, 404, 404, 500), 404, 404, 200)
- # return 503, dont cache container
+ # return 503, don't cache container
test_status_map((200, 500, 500, 500), 503, None, 200)
self.assertFalse(self.app.account_autocreate)
# In all the following tests cache 404 for account
- # return 404 (as account is not found) and dont cache container
+ # return 404 (as account is not found) and don't cache container
test_status_map((404, 404, 404), 404, None, 404)
# This should make no difference
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, None, 404)
+ def test_PUT_policy_headers(self):
+ backend_requests = []
+
+ def capture_requests(ipaddr, port, device, partition, method,
+ path, headers=None, query_string=None):
+ if method == 'PUT':
+ backend_requests.append(headers)
+
+ def test_policy(requested_policy):
+ with save_globals():
+ mock_conn = set_http_connect(200, 201, 201, 201,
+ give_connect=capture_requests)
+ self.app.memcache.store = {}
+ req = Request.blank('/v1/a/test', method='PUT',
+ headers={'Content-Length': 0})
+ if requested_policy:
+ expected_policy = requested_policy
+ req.headers['X-Storage-Policy'] = policy.name
+ else:
+ expected_policy = POLICIES.default
+ res = req.get_response(self.app)
+ if expected_policy.is_deprecated:
+ self.assertEquals(res.status_int, 400)
+ self.assertEqual(0, len(backend_requests))
+ expected = 'is deprecated'
+ self.assertTrue(expected in res.body,
+ '%r did not include %r' % (
+ res.body, expected))
+ return
+ self.assertEquals(res.status_int, 201)
+ self.assertEqual(
+ expected_policy.object_ring.replicas,
+ len(backend_requests))
+ for headers in backend_requests:
+ if not requested_policy:
+ self.assertFalse('X-Backend-Storage-Policy-Index' in
+ headers)
+ self.assertTrue(
+ 'X-Backend-Storage-Policy-Default' in headers)
+ self.assertEqual(
+ int(expected_policy),
+ int(headers['X-Backend-Storage-Policy-Default']))
+ else:
+ self.assertTrue('X-Backend-Storage-Policy-Index' in
+ headers)
+ self.assertEqual(int(headers
+ ['X-Backend-Storage-Policy-Index']),
+ int(policy))
+ # make sure all mocked responses are consumed
+ self.assertRaises(StopIteration, mock_conn.code_iter.next)
+
+ test_policy(None) # no policy header
+ for policy in POLICIES:
+ backend_requests = [] # reset backend requests
+ test_policy(policy)
+
def test_PUT(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
@@ -4323,6 +6862,47 @@ class TestContainerController(unittest.TestCase):
503, 201, 201), # put container success
201, missing_container=True)
+ def test_PUT_autocreate_account_with_sysmeta(self):
+ # x-account-sysmeta headers in a container PUT request should be
+ # transferred to the account autocreate PUT request
+ with save_globals():
+ controller = proxy_server.ContainerController(self.app, 'account',
+ 'container')
+
+ def test_status_map(statuses, expected, headers=None, **kwargs):
+ set_http_connect(*statuses, **kwargs)
+ self.app.memcache.store = {}
+ req = Request.blank('/v1/a/c', {}, headers=headers)
+ req.content_length = 0
+ self.app.update_request(req)
+ res = controller.PUT(req)
+ expected = str(expected)
+ self.assertEquals(res.status[:len(expected)], expected)
+
+ self.app.account_autocreate = True
+ calls = []
+ callback = _make_callback_func(calls)
+ key, value = 'X-Account-Sysmeta-Blah', 'something'
+ headers = {key: value}
+
+ # all goes according to plan
+ test_status_map(
+ (404, 404, 404, # account_info fails on 404
+ 201, 201, 201, # PUT account
+ 200, # account_info success
+ 201, 201, 201), # put container success
+ 201, missing_container=True,
+ headers=headers,
+ give_connect=callback)
+
+ self.assertEqual(10, len(calls))
+ for call in calls[3:6]:
+ self.assertEqual('/account', call['path'])
+ self.assertTrue(key in call['headers'],
+ '%s call, key %s missing in headers %s' %
+ (call['method'], key, call['headers']))
+ self.assertEqual(value, call['headers'][key])
+
def test_POST(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
@@ -4359,7 +6939,14 @@ class TestContainerController(unittest.TestCase):
self.app.max_containers_per_account = 12345
controller = proxy_server.ContainerController(self.app, 'account',
'container')
- self.assert_status_map(controller.PUT, (201, 201, 201), 403,
+ self.assert_status_map(controller.PUT,
+ (200, 200, 201, 201, 201), 201,
+ missing_container=True)
+
+ controller = proxy_server.ContainerController(self.app, 'account',
+ 'container_new')
+
+ self.assert_status_map(controller.PUT, (200, 404, 404, 404), 403,
missing_container=True)
self.app.max_containers_per_account = 12345
@@ -4372,7 +6959,7 @@ class TestContainerController(unittest.TestCase):
def test_PUT_max_container_name_length(self):
with save_globals():
- limit = MAX_CONTAINER_NAME_LENGTH
+ limit = constraints.MAX_CONTAINER_NAME_LENGTH
controller = proxy_server.ContainerController(self.app, 'account',
'1' * limit)
self.assert_status_map(controller.PUT,
@@ -4398,9 +6985,7 @@ class TestContainerController(unittest.TestCase):
for meth in ('DELETE', 'PUT'):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
- for dev in self.app.account_ring.devs.values():
- del dev['errors']
- del dev['last_error']
+ self.app._error_limiting = {}
controller = proxy_server.ContainerController(self.app,
'account',
'container')
@@ -4437,9 +7022,10 @@ class TestContainerController(unittest.TestCase):
resp = getattr(controller, meth)(req)
self.assertEquals(resp.status_int, 404)
- for dev in self.app.account_ring.devs.values():
- dev['errors'] = self.app.error_suppression_limit + 1
- dev['last_error'] = time.time()
+ for dev in self.app.account_ring.devs:
+ set_node_errors(self.app, dev,
+ self.app.error_suppression_limit + 1,
+ time.time())
set_http_connect(200, 200, 200, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
@@ -4477,19 +7063,26 @@ class TestContainerController(unittest.TestCase):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
+ container_ring = controller.app.container_ring
controller.app.sort_nodes = lambda l: l
self.assert_status_map(controller.HEAD, (200, 503, 200, 200), 200,
missing_container=False)
+
self.assertEquals(
- controller.app.container_ring.devs[0]['errors'], 2)
- self.assert_('last_error' in controller.app.container_ring.devs[0])
+ node_error_count(controller.app, container_ring.devs[0]), 2)
+ self.assert_(
+ node_last_error(controller.app, container_ring.devs[0])
+ is not None)
for _junk in xrange(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD,
(200, 503, 503, 503), 503)
- self.assertEquals(controller.app.container_ring.devs[0]['errors'],
- self.app.error_suppression_limit + 1)
+ self.assertEquals(
+ node_error_count(controller.app, container_ring.devs[0]),
+ self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 503)
- self.assert_('last_error' in controller.app.container_ring.devs[0])
+ self.assert_(
+ node_last_error(controller.app, container_ring.devs[0])
+ is not None)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503,
missing_container=True)
self.assert_status_map(controller.DELETE,
@@ -4606,14 +7199,15 @@ class TestContainerController(unittest.TestCase):
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
- ('a' * MAX_META_NAME_LENGTH): 'v'})
+ ('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
- headers={'X-Container-Meta-' +
- ('a' * (MAX_META_NAME_LENGTH + 1)): 'v'})
+ req = Request.blank(
+ '/v1/a/c', environ={'REQUEST_METHOD': method},
+ headers={'X-Container-Meta-' +
+ ('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 400)
@@ -4621,21 +7215,21 @@ class TestContainerController(unittest.TestCase):
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
- 'a' * MAX_META_VALUE_LENGTH})
+ 'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
- 'a' * (MAX_META_VALUE_LENGTH + 1)})
+ 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
- for x in xrange(MAX_META_COUNT):
+ for x in xrange(constraints.MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
@@ -4644,7 +7238,7 @@ class TestContainerController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
- for x in xrange(MAX_META_COUNT + 1):
+ for x in xrange(constraints.MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
@@ -4654,16 +7248,17 @@ class TestContainerController(unittest.TestCase):
set_http_connect(201, 201, 201)
headers = {}
- header_value = 'a' * MAX_META_VALUE_LENGTH
+ header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
- while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
- size += 4 + MAX_META_VALUE_LENGTH
+ while size < (constraints.MAX_META_OVERALL_SIZE - 4
+ - constraints.MAX_META_VALUE_LENGTH):
+ size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
- if MAX_META_OVERALL_SIZE - size > 1:
+ if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size - 1)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
@@ -4671,7 +7266,7 @@ class TestContainerController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Container-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
@@ -4781,6 +7376,78 @@ class TestContainerController(unittest.TestCase):
controller.HEAD(req)
self.assert_(called[0])
+ def test_unauthorized_requests_when_account_not_found(self):
+ # verify unauthorized container requests always return response
+ # from swift.authorize
+ called = [0, 0]
+
+ def authorize(req):
+ called[0] += 1
+ return HTTPUnauthorized(request=req)
+
+ def account_info(*args):
+ called[1] += 1
+ return None, None, None
+
+ def _do_test(method):
+ with save_globals():
+ swift.proxy.controllers.Controller.account_info = account_info
+ app = proxy_server.Application(None, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
+ set_http_connect(201, 201, 201)
+ req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
+ req.environ['swift.authorize'] = authorize
+ self.app.update_request(req)
+ res = app.handle_request(req)
+ return res
+
+ for method in ('PUT', 'POST', 'DELETE'):
+ # no delay_denial on method, expect one call to authorize
+ called = [0, 0]
+ res = _do_test(method)
+ self.assertEqual(401, res.status_int)
+ self.assertEqual([1, 0], called)
+
+ for method in ('HEAD', 'GET'):
+ # delay_denial on method, expect two calls to authorize
+ called = [0, 0]
+ res = _do_test(method)
+ self.assertEqual(401, res.status_int)
+ self.assertEqual([2, 1], called)
+
+ def test_authorized_requests_when_account_not_found(self):
+ # verify authorized container requests always return 404 when
+ # account not found
+ called = [0, 0]
+
+ def authorize(req):
+ called[0] += 1
+
+ def account_info(*args):
+ called[1] += 1
+ return None, None, None
+
+ def _do_test(method):
+ with save_globals():
+ swift.proxy.controllers.Controller.account_info = account_info
+ app = proxy_server.Application(None, FakeMemcache(),
+ account_ring=FakeRing(),
+ container_ring=FakeRing())
+ set_http_connect(201, 201, 201)
+ req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
+ req.environ['swift.authorize'] = authorize
+ self.app.update_request(req)
+ res = app.handle_request(req)
+ return res
+
+ for method in ('PUT', 'POST', 'DELETE', 'HEAD', 'GET'):
+ # expect one call to authorize
+ called = [0, 0]
+ res = _do_test(method)
+ self.assertEqual(404, res.status_int)
+ self.assertEqual([1, 1], called)
+
def test_OPTIONS_get_info_drops_origin(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
@@ -5008,10 +7675,10 @@ class TestContainerController(unittest.TestCase):
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
@@ -5028,13 +7695,13 @@ class TestContainerController(unittest.TestCase):
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
@@ -5048,10 +7715,10 @@ class TestContainerController(unittest.TestCase):
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
@@ -5068,13 +7735,13 @@ class TestContainerController(unittest.TestCase):
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
- 'X-Account-Partition': '1',
+ 'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
@@ -5087,7 +7754,7 @@ class TestContainerController(unittest.TestCase):
req = Request.blank('/v1/a/c', method='PUT', headers={'': ''})
with save_globals():
- new_connect = set_http_connect(200, # account existance check
+ new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
@@ -5096,7 +7763,7 @@ class TestContainerController(unittest.TestCase):
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
- timestamps.pop(0) # account existance check
+ timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
@@ -5112,7 +7779,7 @@ class TestContainerController(unittest.TestCase):
req = Request.blank('/v1/a/c', method='DELETE', headers={'': ''})
self.app.update_request(req)
with save_globals():
- new_connect = set_http_connect(200, # account existance check
+ new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
@@ -5121,7 +7788,7 @@ class TestContainerController(unittest.TestCase):
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
- timestamps.pop(0) # account existance check
+ timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
@@ -5141,18 +7808,20 @@ class TestContainerController(unittest.TestCase):
self.assert_(got_exc)
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing)
+ container_ring=FakeRing())
- def assert_status_map(self, method, statuses, expected, env_expected=None):
+ def assert_status_map(self, method, statuses, expected, env_expected=None,
+ headers=None, **kwargs):
+ headers = headers or {}
with save_globals():
- set_http_connect(*statuses)
- req = Request.blank('/v1/a', {})
+ set_http_connect(*statuses, **kwargs)
+ req = Request.blank('/v1/a', {}, headers=headers)
self.app.update_request(req)
res = method(req)
self.assertEquals(res.status_int, expected)
@@ -5311,9 +7980,36 @@ class TestAccountController(unittest.TestCase):
controller.POST,
(404, 404, 404, 403, 403, 403, 400, 400, 400), 400)
+ def test_POST_autocreate_with_sysmeta(self):
+ with save_globals():
+ controller = proxy_server.AccountController(self.app, 'account')
+ self.app.memcache = FakeMemcacheReturnsNone()
+ # first test with autocreate being False
+ self.assertFalse(self.app.account_autocreate)
+ self.assert_status_map(controller.POST,
+ (404, 404, 404), 404)
+        # next turn it on and test account being created then updated
+ controller.app.account_autocreate = True
+ calls = []
+ callback = _make_callback_func(calls)
+ key, value = 'X-Account-Sysmeta-Blah', 'something'
+ headers = {key: value}
+ self.assert_status_map(
+ controller.POST,
+ (404, 404, 404, 202, 202, 202, 201, 201, 201), 201,
+            # POST, autocreate PUT, POST again
+ headers=headers,
+ give_connect=callback)
+ self.assertEqual(9, len(calls))
+ for call in calls:
+ self.assertTrue(key in call['headers'],
+ '%s call, key %s missing in headers %s' %
+ (call['method'], key, call['headers']))
+ self.assertEqual(value, call['headers'][key])
+
def test_connection_refused(self):
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1 # can't connect on this port
controller = proxy_server.AccountController(self.app, 'account')
@@ -5324,7 +8020,7 @@ class TestAccountController(unittest.TestCase):
def test_other_socket_error(self):
self.app.account_ring.get_nodes('account')
- for dev in self.app.account_ring.devs.values():
+ for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = -1 # invalid port number
controller = proxy_server.AccountController(self.app, 'account')
@@ -5377,7 +8073,7 @@ class TestAccountController(unittest.TestCase):
def test_PUT_max_account_name_length(self):
with save_globals():
self.app.allow_account_management = True
- limit = MAX_ACCOUNT_NAME_LENGTH
+ limit = constraints.MAX_ACCOUNT_NAME_LENGTH
controller = proxy_server.AccountController(self.app, '1' * limit)
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
@@ -5392,6 +8088,12 @@ class TestAccountController(unittest.TestCase):
self.assert_status_map(controller.PUT, (201, -1, -1), 503)
self.assert_status_map(controller.PUT, (503, 503, -1), 503)
+ def test_PUT_status(self):
+ with save_globals():
+ self.app.allow_account_management = True
+ controller = proxy_server.AccountController(self.app, 'account')
+ self.assert_status_map(controller.PUT, (201, 201, 202), 202)
+
def test_PUT_metadata(self):
self.metadata_helper('PUT')
@@ -5452,14 +8154,15 @@ class TestAccountController(unittest.TestCase):
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
- ('a' * MAX_META_NAME_LENGTH): 'v'})
+ ('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
- req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
- headers={'X-Account-Meta-' +
- ('a' * (MAX_META_NAME_LENGTH + 1)): 'v'})
+ req = Request.blank(
+ '/v1/a/c', environ={'REQUEST_METHOD': method},
+ headers={'X-Account-Meta-' +
+ ('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 400)
@@ -5467,21 +8170,21 @@ class TestAccountController(unittest.TestCase):
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
- 'a' * MAX_META_VALUE_LENGTH})
+ 'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
- 'a' * (MAX_META_VALUE_LENGTH + 1)})
+ 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEquals(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
- for x in xrange(MAX_META_COUNT):
+ for x in xrange(constraints.MAX_META_COUNT):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
@@ -5490,7 +8193,7 @@ class TestAccountController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
- for x in xrange(MAX_META_COUNT + 1):
+ for x in xrange(constraints.MAX_META_COUNT + 1):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
@@ -5500,16 +8203,17 @@ class TestAccountController(unittest.TestCase):
set_http_connect(201, 201, 201)
headers = {}
- header_value = 'a' * MAX_META_VALUE_LENGTH
+ header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
- while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
- size += 4 + MAX_META_VALUE_LENGTH
+ while size < (constraints.MAX_META_OVERALL_SIZE - 4
+ - constraints.MAX_META_VALUE_LENGTH):
+ size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Account-Meta-%04d' % x] = header_value
x += 1
- if MAX_META_OVERALL_SIZE - size > 1:
+ if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Account-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size - 1)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
@@ -5517,7 +8221,7 @@ class TestAccountController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Account-Meta-a'] = \
- 'a' * (MAX_META_OVERALL_SIZE - size)
+ 'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
@@ -5569,6 +8273,7 @@ class TestAccountController(unittest.TestCase):
test_status_map((204, 500, 404), 400)
+@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountControllerFakeGetResponse(unittest.TestCase):
"""
Test all the faked-out GET responses for accounts that don't exist. They
@@ -5578,8 +8283,7 @@ class TestAccountControllerFakeGetResponse(unittest.TestCase):
conf = {'account_autocreate': 'yes'}
self.app = proxy_server.Application(conf, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing)
+ container_ring=FakeRing())
self.app.memcache = FakeMemcacheReturnsNone()
def test_GET_autocreate_accept_json(self):
@@ -5670,7 +8374,7 @@ class TestAccountControllerFakeGetResponse(unittest.TestCase):
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), object_ring=FakeRing())
+ container_ring=FakeRing())
with save_globals():
# Mock account server will provide privileged information (ACLs)
@@ -5745,7 +8449,7 @@ class TestAccountControllerFakeGetResponse(unittest.TestCase):
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
- container_ring=FakeRing(), object_ring=FakeRing())
+ container_ring=FakeRing())
app.allow_account_management = True
ext_header = 'x-account-access-control'
@@ -5841,10 +8545,6 @@ class FakeObjectController(object):
return
-class Stub(object):
- pass
-
-
class TestProxyObjectPerformance(unittest.TestCase):
def setUp(self):
@@ -5904,6 +8604,11 @@ class TestProxyObjectPerformance(unittest.TestCase):
print "Run %02d took %07.03f" % (i, end - start)
+@patch_policies([StoragePolicy(0, 'migrated', object_ring=FakeRing()),
+ StoragePolicy(1, 'ernie', True, object_ring=FakeRing()),
+ StoragePolicy(2, 'deprecated', is_deprecated=True,
+ object_ring=FakeRing()),
+ StoragePolicy(3, 'bert', object_ring=FakeRing())])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
@@ -5912,23 +8617,47 @@ class TestSwiftInfo(unittest.TestCase):
def test_registered_defaults(self):
proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
- container_ring=FakeRing(),
- object_ring=FakeRing)
+ container_ring=FakeRing())
si = utils.get_swift_info()['swift']
self.assertTrue('version' in si)
- self.assertEqual(si['max_file_size'], MAX_FILE_SIZE)
- self.assertEqual(si['max_meta_name_length'], MAX_META_NAME_LENGTH)
- self.assertEqual(si['max_meta_value_length'], MAX_META_VALUE_LENGTH)
- self.assertEqual(si['max_meta_count'], MAX_META_COUNT)
- self.assertEqual(si['account_listing_limit'], ACCOUNT_LISTING_LIMIT)
+ self.assertEqual(si['max_file_size'], constraints.MAX_FILE_SIZE)
+ self.assertEqual(si['max_meta_name_length'],
+ constraints.MAX_META_NAME_LENGTH)
+ self.assertEqual(si['max_meta_value_length'],
+ constraints.MAX_META_VALUE_LENGTH)
+ self.assertEqual(si['max_meta_count'], constraints.MAX_META_COUNT)
+ self.assertEqual(si['max_header_size'], constraints.MAX_HEADER_SIZE)
+ self.assertEqual(si['max_meta_overall_size'],
+ constraints.MAX_META_OVERALL_SIZE)
+ self.assertEqual(si['account_listing_limit'],
+ constraints.ACCOUNT_LISTING_LIMIT)
self.assertEqual(si['container_listing_limit'],
- CONTAINER_LISTING_LIMIT)
+ constraints.CONTAINER_LISTING_LIMIT)
self.assertEqual(si['max_account_name_length'],
- MAX_ACCOUNT_NAME_LENGTH)
+ constraints.MAX_ACCOUNT_NAME_LENGTH)
self.assertEqual(si['max_container_name_length'],
- MAX_CONTAINER_NAME_LENGTH)
- self.assertEqual(si['max_object_name_length'], MAX_OBJECT_NAME_LENGTH)
+ constraints.MAX_CONTAINER_NAME_LENGTH)
+ self.assertEqual(si['max_object_name_length'],
+ constraints.MAX_OBJECT_NAME_LENGTH)
+ self.assertTrue('strict_cors_mode' in si)
+ self.assertEqual(si['allow_account_management'], False)
+ self.assertEqual(si['account_autocreate'], False)
+ # This setting is by default excluded by disallowed_sections
+ self.assertEqual(si['valid_api_versions'],
+ constraints.VALID_API_VERSIONS)
+ # this next test is deliberately brittle in order to alert if
+ # other items are added to swift info
+ self.assertEqual(len(si), 18)
+
+ self.assertTrue('policies' in si)
+ sorted_pols = sorted(si['policies'], key=operator.itemgetter('name'))
+ self.assertEqual(len(sorted_pols), 3)
+ for policy in sorted_pols:
+ self.assertNotEquals(policy['name'], 'deprecated')
+ self.assertEqual(sorted_pols[0]['name'], 'bert')
+ self.assertEqual(sorted_pols[1]['name'], 'ernie')
+ self.assertEqual(sorted_pols[2]['name'], 'migrated')
if __name__ == '__main__':