---
# distaf test-run configuration: logging, cluster inventory (servers/clients),
# and GlusterFS volume/mount definitions composed via YAML anchors.
log_file: /var/log/tests/distaf_test_run.log
log_level: DEBUG
remote_user: root

# Storage servers. Each anchor (&serverN) is referenced below in volume
# server/peer lists via aliases (*serverN).
servers:
  - &server1
    host: server-vm1
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server2
    host: server-vm2
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server3
    host: server-vm3
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server4
    host: server-vm4
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server5
    host: server-vm5
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server6
    # FIX: was "server-vm2", duplicating &server2's host — every other entry
    # follows the serverN -> server-vmN pattern, so this was a copy-paste typo.
    host: server-vm6
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server7
    host: server-vm7
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server8
    host: server-vm8
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server9
    host: server-vm9
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server10
    host: server-vm10
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server11
    host: server-vm11
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"
  - &server12
    host: server-vm12
    devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    brick_root: "/bricks"

# Client machines that will mount the volumes defined under gluster.mounts.
clients:
  - &client1
    host: client-vm1
  - &client2
    host: client-vm2

gluster:
  cluster_config:
    smb:
      enable: false
      user: 'root'
      passwd: 'foobar'
      ctdb_servers: []
      ctdb_vips:
        - vip: vip1
          routing_prefix: '23'
          interfaces: 'eth0'
        - vip: vip2
          routing_prefix: '22'
          interfaces: 'eth0'
      ctdb_metavol_brick_path: ''
    nfs_ganesha:
      enable: false
      num_of_nfs_ganesha_nodes: 4
      vips: []

  # Reusable volume-topology templates, referenced below via *aliases.
  volume_types:
    distribute: &distribute
      dist_count: 4
      transport: tcp
    replicate: &replicate
      replica_count: 3
      transport: tcp
    dist_rep: &dist_rep
      dist_count: 2
      replica_count: 2
      transport: tcp
    disperse: &disperse
      disperse_count: 4
      redundancy_count: 2
      transport: tcp
    dist_disperse: &dist_disperse
      dist_count: 2
      disperse_count: 4
      redundancy_count: 2
      transport: tcp

  # Slave volumes for geo-replication sessions.
  slave_volumes:
    - &slave_vol1
      voltype: *dist_rep
      servers: [*server5, *server6, *server7, *server8]
      peers: []
      quota:
        enable: false
        limit_usage:
          path: "/"
          size: 100GB
          percent: null  # placeholder — fill in to use percent-based limits
        limit_objects:
          path: "/"
          number: null   # placeholder
          percent: null  # placeholder
        alert_time: null    # placeholder
        soft_timeout: null  # placeholder
        hard_timeout: null  # placeholder
      inode_quota:
        enable: false
      bitrot:
        enable: false
        scrub_throttle: null   # placeholder — e.g. 'aggressive'
        scrub_frequency: null  # placeholder — e.g. 'hourly'

  # Primary test volumes.
  volumes:
    - &vol1
      name: hosdu
      voltype: *dist_disperse
      servers: [*server1, *server2, *server3, *server4]
      peers: [*server9, *server10, *server11, *server12]
      tier:
        create_tier: false
        type: *dist_rep
      quota:
        enable: false
        limit_usage:
          path: "/"
          size: 100GB
          percent: null  # placeholder
        limit_objects:
          path: "/"
          number: null   # placeholder
          percent: null  # placeholder
        alert_time: 0
        soft_timeout: 0
        hard_timeout: 0
      inode_quota:
        enable: false
      bitrot:
        enable: false
        scrub_throttle: 'aggressive'
        scrub_frequency: 'hourly'
      geo_rep:
        create_geo_rep_session: false
        slave_volumes: [*slave_vol1]
        user: 'root'
        group: 'geogroup'
        sync_mode: 'rsync'
      options:
        # FIX: must be the *string* 'on' — unquoted "on" is a YAML 1.1
        # boolean, so PyYAML would load it as True and the framework would
        # pass the invalid value "True" to `gluster volume set`.
        performance.readdir-ahead: 'on'
      snapshot:
        use_snapshot: true
        snap_jobname: 'snap_job'
        snap_schedule: 2

  # Mount definitions; empty strings are filled in at runtime by the harness.
  mounts:
    - &mount1
      protocol: 'glusterfs'
      server: ''
      volname: null  # left null in the original (mount2 uses '') — harness fills it in
      client: *client1
      mountpoint: ''
      options: ''
    - &mount2
      protocol: 'nfs'
      server: ''
      volname: ''
      client: *client2
      mountpoint: ''
      options: ''

global_mode: true