log_file: /var/log/tests/distaf_test_run.log
log_level: DEBUG
remote_user: root
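
# Gluster server nodes. Each entry carries a YAML anchor (&serverN) so the
# volume and geo-rep definitions further down can reuse it via an alias
# (*serverN). 'devices' lists the block devices available for bricks and
# 'brick_root' is the directory under which bricks are created.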
servers:
    - &server1
        host: server-vm1
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server2
        host: server-vm2
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server3
        host: server-vm3
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server4
        host: server-vm4
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server5
        host: server-vm5
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server6
        host: server-vm6
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server7
        host: server-vm7
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server8
        host: server-vm8
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server9
        host: server-vm9
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server10
        host: server-vm10
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server11
        host: server-vm11
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
    - &server12
        host: server-vm12
        devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
        brick_root: "/bricks"
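
# Client machines used to mount the volumes under test.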
clients:
    - &client1
        host: client-vm1
    - &client2
        host: client-vm2
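
# Gluster-specific settings: cluster-wide services (Samba/CTDB and
# NFS-Ganesha), reusable volume-type templates, geo-replication slave
# volumes, the volumes to create, and the mounts to perform on the clients.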
gluster:
    cluster_config:
        smb:
            enable: False
            user: 'root'
            passwd: 'foobar'
            ctdb_servers: []
            ctdb_vips:
                - vip: vip1
                  routing_prefix: '23'
                  interfaces: 'eth0'
                - vip: vip2
                  routing_prefix: '22'
                  interfaces: 'eth0'
            ctdb_metavol_brick_path: ''
        nfs_ganesha:
            enable: False
            num_of_nfs_ganesha_nodes: 4
            vips: []
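
    # Reusable volume-type templates; a volume selects one of these through
    # its 'voltype' alias (e.g. 'voltype: *dist_rep').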
    volume_types:
        distribute: &distribute
            type: distribute
            dist_count: 4
            transport: tcp
        replicate: &replicate
            type: replicate
            replica_count: 3
            transport: tcp
        dist_rep: &dist_rep
            type: dist_rep
            dist_count: 2
            replica_count: 2
            transport: tcp
        disperse: &disperse
            type: disperse
            disperse_count: 4
            redundancy_count: 2
            transport: tcp
        dist_disperse: &dist_disperse
            type: dist_disperse
            dist_count: 2
            disperse_count: 4
            redundancy_count: 2
            transport: tcp
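
    # Slave volumes for geo-replication, referenced from the 'geo_rep'
    # section of a volume below.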
    slave_volumes:
        - &slave_vol1
            voltype: *dist_rep
            servers: [ *server5, *server6, *server7, *server8 ]
            peers: []
            quota:
                enable: False
                limit_usage:
                    path: "/"
                    size: 100GB
                    percent:
                limit_objects:
                    path: "/"
                    number:
                    percent:
                alert_time:
                soft_timeout:
                hard_timeout:
            inode_quota:
                enable: False
            bitrot:
                enable: False
                scrub_throttle:
                scrub_frequency:
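
    # Volumes created for the test run, built from the anchored servers and
    # volume types defined above. 'peers' appears to list extra nodes that
    # join the trusted pool without hosting bricks initially.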
    volumes:
        - &vol1
            name: hosdu
            voltype: *dist_disperse
            servers: [ *server1, *server2, *server3, *server4 ]
            peers: [ *server9, *server10, *server11, *server12 ]
            tier:
                create_tier: False
                type: *dist_rep
            quota:
                enable: False
                limit_usage:
                    path: "/"
                    size: 100GB
                    percent:
                limit_objects:
                    path: "/"
                    number:
                    percent:
                alert_time: 0
                soft_timeout: 0
                hard_timeout: 0
            inode_quota:
                enable: False
            bitrot:
                enable: False
                scrub_throttle: 'aggressive'
                scrub_frequency: 'hourly'
            geo_rep:
                create_geo_rep_session: False
                slave_volumes: [ *slave_vol1 ]
                user: 'root'
                group: 'geogroup'
                sync_mode: 'rsync'
            options:
                performance.readdir-ahead: "on"
            snapshot:
                use_snapshot: True
                snap_jobname: 'snap_job'
                snap_schedule: 2
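
    # Mount definitions for the clients; empty values ('') are presumably
    # filled in by the test framework at run time.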
    mounts:
        - &mount1
            protocol: 'glusterfs'
            server: ''
            volname: ''
            client: *client1
            mountpoint: ''
            options: ''
        - &mount2
            protocol: 'nfs'
            server: ''
            volname: ''
            client: *client2
            mountpoint: ''
            options: ''
    global_mode: True
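
# A minimal loading sketch (this assumes the framework reads the file with
# PyYAML; the file name below is only illustrative):
#
#   import yaml
#   with open("tests_config.yml") as f:
#       config = yaml.safe_load(f)
#   # YAML aliases are expanded on load, so for example:
#   # config['gluster']['volumes'][0]['servers'][0]['host'] == 'server-vm1'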