path: root/glusterfs.spec.in
blob: dccb5974bc4dd51f1190026b2cf68e284c3be099
# if you make changes, then it is advised to increment this number, and provide
# a descriptive suffix to identify who owns the change or what it represents
# e.g. release_version 2.MSW
%global release 1%{?dist}
%global _sharedstatedir /var/lib


# if you wish to compile an rpm without rdma support, compile like this...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without rdma
%{?_without_rdma:%global _without_rdma --disable-ibverbs}

# No RDMA support on s390(x)
%ifarch s390 s390x
%global _without_rdma --disable-ibverbs
%endif

# if you wish to compile an rpm without epoll...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}

# if you wish to compile an rpm with fusermount...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with fusermount
%{?_with_fusermount:%global _with_fusermount --enable-fusermount}

%global version @PACKAGE_VERSION@
%if "%{version}" >= "3.2"
%global _can_georeplicate 1

# if you wish to compile an rpm without geo-replication support, compile like this...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-geo-replication}
%endif
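
# It should also be possible to combine these build switches, e.g. to build a
# tarball without both rdma and geo-replication support:
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without rdma --without georeplication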

Summary: Cluster File System
Name: @PACKAGE_NAME@
Version: %{version}
Release: %{release}
License: GPLv3+
Group: System Environment/Base
Vendor: Gluster Inc
Packager: @PACKAGE_BUGREPORT@
URL: http://www.gluster.org/docs/index.php/GlusterFS
Source0: @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz
BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
Requires(post): /sbin/chkconfig
Requires(preun): /sbin/service, /sbin/chkconfig
Requires(postun): /sbin/service
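
# The chkconfig and service dependencies above are needed by the init-script
# handling in the server package's post-install and pre-uninstall scriptlets below.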

BuildRequires: bison flex
BuildRequires: gcc make automake libtool
BuildRequires: ncurses-devel readline-devel openssl-devel
BuildRequires: python-ctypes
%if 0%{?suse_version}
BuildRequires: python-devel
%endif
Requires: openssl

Obsoletes:        %{name}-libs <= 2.0.0
Obsoletes:        %{name}-common < %{version}-%{release}
Obsoletes:        %{name}-core < %{version}-%{release}
Provides:         %{name}-libs = %{version}-%{release}
Provides:         %{name}-common = %{version}-%{release}
Provides:         %{name}-core = %{version}-%{release}
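
# The Obsoletes/Provides pairs above preserve the upgrade path from the older
# packaging, where this content shipped as glusterfs-core, -common and -libs
# (see the changelog entries below).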

%description
GlusterFS is a clustered file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package includes the glusterfs binary, the glusterd daemon, the gluster
command-line tool, libglusterfs and the glusterfs translator modules common to
both the GlusterFS server and client frameworks.

%if 0%{!?_without_rdma:1}
%package rdma
Summary: GlusterFS rdma support for ib-verbs
Group: Applications/File
BuildRequires: libibverbs-devel

Requires: %{name} = %{version}-%{release}

%description rdma
GlusterFS is a clustered file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for the ib-verbs library.
%endif

%if 0%{?_can_georeplicate}
%if 0%{!?_without_georeplication:1}
%package geo-replication
Summary: GlusterFS Geo-replication
Group: Applications/File
Requires: %{name} = %{version}-%{release}, python-ctypes, rsync >= 3.0.0

%description geo-replication
GlusterFS is a clustered file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for geo-replication.
%endif
%endif

%package fuse
Summary: GlusterFS Fuse client
Group: Applications/File

Requires: %{name} >= %{version}-%{release}

Obsoletes:        %{name}-client < %{version}-%{release}
Provides:         %{name}-client = %{version}-%{release}

%description fuse
GlusterFS is a clustered file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for FUSE-based clients.

%package server
Summary:          Clustered file-system server
Group:            System Environment/Daemons
Requires:         %{name} = %{version}-%{release}
Requires:         %{name}-fuse = %{version}-%{release}
Requires:         openssl

%description server
GlusterFS is a clustered file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the glusterfs server daemon.

%package devel
Summary:        Development Libraries
Group:          Development/Libraries
Requires:       %{name} = %{version}-%{release}

%description devel
GlusterFS is a clustered file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the development libraries.

%prep
%setup -q -n %{name}-%{version}

%build
./autogen.sh
%configure %{?_without_rdma} %{?_without_epoll} %{?_with_fusermount} %{?_without_georeplication}

# Remove rpath
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool

%{__make} %{?_smp_mflags}


%install
%{__rm} -rf %{buildroot}
%{__make} install DESTDIR=%{buildroot}
# Install include directory
%{__mkdir_p} %{buildroot}%{_includedir}/glusterfs
%{__install} -p -m 0644 libglusterfs/src/*.h \
    %{buildroot}%{_includedir}/glusterfs/
%{__install} -p -m 0644 contrib/uuid/*.h \
    %{buildroot}%{_includedir}/glusterfs/
# Following needed by hekafs multi-tenant translator
%{__mkdir_p} %{buildroot}%{_includedir}/glusterfs/rpc
%{__install} -p -m 0644 rpc/rpc-lib/src/*.h \
    %{buildroot}%{_includedir}/glusterfs/rpc/
%{__install} -p -m 0644 rpc/xdr/src/*.h \
    %{buildroot}%{_includedir}/glusterfs/rpc/
%{__mkdir_p} %{buildroot}%{_includedir}/glusterfs/server
%{__install} -p -m 0644 xlators/protocol/server/src/*.h \
    %{buildroot}%{_includedir}/glusterfs/server/


# Remove unwanted files from all the shared libraries
find %{buildroot}%{_libdir} -name '*.a' -delete
find %{buildroot}%{_libdir} -name '*.la' -delete

# Remove installed docs, we include them ourselves as %%doc
%{__rm} -rf %{buildroot}%{_datadir}/doc/glusterfs/

# Rename the samples, so we can include them as %%config
#for file in %{buildroot}%{_sysconfdir}/glusterfs/*.sample; do
#  %{__mv} ${file} `dirname ${file}`/`basename ${file} .sample`
#done

# Create working directory
%{__mkdir_p} %{buildroot}%{_sharedstatedir}/glusterd

# Update configuration file to /var/lib working directory
sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sharedstatedir}/glusterd|g' \
    %{buildroot}%{_sysconfdir}/glusterfs/glusterd.vol

# Following needed by the hooks interface
%{__mkdir_p} %{buildroot}%{_sharedstatedir}/glusterd/hook-scripts
%{__install} -p -m 0644 extras/hook-scripts/*.sh   \
    %{buildroot}%{_sharedstatedir}/glusterd/hook-scripts/

# Clean up the examples we want to include as %%doc
#%{__cp} -a doc/examples examples
#%{__rm} -f examples/Makefile*

%clean
%{__rm} -rf %{buildroot}

%post
/sbin/ldconfig

# Copy the 'glusterfs-logrotate' file into place
if [ -d /etc/logrotate.d ]; then
   cp %{_docdir}/%{name}-%{version}/glusterfs-logrotate /etc/logrotate.d/glusterfs
fi

%postun
/sbin/ldconfig

%files
%defattr(-,root,root)
%doc AUTHORS ChangeLog COPYING INSTALL NEWS README THANKS
%doc extras/glusterfs-logrotate
%{_libdir}/glusterfs
%{_libdir}/*.so.*
%{_sbindir}/glusterfs*
%{_mandir}/man8/*gluster*.8*
%dir %{_localstatedir}/log/glusterfs
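# The transport and xlator modules excluded below are packaged in the rdma,
# fuse and server subpackages instead.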
%if 0%{!?_without_rdma:1}
%exclude %{_libdir}/glusterfs/%{version}/rpc-transport/rdma*
%endif
%exclude %{_libdir}/glusterfs/%{version}/xlator/mount/fuse*
%exclude %{_libdir}/glusterfs/%{version}/xlator/storage*
%exclude %{_libdir}/glusterfs/%{version}/xlator/features/posix*
%exclude %{_libdir}/glusterfs/%{version}/xlator/protocol/server*
%exclude %{_libdir}/glusterfs/%{version}/xlator/mgmt*
%exclude %{_libdir}/glusterfs/%{version}/xlator/nfs*

%if 0%{!?_without_rdma:1}
%files rdma
%defattr(-,root,root)
%{_libdir}/glusterfs/%{version}/rpc-transport/rdma*
%endif

%if 0%{?_can_georeplicate}
%if 0%{!?_without_georeplication:1}
%post geo-replication
#restart glusterd.
%{_sysconfdir}/init.d/glusterd restart &> /dev/null
%endif

%if 0%{!?_without_georeplication:1}
%files geo-replication
%defattr(-,root,root)
%{_libexecdir}/glusterfs/gsyncd
%{_libexecdir}/glusterfs/python/syncdaemon/*
%endif
%endif

%files fuse
%defattr(-,root,root)
%{_libdir}/glusterfs/%{version}/xlator/mount/fuse*
%{_mandir}/man8/mount.glusterfs.8*
/sbin/mount.glusterfs
%if 0%{?_with_fusermount:1}
%{_bindir}/fusermount-glusterfs
%endif

%post server
/sbin/chkconfig --add glusterd

# Move the legacy glusterd working directory from /etc/glusterd to the new state dir
if [ -d /etc/glusterd ]; then
    cp -a /etc/glusterd /var/lib/
    mv /etc/glusterd /etc/glusterd.rpmsave
fi

if [ -d /var/lib/glusterd/vols ]; then
    # Back up old volfiles in an RPM-standard way.  These aren't actually
    # considered package config files, so %config doesn't work for them.
    for file in $(find /var/lib/glusterd/vols -name '*.vol'); do
        newfile=${file}.rpmsave
        echo "warning: ${file} saved as ${newfile}"
        cp ${file} ${newfile}
    done
fi
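
# If glusterd is already running, stop any gsyncd workers and glusterd itself,
# then restart glusterd with the 'upgrade' xlator option (this is what adds the
# marker translator, per the note below); if it was not running, start it
# briefly in upgrade mode and then stop it again.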

pidof -c -o %PPID -x glusterd &> /dev/null
if [ $? -eq 0 ]; then
    kill -9 `pgrep -f gsyncd.py` &> /dev/null

    killall glusterd &> /dev/null
    #add marker translator
    glusterd --xlator-option *.upgrade=on
else
    glusterd --xlator-option *.upgrade=on
    sleep 10
    killall glusterd &> /dev/null
fi

%preun server
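# $1 is the number of package instances that will remain installed:
# 0 means the package is being erased, 1 or more means this is an upgrade.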
if [ $1 -eq 0 ]; then
    /sbin/service glusterd stop &>/dev/null || :
    /sbin/chkconfig --del glusterd
fi
if [ $1 -ge 1 ]; then
    /sbin/service glusterd condrestart &>/dev/null || :
fi

# Legacy server
if [ $1 -eq 0 ]; then
    /sbin/service glusterfsd stop &>/dev/null || :
    /sbin/chkconfig --del glusterfsd
fi
if [ $1 -ge 1 ]; then
    /sbin/service glusterfsd condrestart &>/dev/null || :
fi

%files server
%defattr(-,root,root,-)
#%doc examples/ doc/glusterfs*.vol.sample
%config(noreplace) %{_sysconfdir}/glusterfs
%{_sharedstatedir}/glusterd
%{_sysconfdir}/init.d/glusterd
%{_sbindir}/gluster
%{_sbindir}/glusterd
%{_libdir}/glusterfs/%{version}/xlator/storage*
%{_libdir}/glusterfs/%{version}/xlator/features/posix*
%{_libdir}/glusterfs/%{version}/xlator/protocol/server*
%{_libdir}/glusterfs/%{version}/xlator/mgmt*
%{_libdir}/glusterfs/%{version}/xlator/nfs*

%files devel
%defattr(-,root,root,-)
%{_includedir}/glusterfs
%exclude %{_includedir}/glusterfs/y.tab.h
%{_libdir}/*.so

%changelog
* Wed Nov 9 2011 Joe Julian <me@joejulian.name> - git master
- Merge fedora specfile into gluster's spec.in.
- Add conditionals to allow the same spec file to be used for both 3.1 and 3.2
- http://bugs.gluster.com/show_bug.cgi?id=2970

* Wed Oct  5 2011 Joe Julian <me@joejulian.name> - 3.2.4-1
- Update to 3.2.4
- Removed the $local_fs requirement from the init scripts as in RHEL/CentOS that's
  provided by netfs, which needs to be started after glusterd.

* Sun Sep 25 2011 Joe Julian <me@joejulian.name> - 3.2.3-2
- Merged in upstream changes
- Fixed version reporting 3.2git
- Added nfs init script (disabled by default)

* Fri Sep  1 2011 Joe Julian <me@joejulian.name> - 3.2.3-1
- Update to 3.2.3

* Tue Jul 19 2011 Joe Julian <me@joejulian.name> - 3.2.2-3
- Add readline and libtermcap dependencies

* Tue Jul 19 2011 Joe Julian <me@joejulian.name> - 3.2.2-2
- Critical patch to prevent glusterd from walking outside of its own volume during rebalance

* Thu Jul 14 2011 Joe Julian <me@joejulian.name> - 3.2.2-1
- Update to 3.2.2

* Wed Jul 13 2011 Joe Julian <me@joejulian.name> - 3.2.1-2
- fix hardcoded path to gsyncd in source to match the actual file location

* Mon Jun 21 2011 Joe Julian <me@joejulian.name> - 3.2.1
- Update to 3.2.1

* Mon Jun 20 2011 Joe Julian <me@joejulian.name> - 3.1.5
- Update to 3.1.5

* Mon May 31 2011 Joe Julian <me@joejulian.name> - 3.1.5-qa1.4
- Current git

* Sun May 29 2011 Joe Julian <me@joejulian.name> - 3.1.5-qa1.2
- set _sharedstatedir to /var/lib for FHS compliance in RHEL5/CentOS5
- mv /etc/glusterd, if it exists, to the new state dir for upgrading from gluster packaging

* Sat May 28 2011 Joe Julian <me@joejulian.name> - 3.1.5-qa1.1
- Update to 3.1.5-qa1
- Add patch to remove optimization disabling
- Add patch to remove forced 64 bit compile
- Obsolete glusterfs-core to allow for upgrading from gluster packaging

* Sun Mar 19 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.3-1
- Update to 3.1.3
- Merge in more upstream SPEC changes
- Remove patches from GlusterFS bugzilla #2309 and #2311
- Remove inode-gen.patch

* Sun Feb 06 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.2-3
- Add back in legacy SPEC elements to support older branches

* Tue Feb 03 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.2-2
- Add patches from CloudFS project

* Tue Jan 25 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.2-1
- Update to 3.1.2

* Wed Jan 5 2011 Dan Horák <dan[at]danny.cz> - 3.1.1-3
- no InfiniBand on s390(x)

* Sat Jan 1 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.1-2
- Update to support readline
- Update to not parallel build

* Mon Dec 27 2010 Silas Sewell <silas@sewell.ch> - 3.1.1-1
- Update to 3.1.1
- Change package names to mirror upstream

* Mon Dec 20 2010 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.0.7-1
- Update to 3.0.7

* Wed Jul 28 2010 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.0.5-1
- Update to 3.0.x

* Sat Apr 10 2010 Jonathan Steffan <jsteffan@fedoraproject.org> - 2.0.9-2
- Move python version requires into a proper BuildRequires otherwise
  the spec always turned off python bindings as python is not part
  of buildsys-build and the chroot will never have python unless we
  require it
- Temporarily set -D_FORTIFY_SOURCE=1 until upstream fixes code
  GlusterFS Bugzilla #197 (#555728)
- Move glusterfs-volgen to devel subpackage (#555724)
- Update description (#554947)

* Sat Jan 2 2010 Jonathan Steffan <jsteffan@fedoraproject.org> - 2.0.9-1
- Update to 2.0.9

* Sat Nov 8 2009 Jonathan Steffan <jsteffan@fedoraproject.org> - 2.0.8-1
- Update to 2.0.8
- Remove install of glusterfs-volgen, it's properly added to
  automake upstream now

* Sat Oct 31 2009 Jonathan Steffan <jsteffan@fedoraproject.org> - 2.0.7-1
- Update to 2.0.7
- Install glusterfs-volgen, until it's properly added to automake
  by upstream
- Add macro to be able to ship more docs

* Thu Sep 17 2009 Peter Lemenkov <lemenkov@gmail.com> 2.0.6-2
- Rebuilt with new fuse

* Sat Sep 12 2009 Matthias Saou <http://freshrpms.net/> 2.0.6-1
- Update to 2.0.6.
- No longer default to disable the client on RHEL5 (#522192).
- Update spec file URLs.

* Mon Jul 27 2009 Matthias Saou <http://freshrpms.net/> 2.0.4-1
- Update to 2.0.4.

* Thu Jun 11 2009 Matthias Saou <http://freshrpms.net/> 2.0.1-2
- Remove libglusterfs/src/y.tab.c to fix koji F11/devel builds.

* Sat May 16 2009 Matthias Saou <http://freshrpms.net/> 2.0.1-1
- Update to 2.0.1.

* Thu May  7 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-1
- Update to 2.0.0 final.

* Wed Apr 29 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.3.rc8
- Move glusterfsd to common, since the client has a symlink to it.

* Fri Apr 24 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.2.rc8
- Update to 2.0.0rc8.

* Sun Apr 12 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.2.rc7
- Update glusterfsd init script to the new style init.
- Update files to match the new default vol file names.
- Include logrotate for glusterfsd, use a pid file by default.
- Include logrotate for glusterfs, using killall for lack of anything better.

* Sat Apr 11 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.1.rc7
- Update to 2.0.0rc7.
- Rename "libs" to "common" and move the binary, man page and log dir there.

* Tue Feb 24 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org>
- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild

* Mon Feb 16 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.1.rc1
- Update to 2.0.0rc1.
- Include new libglusterfsclient.h.

* Mon Feb 16 2009 Matthias Saou <http://freshrpms.net/> 1.3.12-1
- Update to 1.3.12.
- Remove no longer needed ocreat patch.

* Thu Jul 17 2008 Matthias Saou <http://freshrpms.net/> 1.3.10-1
- Update to 1.3.10.
- Remove mount patch, it's been included upstream now.

* Fri May 16 2008 Matthias Saou <http://freshrpms.net/> 1.3.9-1
- Update to 1.3.9.

* Fri May  9 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-1
- Update to 1.3.8 final.

* Tue Apr 23 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.10
- Include short patch to include fixes from latest TLA 751.

* Mon Apr 22 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.9
- Update to 1.3.8pre6.
- Include glusterfs binary in both the client and server packages, now that
  glusterfsd is a symlink to it instead of a separate binary.
* Sun Feb  3 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.8
- Add python version check and disable bindings for version < 2.4.

* Sun Feb  3 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.7
- Add --without client rpmbuild option, make it the default for RHEL (no fuse).
  (I hope "rhel" is the proper default macro name, couldn't find it...)

* Wed Jan 30 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.6
- Add --without ibverbs rpmbuild option to the package.

* Mon Jan 14 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.5
- Update to current TLA again, patch-636 which fixes the known segfaults.

* Thu Jan 10 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.4
- Downgrade to glusterfs--mainline--2.5--patch-628 which is more stable.

* Tue Jan  8 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.3
- Update to current TLA snapshot.
- Include umount.glusterfs wrapper script (really needed? dunno).
- Include patch to mount wrapper to avoid multiple identical mounts.

* Sun Dec 30 2007 Matthias Saou <http://freshrpms.net/> 1.3.8-0.1
- Update to current TLA snapshot, which includes "volume-name=" fstab option.

* Mon Dec  3 2007 Matthias Saou <http://freshrpms.net/> 1.3.7-6
- Re-add the /var/log/glusterfs directory in the client sub-package (required).
- Include custom patch to support vol= in fstab for -n glusterfs client option.

* Mon Nov 26 2007 Matthias Saou <http://freshrpms.net/> 1.3.7-4
- Re-enable libibverbs.
- Check and update License field to GPLv3+.
- Add glusterfs-common obsoletes, to provide upgrade path from old packages.
- Include patch to add mode to O_CREATE opens.

* Thu Nov 22 2007 Matthias Saou <http://freshrpms.net/> 1.3.7-3
- Remove Makefile* files from examples.
- Include RHEL/Fedora type init script, since the included ones don't do.

* Wed Nov 21 2007 Matthias Saou <http://freshrpms.net/> 1.3.7-1
- Major spec file cleanup.
- Add missing %%clean section.
- Fix ldconfig calls (weren't set for the proper sub-package).

* Sat Aug 4 2007 Matt Paine <matt@mattsoftware.com> - 1.3.pre7
- Added support to build rpm without ibverbs support (use --without ibverbs
  switch)

* Sun Jul 15 2007 Matt Paine <matt@mattsoftware.com> - 1.3.pre6
- Initial spec file

-rw-r--r--extras/thin-arbiter/thin-arbiter.vol5
-rw-r--r--extras/who-wrote-glusterfs/gitdm.domain-map1
-rw-r--r--geo-replication/gsyncd.conf.in6
-rw-r--r--geo-replication/setup.py6
-rw-r--r--geo-replication/src/peer_mountbroker.py.in17
-rw-r--r--geo-replication/syncdaemon/Makefile.am2
-rw-r--r--geo-replication/syncdaemon/README.md1
-rw-r--r--geo-replication/syncdaemon/changelogagent.py78
-rw-r--r--geo-replication/syncdaemon/gsyncd.py25
-rw-r--r--geo-replication/syncdaemon/gsyncdstatus.py4
-rw-r--r--geo-replication/syncdaemon/libcxattr.py4
-rw-r--r--geo-replication/syncdaemon/libgfchangelog.py256
-rw-r--r--geo-replication/syncdaemon/master.py42
-rw-r--r--geo-replication/syncdaemon/monitor.py93
-rw-r--r--geo-replication/syncdaemon/py2py3.py46
-rw-r--r--geo-replication/syncdaemon/resource.py67
-rw-r--r--geo-replication/syncdaemon/subcmds.py11
-rw-r--r--geo-replication/syncdaemon/syncdutils.py90
-rwxr-xr-xgeo-replication/tests/unit/test_gsyncdstatus.py10
-rw-r--r--glusterfs.spec.in656
-rw-r--r--glusterfsd/src/Makefile.am5
-rw-r--r--glusterfsd/src/gf_attach.c55
-rw-r--r--glusterfsd/src/glusterfsd-messages.h56
-rw-r--r--glusterfsd/src/glusterfsd-mgmt.c522
-rw-r--r--glusterfsd/src/glusterfsd.c551
-rw-r--r--glusterfsd/src/glusterfsd.h12
-rw-r--r--heal/src/Makefile.am6
-rw-r--r--heal/src/glfs-heal.c36
-rw-r--r--libgfdb.pc.in12
-rw-r--r--libglusterd/Makefile.am3
-rw-r--r--libglusterd/src/Makefile.am31
-rw-r--r--libglusterd/src/gd-common-utils.c78
-rw-r--r--libglusterd/src/gd-common-utils.h28
-rw-r--r--libglusterd/src/libglusterd.sym2
-rw-r--r--libglusterfs/src/Makefile.am18
-rw-r--r--libglusterfs/src/call-stub.c163
-rw-r--r--libglusterfs/src/client_t.c111
-rw-r--r--libglusterfs/src/common-utils.c488
-rw-r--r--libglusterfs/src/compat.c2
-rw-r--r--libglusterfs/src/ctx.c8
-rw-r--r--libglusterfs/src/defaults-tmpl.c7
-rw-r--r--libglusterfs/src/dict.c432
-rw-r--r--libglusterfs/src/event-epoll.c93
-rw-r--r--libglusterfs/src/event-poll.c47
-rw-r--r--libglusterfs/src/event.c3
-rw-r--r--libglusterfs/src/events.c81
-rw-r--r--libglusterfs/src/fd.c26
-rw-r--r--libglusterfs/src/gf-dirent.c2
-rw-r--r--libglusterfs/src/gfdb/Makefile.am37
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store.c802
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store.h331
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store_helper.c588
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store_helper.h95
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store_types.h532
-rw-r--r--libglusterfs/src/gfdb/gfdb_mem-types.h17
-rw-r--r--libglusterfs/src/gfdb/gfdb_sqlite3.c1542
-rw-r--r--libglusterfs/src/gfdb/gfdb_sqlite3.h328
-rw-r--r--libglusterfs/src/gfdb/gfdb_sqlite3_helper.c1260
-rw-r--r--libglusterfs/src/gfdb/gfdb_sqlite3_helper.h51
-rw-r--r--libglusterfs/src/gidcache.c5
-rw-r--r--libglusterfs/src/globals.c13
-rw-r--r--libglusterfs/src/glusterfs/client_t.h24
-rw-r--r--libglusterfs/src/glusterfs/common-utils.h139
-rw-r--r--libglusterfs/src/glusterfs/compat.h3
-rw-r--r--libglusterfs/src/glusterfs/dict.h13
-rw-r--r--libglusterfs/src/glusterfs/fd.h3
-rw-r--r--libglusterfs/src/glusterfs/gf-event.h4
-rw-r--r--libglusterfs/src/glusterfs/globals.h12
-rw-r--r--libglusterfs/src/glusterfs/glusterfs-acl.h2
-rw-r--r--libglusterfs/src/glusterfs/glusterfs-fops.h241
-rw-r--r--libglusterfs/src/glusterfs/glusterfs.h43
-rw-r--r--libglusterfs/src/glusterfs/inode.h5
-rw-r--r--libglusterfs/src/glusterfs/iobuf.h12
-rw-r--r--libglusterfs/src/glusterfs/latency.h22
-rw-r--r--libglusterfs/src/glusterfs/libglusterfs-messages.h130
-rw-r--r--libglusterfs/src/glusterfs/lkowner.h2
-rw-r--r--libglusterfs/src/glusterfs/logging.h11
-rw-r--r--libglusterfs/src/glusterfs/mem-pool.h58
-rw-r--r--libglusterfs/src/glusterfs/mem-types.h6
-rw-r--r--libglusterfs/src/glusterfs/stack.h14
-rw-r--r--libglusterfs/src/glusterfs/statedump.h2
-rw-r--r--libglusterfs/src/glusterfs/store.h6
-rw-r--r--libglusterfs/src/glusterfs/syncop.h80
-rw-r--r--libglusterfs/src/glusterfs/syscall.h18
-rw-r--r--libglusterfs/src/glusterfs/xlator.h25
-rw-r--r--libglusterfs/src/graph.c30
-rw-r--r--libglusterfs/src/inode.c114
-rw-r--r--libglusterfs/src/iobuf.c129
-rw-r--r--libglusterfs/src/latency.c91
-rw-r--r--libglusterfs/src/libglusterfs.sym25
-rw-r--r--libglusterfs/src/logging.c343
-rw-r--r--libglusterfs/src/mem-pool.c328
-rw-r--r--libglusterfs/src/monitoring.c8
-rw-r--r--libglusterfs/src/options.c123
-rw-r--r--libglusterfs/src/rbthash.c46
-rw-r--r--libglusterfs/src/stack.c4
-rw-r--r--libglusterfs/src/statedump.c164
-rw-r--r--libglusterfs/src/store.c85
-rw-r--r--libglusterfs/src/syncop-utils.c37
-rw-r--r--libglusterfs/src/syncop.c359
-rw-r--r--libglusterfs/src/syscall.c48
-rw-r--r--libglusterfs/src/throttle-tbf.c2
-rw-r--r--libglusterfs/src/tier-ctr-interface.h44
-rw-r--r--libglusterfs/src/timer.c3
-rw-r--r--libglusterfs/src/xlator.c179
-rwxr-xr-xrfc.sh200
-rw-r--r--rpc/rpc-lib/src/Makefile.am2
-rw-r--r--rpc/rpc-lib/src/libgfrpc.sym1
-rw-r--r--rpc/rpc-lib/src/protocol-common.h1
-rw-r--r--rpc/rpc-lib/src/rpc-clnt-ping.c2
-rw-r--r--rpc/rpc-lib/src/rpc-clnt.c51
-rw-r--r--rpc/rpc-lib/src/rpc-clnt.h19
-rw-r--r--rpc/rpc-lib/src/rpc-drc.c13
-rw-r--r--rpc/rpc-lib/src/rpc-drc.h2
-rw-r--r--rpc/rpc-lib/src/rpc-transport.c87
-rw-r--r--rpc/rpc-lib/src/rpc-transport.h22
-rw-r--r--rpc/rpc-lib/src/rpcsvc-common.h4
-rw-r--r--rpc/rpc-lib/src/rpcsvc.c164
-rw-r--r--rpc/rpc-lib/src/rpcsvc.h59
-rw-r--r--rpc/rpc-lib/src/xdr-common.h2
-rw-r--r--rpc/rpc-transport/Makefile.am2
-rw-r--r--rpc/rpc-transport/rdma/Makefile.am1
-rw-r--r--rpc/rpc-transport/rdma/src/Makefile.am24
-rw-r--r--rpc/rpc-transport/rdma/src/name.c703
-rw-r--r--rpc/rpc-transport/rdma/src/name.h35
-rw-r--r--rpc/rpc-transport/rdma/src/rdma.c4912
-rw-r--r--rpc/rpc-transport/rdma/src/rdma.h384
-rw-r--r--rpc/rpc-transport/rdma/src/rpc-trans-rdma-messages.h66
-rw-r--r--rpc/rpc-transport/socket/src/name.c86
-rw-r--r--rpc/rpc-transport/socket/src/socket.c506
-rw-r--r--rpc/rpc-transport/socket/src/socket.h25
-rw-r--r--rpc/xdr/gen/Makefile.am49
-rw-r--r--rpc/xdr/src/.gitignore2
-rw-r--r--rpc/xdr/src/Makefile.am70
-rw-r--r--rpc/xdr/src/cli1-xdr.x1
-rw-r--r--rpc/xdr/src/glusterd1-xdr.x15
-rw-r--r--rpc/xdr/src/glusterfs-fops.x250
-rw-r--r--rpc/xdr/src/glusterfs3-xdr.x3
-rw-r--r--rpc/xdr/src/glusterfs4-xdr.x6
-rw-r--r--rpc/xdr/src/libgfxdr.sym2
-rw-r--r--rpc/xdr/src/rpc-common-xdr.x3
-rwxr-xr-xrun-tests.sh79
-rw-r--r--tests/00-geo-rep/00-georep-verify-non-root-setup.t43
-rw-r--r--tests/00-geo-rep/00-georep-verify-setup.t3
-rw-r--r--tests/00-geo-rep/bug-1708603.t63
-rw-r--r--tests/00-geo-rep/georep-basic-dr-rsync.t13
-rw-r--r--tests/00-geo-rep/georep-upgrade.t79
-rw-r--r--tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t (renamed from tests/basic/afr/split-brain-favorite-child-policy.t)4
-rw-r--r--tests/000-flaky/basic_changelog_changelog-snapshot.t (renamed from tests/basic/changelog/changelog-snapshot.t)4
-rw-r--r--tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t (renamed from tests/basic/distribute/rebal-all-nodes-migrate.t)8
-rw-r--r--tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t50
-rw-r--r--[-rwxr-xr-x]tests/000-flaky/basic_mount-nfs-auth.t (renamed from tests/basic/mount-nfs-auth.t)0
-rw-r--r--tests/000-flaky/bugs_core_multiplex-limit-issue-151.t (renamed from tests/bugs/core/multiplex-limit-issue-151.t)6
-rw-r--r--[-rwxr-xr-x]tests/000-flaky/bugs_distribute_bug-1117851.t (renamed from tests/bugs/distribute/bug-1117851.t)4
-rw-r--r--tests/000-flaky/bugs_distribute_bug-1122443.t (renamed from tests/bugs/distribute/bug-1122443.t)17
-rw-r--r--tests/000-flaky/bugs_glusterd_bug-857330/common.rc (renamed from tests/bugs/glusterd/bug-857330/common.rc)2
-rwxr-xr-xtests/000-flaky/bugs_glusterd_bug-857330/normal.t (renamed from tests/bugs/glusterd/bug-857330/normal.t)4
-rwxr-xr-xtests/000-flaky/bugs_glusterd_bug-857330/xml.t (renamed from tests/bugs/glusterd/bug-857330/xml.t)4
-rw-r--r--[-rwxr-xr-x]tests/000-flaky/bugs_glusterd_quorum-value-check.t (renamed from tests/bugs/glusterd/quorum-value-check.t)6
-rw-r--r--tests/000-flaky/bugs_nfs_bug-1116503.t (renamed from tests/bugs/nfs/bug-1116503.t)6
-rw-r--r--tests/000-flaky/features_lock-migration_lkmigration-set-option.t (renamed from tests/features/lock-migration/lkmigration-set-option.t)4
-rw-r--r--tests/afr.rc16
-rw-r--r--tests/basic/afr/afr-anon-inode-no-quorum.t63
-rw-r--r--tests/basic/afr/afr-anon-inode.t114
-rw-r--r--tests/basic/afr/afr-seek.t55
-rw-r--r--tests/basic/afr/durability-off.t2
-rw-r--r--tests/basic/afr/entry-self-heal-anon-dir-off.t459
-rw-r--r--tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t9
-rw-r--r--tests/basic/afr/halo.t61
-rw-r--r--tests/basic/afr/rename-data-loss.t72
-rw-r--r--tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t124
-rw-r--r--tests/basic/changelog/changelog-history.t12
-rw-r--r--tests/basic/ctime/ctime-utimesat.t28
-rw-r--r--tests/basic/distribute/file-rename.t1021
-rw-r--r--tests/basic/distribute/spare_file_rebalance.t51
-rw-r--r--tests/basic/ec/ec-badfd.c124
-rwxr-xr-xtests/basic/ec/ec-badfd.t26
-rw-r--r--tests/basic/ec/ec-quorum-count.t167
-rw-r--r--tests/basic/ec/ec-read-mask.t114
-rw-r--r--tests/basic/ec/ec-reset-brick.t50
-rw-r--r--tests/basic/ec/ec-seek.t3
-rw-r--r--tests/basic/ec/gfapi-ec-open-truncate.c3
-rw-r--r--tests/basic/ec/self-heal.t2
-rw-r--r--tests/basic/fencing/afr-lock-heal-advanced.c227
-rw-r--r--tests/basic/fencing/afr-lock-heal-advanced.t115
-rw-r--r--tests/basic/fencing/afr-lock-heal-basic.c182
-rw-r--r--tests/basic/fencing/afr-lock-heal-basic.t102
-rw-r--r--tests/basic/fuse/active-io-graph-switch.t65
-rw-r--r--tests/basic/gfapi/bug-1507896.c49
-rw-r--r--tests/basic/gfapi/bug-1507896.t33
-rw-r--r--tests/basic/gfapi/gfapi-copy-file-range.t16
-rw-r--r--tests/basic/gfapi/gfapi-graph-switch-open-fd.t44
-rw-r--r--tests/basic/gfapi/gfapi-keep-writing.c129
-rw-r--r--tests/basic/gfapi/gfapi-ssl-load-volfile-test.c127
-rwxr-xr-xtests/basic/gfapi/gfapi-ssl-load-volfile-test.t76
-rw-r--r--tests/basic/gfapi/glfsxmp-coverage.c168
-rw-r--r--tests/basic/gfapi/glfsxmp.t5
-rw-r--r--tests/basic/gfapi/protocol-client-ssl.vol.in15
-rw-r--r--tests/basic/glusterd-restart-shd-mux.t10
-rw-r--r--tests/basic/glusterd/arbiter-volume.t29
-rw-r--r--tests/basic/glusterd/disperse-create.t4
-rw-r--r--tests/basic/glusterd/volfile_server_switch.t2
-rw-r--r--tests/basic/glusterd/volume-brick-count.t61
-rw-r--r--tests/basic/graph-cleanup-brick-down-shd-mux.t64
-rw-r--r--tests/basic/metadisp/fsyncdir.c29
-rw-r--r--tests/basic/metadisp/ftruncate.c34
-rw-r--r--tests/basic/metadisp/fxattr.c107
-rw-r--r--tests/basic/metadisp/gfs-fsetxattr.c141
-rw-r--r--tests/basic/metadisp/metadisp.t316
-rw-r--r--tests/basic/metadisp/metadisp.vol14
-rwxr-xr-xtests/basic/mount.t3
-rw-r--r--tests/basic/multiple-volume-shd-mux.t46
-rw-r--r--tests/basic/open-behind/open-behind.t183
-rw-r--r--tests/basic/open-behind/tester-fd.c99
-rw-r--r--tests/basic/open-behind/tester.c444
-rw-r--r--tests/basic/open-behind/tester.h145
-rw-r--r--tests/basic/posix/shared-statfs.t11
-rw-r--r--tests/basic/posix/zero-fill-enospace.c7
-rw-r--r--tests/basic/quick-read-with-upcall.t16
-rw-r--r--tests/basic/seek.c (renamed from tests/basic/ec/seek.c)0
-rw-r--r--tests/basic/shd-mux-afr.t70
-rw-r--r--tests/basic/shd-mux-ec.t75
-rw-r--r--tests/basic/shd-mux.t149
-rwxr-xr-xtests/basic/trace.t22
-rw-r--r--tests/basic/volume-scale-shd-mux.t33
-rw-r--r--tests/basic/volume-snap-scheduler.t49
-rwxr-xr-xtests/basic/volume-snapshot-xml.t6
-rw-r--r--[-rwxr-xr-x]tests/basic/volume.t36
-rw-r--r--tests/bitrot/br-signer-threads-config-1797869.t73
-rw-r--r--tests/bugs/bitrot/bug-1227996.t1
-rw-r--r--tests/bugs/bitrot/bug-1245981.t4
-rwxr-xr-xtests/bugs/bug-1064147.t72
-rw-r--r--tests/bugs/bug-1371806.t1
-rw-r--r--tests/bugs/bug-1371806_acl.t1
-rw-r--r--tests/bugs/bug-1620580.t67
-rw-r--r--tests/bugs/bug-1694920.t63
-rwxr-xr-xtests/bugs/cli/bug-1320388.t2
-rwxr-xr-xtests/bugs/ctime/issue-832.t32
-rw-r--r--tests/bugs/distribute/bug-1600379.t54
-rwxr-xr-xtests/bugs/distribute/bug-1786679.t69
-rwxr-xr-xtests/bugs/distribute/issue-1327.t33
-rw-r--r--tests/bugs/fuse/bug-985074.t5
-rwxr-xr-xtests/bugs/fuse/many-groups-for-acl.t13
-rw-r--r--tests/bugs/gfapi/bug-1447266/bug-1447266.t2
-rw-r--r--tests/bugs/glusterd/brick-mux-validation-in-cluster.t59
-rw-r--r--tests/bugs/glusterd/brick-mux-validation.t4
-rw-r--r--tests/bugs/glusterd/brick-mux.t2
-rw-r--r--tests/bugs/glusterd/brick-order-check-add-brick.t61
-rw-r--r--tests/bugs/glusterd/bug-1720566.t50
-rw-r--r--tests/bugs/glusterd/check_elastic_server.t63
-rw-r--r--tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t18
-rw-r--r--tests/bugs/glusterd/optimized-basic-testcases.t8
-rw-r--r--tests/bugs/glusterd/quorum-validation.t2
-rw-r--r--tests/bugs/glusterd/rebalance-in-cluster.t9
-rw-r--r--tests/bugs/glusterd/remove-brick-validation.t (renamed from tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t)14
-rw-r--r--tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t1
-rw-r--r--tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t2
-rw-r--r--tests/bugs/glusterd/validating-options-for-replicated-volume.t13
-rwxr-xr-xtests/bugs/glusterfs-server/bug-852147.t2
-rw-r--r--tests/bugs/glusterfs-server/bug-873549.t2
-rwxr-xr-xtests/bugs/glusterfs-server/bug-887145.t14
-rw-r--r--tests/bugs/glusterfs/bug-873962-spb.t1
-rwxr-xr-xtests/bugs/logging/bug-823081.t8
-rw-r--r--tests/bugs/posix/bug-1651445.t30
-rw-r--r--tests/bugs/protocol/bug-1433815-auth-allow.t1
-rw-r--r--tests/bugs/replicate/bug-1101647.t2
-rw-r--r--tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t2
-rw-r--r--tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t18
-rw-r--r--tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t2
-rw-r--r--tests/bugs/replicate/bug-1744548-heal-timeout.t19
-rw-r--r--tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t74
-rw-r--r--tests/bugs/replicate/bug-1801624-entry-heal.t58
-rw-r--r--tests/bugs/replicate/bug-880898.t7
-rw-r--r--tests/bugs/replicate/issue-1254-prioritize-enospc.t80
-rw-r--r--tests/bugs/replicate/mdata-heal-no-xattrs.t59
-rwxr-xr-xtests/bugs/rpc/bug-954057.t10
-rw-r--r--tests/bugs/shard/bug-1272986.t6
-rw-r--r--tests/bugs/shard/bug-1696136.c5
-rw-r--r--tests/bugs/shard/bug-shard-discard.c5
-rw-r--r--tests/bugs/shard/issue-1243.t43
-rw-r--r--tests/bugs/shard/issue-1281.t34
-rw-r--r--tests/bugs/shard/issue-1425.t45
-rw-r--r--tests/bugs/shard/shard-fallocate.c5
-rwxr-xr-xtests/bugs/snapshot/bug-1111041.t10
-rw-r--r--tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t1
-rw-r--r--tests/bugs/snapshot/bug-1597662.t3
-rwxr-xr-xtests/bugs/transport/bug-873367.t2
-rw-r--r--tests/bugs/write-behind/issue-884.c267
-rwxr-xr-xtests/bugs/write-behind/issue-884.t40
-rw-r--r--tests/cluster.rc41
-rw-r--r--tests/ec.rc9
-rw-r--r--tests/env.rc.in2
-rw-r--r--tests/features/flock_interrupt.t1
-rw-r--r--tests/features/fuse-lru-limit.t1
-rw-r--r--tests/features/interrupt.t14
-rwxr-xr-xtests/features/ssl-authz.t26
-rw-r--r--tests/features/ssl-ciphers.t72
-rwxr-xr-xtests/features/trash.t74
-rwxr-xr-xtests/features/worm.t39
-rw-r--r--tests/geo-rep.rc5
-rw-r--r--tests/glusterfind/glusterfind-basic.t84
-rw-r--r--tests/line-coverage/afr-heal-info.t43
-rwxr-xr-xtests/line-coverage/arbiter-coverage.t32
-rw-r--r--tests/line-coverage/cli-peer-and-volume-operations.t48
-rwxr-xr-xtests/line-coverage/errorgen-coverage.t2
-rw-r--r--tests/line-coverage/log-and-brick-ops-negative-case.t82
-rw-r--r--tests/ssl.rc2
-rw-r--r--tests/thin-arbiter.rc3
-rw-r--r--tests/volume.rc15
-rw-r--r--tools/gfind_missing_files/gfind_missing_files.sh2
-rwxr-xr-xtools/glusterfind/S57glusterfind-delete-post.py2
-rw-r--r--tools/glusterfind/src/changelog.py16
-rw-r--r--tools/glusterfind/src/gfind_py2py3.py25
-rw-r--r--tools/glusterfind/src/main.py61
-rw-r--r--tools/glusterfind/src/utils.py6
-rw-r--r--xlators/cluster/afr/src/afr-common.c1205
-rw-r--r--xlators/cluster/afr/src/afr-dir-read.c18
-rw-r--r--xlators/cluster/afr/src/afr-dir-write.c16
-rw-r--r--xlators/cluster/afr/src/afr-inode-read.c38
-rw-r--r--xlators/cluster/afr/src/afr-inode-read.h3
-rw-r--r--xlators/cluster/afr/src/afr-inode-write.c75
-rw-r--r--xlators/cluster/afr/src/afr-mem-types.h2
-rw-r--r--xlators/cluster/afr/src/afr-messages.h160
-rw-r--r--xlators/cluster/afr/src/afr-open.c19
-rw-r--r--xlators/cluster/afr/src/afr-read-txn.c10
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-common.c203
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-data.c22
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-entry.c212
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-metadata.c70
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-name.c41
-rw-r--r--xlators/cluster/afr/src/afr-self-heal.h21
-rw-r--r--xlators/cluster/afr/src/afr-self-heald.c286
-rw-r--r--xlators/cluster/afr/src/afr-self-heald.h20
-rw-r--r--xlators/cluster/afr/src/afr-transaction.c154
-rw-r--r--xlators/cluster/afr/src/afr.c55
-rw-r--r--xlators/cluster/afr/src/afr.h338
-rw-r--r--xlators/cluster/dht/src/dht-common.c1892
-rw-r--r--xlators/cluster/dht/src/dht-common.h405
-rw-r--r--xlators/cluster/dht/src/dht-diskusage.c12
-rw-r--r--xlators/cluster/dht/src/dht-hashfn.c38
-rw-r--r--xlators/cluster/dht/src/dht-helper.c199
-rw-r--r--xlators/cluster/dht/src/dht-inode-read.c95
-rw-r--r--xlators/cluster/dht/src/dht-inode-write.c56
-rw-r--r--xlators/cluster/dht/src/dht-layout.c92
-rw-r--r--xlators/cluster/dht/src/dht-linkfile.c50
-rw-r--r--xlators/cluster/dht/src/dht-lock.c190
-rw-r--r--xlators/cluster/dht/src/dht-mem-types.h3
-rw-r--r--xlators/cluster/dht/src/dht-messages.h320
-rw-r--r--xlators/cluster/dht/src/dht-rebalance.c1130
-rw-r--r--xlators/cluster/dht/src/dht-rename.c4
-rw-r--r--xlators/cluster/dht/src/dht-selfheal.c589
-rw-r--r--xlators/cluster/dht/src/dht-shared.c136
-rw-r--r--xlators/cluster/dht/src/nufa.c1
-rw-r--r--xlators/cluster/ec/src/ec-combine.c40
-rw-r--r--xlators/cluster/ec/src/ec-common.c126
-rw-r--r--xlators/cluster/ec/src/ec-common.h24
-rw-r--r--xlators/cluster/ec/src/ec-dir-read.c23
-rw-r--r--xlators/cluster/ec/src/ec-dir-write.c57
-rw-r--r--xlators/cluster/ec/src/ec-generic.c51
-rw-r--r--xlators/cluster/ec/src/ec-heal.c159
-rw-r--r--xlators/cluster/ec/src/ec-heald.c73
-rw-r--r--xlators/cluster/ec/src/ec-helpers.c9
-rw-r--r--xlators/cluster/ec/src/ec-inode-read.c46
-rw-r--r--xlators/cluster/ec/src/ec-inode-write.c61
-rw-r--r--xlators/cluster/ec/src/ec-locks.c69
-rw-r--r--xlators/cluster/ec/src/ec-messages.h2
-rw-r--r--xlators/cluster/ec/src/ec-types.h21
-rw-r--r--xlators/cluster/ec/src/ec.c125
-rw-r--r--xlators/cluster/ec/src/ec.h1
-rw-r--r--xlators/debug/delay-gen/src/delay-gen.c2
-rw-r--r--xlators/debug/error-gen/src/error-gen.c47
-rw-r--r--xlators/debug/io-stats/src/io-stats.c217
-rw-r--r--xlators/debug/trace/src/trace.c20
-rw-r--r--xlators/features/Makefile.am6
-rw-r--r--xlators/features/barrier/src/barrier.c2
-rw-r--r--xlators/features/barrier/src/barrier.h3
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-bitd-messages.h51
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.c12
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.h20
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-scrub.c80
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot.c284
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot.h20
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub-helpers.c97
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h1
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h73
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub.c324
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub.h22
-rw-r--r--xlators/features/changelog/lib/src/changelog-lib-messages.h30
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-api.c4
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-journal-handler.c25
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-reborp.c32
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog.c5
-rw-r--r--xlators/features/changelog/lib/src/gf-history-changelog.c68
-rw-r--r--xlators/features/changelog/src/changelog-barrier.c15
-rw-r--r--xlators/features/changelog/src/changelog-ev-handle.c9
-rw-r--r--xlators/features/changelog/src/changelog-helpers.c285
-rw-r--r--xlators/features/changelog/src/changelog-helpers.h35
-rw-r--r--xlators/features/changelog/src/changelog-messages.h122
-rw-r--r--xlators/features/changelog/src/changelog-rpc-common.c42
-rw-r--r--xlators/features/changelog/src/changelog-rpc.c19
-rw-r--r--xlators/features/changelog/src/changelog.c346
-rw-r--r--xlators/features/cloudsync/src/Makefile.am4
-rwxr-xr-xxlators/features/cloudsync/src/cloudsync-fops-c.py2
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c2
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c4
-rw-r--r--xlators/features/cloudsync/src/cloudsync.c88
-rw-r--r--xlators/features/cloudsync/src/cloudsync.h2
-rw-r--r--xlators/features/gfid-access/src/gfid-access.c6
-rw-r--r--xlators/features/index/src/index.c4
-rw-r--r--xlators/features/leases/src/leases-internal.c4
-rw-r--r--xlators/features/leases/src/leases.h35
-rw-r--r--xlators/features/locks/src/clear.c4
-rw-r--r--xlators/features/locks/src/common.c326
-rw-r--r--xlators/features/locks/src/common.h41
-rw-r--r--xlators/features/locks/src/entrylk.c35
-rw-r--r--xlators/features/locks/src/inodelk.c179
-rw-r--r--xlators/features/locks/src/locks.h41
-rw-r--r--xlators/features/locks/src/posix.c254
-rw-r--r--xlators/features/locks/src/reservelk.c2
-rw-r--r--xlators/features/marker/src/marker-quota.c26
-rw-r--r--xlators/features/metadisp/Makefile.am3
-rw-r--r--xlators/features/metadisp/src/Makefile.am38
-rw-r--r--xlators/features/metadisp/src/backend.c45
-rw-r--r--xlators/features/metadisp/src/fops-tmpl.c10
-rw-r--r--xlators/features/metadisp/src/gen-fops.py160
-rw-r--r--xlators/features/metadisp/src/metadisp-create.c101
-rw-r--r--xlators/features/metadisp/src/metadisp-fops.h51
-rw-r--r--xlators/features/metadisp/src/metadisp-fsync.c54
-rw-r--r--xlators/features/metadisp/src/metadisp-lookup.c90
-rw-r--r--xlators/features/metadisp/src/metadisp-open.c70
-rw-r--r--xlators/features/metadisp/src/metadisp-readdir.c65
-rw-r--r--xlators/features/metadisp/src/metadisp-setattr.c90
-rw-r--r--xlators/features/metadisp/src/metadisp-stat.c124
-rw-r--r--xlators/features/metadisp/src/metadisp-unlink.c160
-rw-r--r--xlators/features/metadisp/src/metadisp.c46
-rw-r--r--xlators/features/metadisp/src/metadisp.h45
-rw-r--r--xlators/features/quiesce/src/quiesce.c42
-rw-r--r--xlators/features/quota/src/quota-enforcer-client.c20
-rw-r--r--xlators/features/quota/src/quota.c125
-rw-r--r--xlators/features/quota/src/quota.h8
-rw-r--r--xlators/features/quota/src/quotad-aggregator.c16
-rw-r--r--xlators/features/read-only/src/worm-helper.c15
-rw-r--r--xlators/features/read-only/src/worm.c94
-rw-r--r--xlators/features/shard/src/shard.c539
-rw-r--r--xlators/features/shard/src/shard.h7
-rw-r--r--xlators/features/snapview-client/src/snapview-client-messages.h35
-rw-r--r--xlators/features/snapview-client/src/snapview-client.c384
-rw-r--r--xlators/features/snapview-server/src/snapview-server-mgmt.c25
-rw-r--r--xlators/features/trash/src/trash.c4
-rw-r--r--xlators/features/upcall/src/upcall-internal.c10
-rwxr-xr-xxlators/features/utime/src/utime-gen-fops-c.py10
-rw-r--r--xlators/features/utime/src/utime-helpers.h1
-rw-r--r--xlators/features/utime/src/utime.c31
-rw-r--r--xlators/meta/src/meta-helpers.c9
-rw-r--r--xlators/mgmt/glusterd/src/Makefile.am19
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-bitrot.c87
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-brick-ops.c380
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c19
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-errno.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-ganesha.c927
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-geo-rep.c146
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-geo-rep.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c3
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c11
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handler.c671
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handshake.c264
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-hooks.c108
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-hooks.h4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-log-ops.c15
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mem-types.h1
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-messages.h153
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c247
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt.c432
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt.h10
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mountbroker.c48
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mountbroker.h6
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-nfs-svc.c3
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-nfs-svc.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.c834
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-peer-utils.c309
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-peer-utils.h9
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-pmap.c32
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-pmap.h4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-quota.c77
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-quotad-svc.c4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-rebalance.c86
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-replace-brick.c28
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-reset-brick.c14
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-rpc-ops.c92
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-scrub-svc.c4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-server-quorum.c13
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-shd-svc.c121
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-sm.c211
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapd-svc.c20
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapd-svc.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c337
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h10
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot.c241
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-statedump.c5
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.c511
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.h17
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-helper.c122
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c38
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h17
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-syncop.c83
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-syncop.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c3
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c1212
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.h38
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.c781
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.h20
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volume-ops.c538
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volume-set.c188
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.c306
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.h393
-rw-r--r--xlators/mount/fuse/src/fuse-bridge.c423
-rw-r--r--xlators/mount/fuse/src/fuse-bridge.h58
-rw-r--r--xlators/mount/fuse/src/fuse-helpers.c110
-rwxr-xr-xxlators/mount/fuse/utils/mount.glusterfs.in39
-rwxr-xr-xxlators/mount/fuse/utils/mount_glusterfs.in19
-rw-r--r--xlators/nfs/server/src/acl3.c27
-rw-r--r--xlators/nfs/server/src/acl3.h2
-rw-r--r--xlators/nfs/server/src/auth-cache.c4
-rw-r--r--xlators/nfs/server/src/mount3.c59
-rw-r--r--xlators/nfs/server/src/mount3udp_svc.c2
-rw-r--r--xlators/nfs/server/src/nfs-common.c5
-rw-r--r--xlators/nfs/server/src/nfs-fops.c2
-rw-r--r--xlators/nfs/server/src/nfs.c14
-rw-r--r--xlators/nfs/server/src/nfs3-fh.c6
-rw-r--r--xlators/nfs/server/src/nfs3-helpers.c4
-rw-r--r--xlators/nfs/server/src/nfs3.c54
-rw-r--r--xlators/nfs/server/src/nlm4.c77
-rw-r--r--xlators/nfs/server/src/nlmcbk_svc.c5
-rw-r--r--xlators/performance/io-cache/src/io-cache-messages.h39
-rw-r--r--xlators/performance/io-cache/src/io-cache.c171
-rw-r--r--xlators/performance/io-cache/src/io-cache.h27
-rw-r--r--xlators/performance/io-cache/src/ioc-inode.c14
-rw-r--r--xlators/performance/io-cache/src/page.c38
-rw-r--r--xlators/performance/io-threads/src/io-threads-messages.h14
-rw-r--r--xlators/performance/io-threads/src/io-threads.c53
-rw-r--r--xlators/performance/md-cache/src/md-cache.c430
-rw-r--r--xlators/performance/nl-cache/src/nl-cache-helper.c55
-rw-r--r--xlators/performance/nl-cache/src/nl-cache.c6
-rw-r--r--xlators/performance/nl-cache/src/nl-cache.h2
-rw-r--r--xlators/performance/open-behind/src/open-behind-messages.h6
-rw-r--r--xlators/performance/open-behind/src/open-behind.c1324
-rw-r--r--xlators/performance/quick-read/src/quick-read.c50
-rw-r--r--xlators/performance/quick-read/src/quick-read.h2
-rw-r--r--xlators/performance/read-ahead/src/read-ahead.c9
-rw-r--r--xlators/performance/readdir-ahead/src/readdir-ahead.c4
-rw-r--r--xlators/performance/write-behind/src/write-behind.c8
-rw-r--r--xlators/protocol/client/src/client-callback.c89
-rw-r--r--xlators/protocol/client/src/client-common.c30
-rw-r--r--xlators/protocol/client/src/client-handshake.c712
-rw-r--r--xlators/protocol/client/src/client-helpers.c65
-rw-r--r--xlators/protocol/client/src/client-lk.c31
-rw-r--r--xlators/protocol/client/src/client-messages.h123
-rw-r--r--xlators/protocol/client/src/client-rpc-fops.c700
-rw-r--r--xlators/protocol/client/src/client-rpc-fops_v2.c721
-rw-r--r--xlators/protocol/client/src/client.c1210
-rw-r--r--xlators/protocol/client/src/client.h43
-rw-r--r--xlators/protocol/server/src/Makefile.am4
-rw-r--r--xlators/protocol/server/src/server-common.c32
-rw-r--r--xlators/protocol/server/src/server-handshake.c124
-rw-r--r--xlators/protocol/server/src/server-helpers.c131
-rw-r--r--xlators/protocol/server/src/server-messages.h179
-rw-r--r--xlators/protocol/server/src/server-rpc-fops.c204
-rw-r--r--xlators/protocol/server/src/server-rpc-fops_v2.c906
-rw-r--r--xlators/protocol/server/src/server.c203
-rw-r--r--xlators/protocol/server/src/server.h1
-rw-r--r--xlators/storage/posix/src/posix-common.c241
-rw-r--r--xlators/storage/posix/src/posix-entry-ops.c388
-rw-r--r--xlators/storage/posix/src/posix-gfid-path.c89
-rw-r--r--xlators/storage/posix/src/posix-gfid-path.h4
-rw-r--r--xlators/storage/posix/src/posix-handle.c202
-rw-r--r--xlators/storage/posix/src/posix-handle.h33
-rw-r--r--xlators/storage/posix/src/posix-helpers.c588
-rw-r--r--xlators/storage/posix/src/posix-inode-fd-ops.c364
-rw-r--r--xlators/storage/posix/src/posix-inode-handle.h13
-rw-r--r--xlators/storage/posix/src/posix-messages.h4
-rw-r--r--xlators/storage/posix/src/posix-metadata.c153
-rw-r--r--xlators/storage/posix/src/posix-metadata.h6
-rw-r--r--xlators/storage/posix/src/posix.h139
-rw-r--r--xlators/system/posix-acl/src/posix-acl.c18
682 files changed, 41556 insertions, 34033 deletions
diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE
index 306b4eebc15..386ed2d8dd5 100644
--- a/.github/ISSUE_TEMPLATE
+++ b/.github/ISSUE_TEMPLATE
@@ -1,13 +1,30 @@
-This repository (glusterfs) uses github issues to track feature requests *only*.
+<!-- Please use this template while reporting an issue, providing as much information as possible. Failure to do so may result in a delayed response. Thank you! -->
-For assistance with bugs, please file a bug in our bugzilla instance [1]
+**Description of problem:**
-For other queries regarding glusterfs, please post to the users [2] or devel [3] lists as appropriate.
-You may further want to subscribe to these lists or view the archives for these lists, see [4] [5].
+**The exact command to reproduce the issue**:
+
+
+**The full output of the command that failed**:
+<details>
+
+
+
+</details>
+
+**Expected results:**
+
+
+**Additional info:**
+
+
+**- The output of the `gluster volume info` command**:
+<details>
+
+
+
+</details>
+
+**- The operating system / glusterfs version**:
-[1] glusterfs Bugzilla: https://bugzilla.redhat.com/enter_bug.cgi?product=GlusterFS
-[2] Gluster users list ID: gluster-users@gluster.org
-[3] Gluster devel list ID: gluster-devel@gluster.org
-[4] Gluster users list subscribe/view archives: https://lists.gluster.org/mailman/listinfo/gluster-users
-[5] Gluster devel list subscribe/view archives: https://lists.gluster.org/mailman/listinfo/gluster-devel
diff --git a/.github/PULL_REQUEST_TEMPLATE b/.github/PULL_REQUEST_TEMPLATE
index 0ec6eb319ee..e69de29bb2d 100644
--- a/.github/PULL_REQUEST_TEMPLATE
+++ b/.github/PULL_REQUEST_TEMPLATE
@@ -1,33 +0,0 @@
-Many thanks for your interest in improving GlusterFS!
-
-GlusterFS does not use GitHub Pull-Requests. Instead, changes are reviewed
-on the Gerrit instance of the Gluster Community at https://review.gluster.org
-
-In order to send your changes for review, follow these steps:
-
-1. login on https://review.gluster.org with your GitHub account
-2. add a public ssh-key to your profile on https://review.gluster.org/#/settings/ssh-keys
-3. add the Gerrit remote to your locally cloned git repository
-
- $ git remote add gerrit ssh://$USER@review.gluster.org/glusterfs.git
-
-4. configure the commit hooks
-
- $ git review --setup
-
-5. post your changes to Gerrit
-
- $ git review
-
-
-You may need to install the 'git-review' package if 'git review' is not
-available. Note that the hooks for the repository make sure to add a ChangeId
-label in the commit messages. Gerrit uses the ChangeId to track single patches
-and its updated versions.
-
-For more details, see the documented development workflow at
- http://gluster.readthedocs.io/en/latest/Developer-guide/Simplified-Development-Workflow/
-
-If there are any troubles or difficulties with these instructions, please
-contact us on gluster-devel@gluster.org or on Freenode IRC in the #gluster-dev
-channel.
diff --git a/.github/RELEASE_TRACKER_TEMPLATE b/.github/RELEASE_TRACKER_TEMPLATE
new file mode 100644
index 00000000000..502bbd5556c
--- /dev/null
+++ b/.github/RELEASE_TRACKER_TEMPLATE
@@ -0,0 +1,12 @@
+<!-- Please use this template while creating a tracker issue -->
+
+**Description of problem:**
+A tracker issue listing the issues that will be fixed as part of this release
+
+
+**Major or minor release**:
+
+
+**Release version**:
+
+
diff --git a/.github/stale.yml b/.github/stale.yml
new file mode 100644
index 00000000000..460e327c6ea
--- /dev/null
+++ b/.github/stale.yml
@@ -0,0 +1,25 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 210
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 15
+# Issues with these labels will never be considered stale
+exemptLabels:
+ - pinned
+ - security
+# Label to use when marking an issue as stale
+staleLabel: wontfix
+
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+ Thank you for your contributions.
+
+ We noticed that this issue has had no activity in the last ~7 months, so we
+ are marking it as stale.
+
+ It will be closed in 2 weeks if no one responds with a comment here.
+
+
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: >
+ Closing this issue as there has been no update since it was marked stale.
+ If this issue is still valid, feel free to reopen it.
diff --git a/.gitignore b/.gitignore
index 8e05080c947..fc5ba586f8e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,7 @@ test-driver
*.patch
.libs
.deps
+.dirstamp
# Softlinks to test and log
log
@@ -86,17 +87,14 @@ glusterfsd/src/glusterfsd
glusterfsd/src/gf_attach
heal/src/glfsheal
libgfchangelog.pc
-libgfdb.pc
libglusterfs/src/graph.lex.c
libglusterfs/src/y.tab.c
libglusterfs/src/y.tab.h
libglusterfs/src/defaults.c
-libglusterfs/src/glusterfs-fops.h
libglusterfs/src/cli1-xdr.h
libglusterfs/src/protocol-common.h
libtool
# copied XDR for cyclic libglusterfs <-> rpc-header dependency
-rpc/xdr/gen/*.x
run-tests.sh
!tests/basic/fuse/Makefile
!tests/basic/gfapi/Makefile
@@ -110,14 +108,6 @@ extras/peer_add_secret_pub
tools/gfind_missing_files/gcrawler
tools/glusterfind/glusterfind
tools/glusterfind/src/tool.conf
-# Generated by fdl xlator
-xlators/experimental/fdl/src/fdl.c
-xlators/experimental/fdl/src/gf_logdump
-xlators/experimental/fdl/src/gf_recon
-xlators/experimental/fdl/src/libfdl.c
-xlators/experimental/fdl/src/librecon.c
-xlators/experimental/jbr-client/src/jbrc-cg.c
-xlators/experimental/jbr-server/src/jbr-cg.c
# Eventing
events/src/eventsapiconf.py
extras/systemd/glustereventsd.service
@@ -131,3 +121,5 @@ xlators/features/cloudsync/src/cloudsync-autogen-fops.c
xlators/features/cloudsync/src/cloudsync-autogen-fops.h
xlators/features/utime/src/utime-autogen-fops.c
xlators/features/utime/src/utime-autogen-fops.h
+tests/basic/metadisp/ftruncate
+xlators/features/metadisp/src/fops.c
diff --git a/.testignore b/.testignore
index 190573191e1..fe8f838bf2b 100644
--- a/.testignore
+++ b/.testignore
@@ -1,4 +1,6 @@
.github/ISSUE_TEMPLATE
+.github/PULL_REQUEST_TEMPLATE
+.github/stale.yml
.gitignore
.mailmap
.testignore
@@ -6,7 +8,7 @@
rfc.sh
submit-for-review.sh
AUTHORS
-CONTRIBUTING
+CONTRIBUTING.md
COPYING-GPLV2
COPYING-LGPLV3
ChangeLog
diff --git a/CONTRIBUTING b/CONTRIBUTING
deleted file mode 100644
index 7bccd88d7e5..00000000000
--- a/CONTRIBUTING
+++ /dev/null
@@ -1,25 +0,0 @@
- Developer's Certificate of Origin 1.1
-
- By making a contribution to this project, I certify that:
-
- (a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
- (b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
- (c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
- (d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000000..65fc3497104
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,114 @@
+# GlusterFS project Contribution guidelines
+
+## Development Workflow
+
+We largely follow the GitHub workflow described in [this document](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests). If you are not familiar with it, it is recommended to read it before continuing here.
+
+
+#### Get the Repository setup
+
+0. Fork Repository
+ - Fork [GlusterFS repository](https://github.com/gluster/glusterfs/fork).
+
+1. Clone Repository
+ - Clone the glusterfs repo freshly from github using below steps.
+ - Clone the glusterfs repo freshly from GitHub using the steps below.
+```
+ git clone git@github.com:${username}/glusterfs.git
+ cd glusterfs/
+ git remote add upstream git@github.com:gluster/glusterfs.git
+```
+
+The above two tasks are one-time setup. You can continue to use the same repository for all future work.
+
+#### Development & Other flows
+
+0. Issue:
+ - Make sure there is an issue filed for the task you are working on.
+ - If it is not filed yet, open an issue with a full description.
+ - If it is a bug fix, add the label "Type:Bug".
+ - If it is an RFC, provide all the documentation and request the "DocApproved" and "SpecApproved" labels.
+
+1. Code:
+ - Start coding
+ - Build and test locally
+ - Make sure clang-format is installed and is run on the patch.
+
+2. Keep up-to-date
+ - GlusterFS is a large project with many developers, so new patches land almost every day.
+ - It is critical for a developer to stay up to date with the `devel` branch so that the PR is conflict-free when it is opened.
+ - Git provides many ways to stay up to date; below is one of them:
+```
+ git fetch upstream
+ git rebase upstream/devel
+```
+ - It is recommended to push to your repo every day, so you don't lose any work.
+ - This can be done with `./rfc.sh` (or `git push origin HEAD:issueNNN`); see the workflow sketch below.
+
+3. Commit Message / PR description:
+ - The name of the branch on your personal fork can start with issueNNNN, followed by anything of your choice.
+ - PR titles continue to use the format "component: \<title\>", as is current practice.
+ - When you open a PR, a reference issue for the commit is mandatory in GlusterFS.
+ - The commit message must contain either `Fixes: #NNNN` or `Updates: #NNNN` on a separate line.
+ - Here, NNNN is the issue ID in the glusterfs repository.
+ - Each commit needs a "Signed-off-by: Name \<email\>" line from the author.
+ - This can be added with the `-s` option of `git commit`.
+ - If the PR is not ready for review, apply the label `work-in-progress`.
+ - If the "Draft PR" option is available to you, use that instead.
+
+4. Tests:
+ - All the required smoke tests are triggered automatically.
+ - Developers can retrigger the smoke tests by commenting **"/recheck smoke"**.
+ - The "regression" tests are triggered by a **"/run regression"** comment from developers in the [@gluster-maintainers](https://github.com/orgs/gluster/teams/gluster-maintainers) group.
+ - Ask for help in a PR comment if you have any questions about the process!
+
+5. Review Process:
+ - `+2` : equivalent to "Approve" from the people in the maintainers group.
+ - `+1` : can be given by a maintainer/reviewer by explicitly stating that in the comment.
+ - `-1` : provide details on required changes and pick "Request Changes" while submitting your review.
+ - `-2` : expressed by adding the `DO-NOT-MERGE` label.
+
+ - Any further discussion can happen as comments in the PR.
+
+6. Making changes:
+ - There are two ways to submit changes after addressing review comments:
+ - Commit the changes as a new commit on top of the original commits in the branch, and push them to the same branch (issueNNNN).
+ - Amend the original commit with the `--amend` option, and push to the same branch with the `--force` option.
+
+7. Merging:
+ - The GlusterFS project follows the 'Squash and Merge' method.
+ - This mainly preserves the historic Gerrit convention of one patch in `git log` per review URL.
+ - It also makes every merge a complete patch that has passed all tests.
+ - Merging a patch is expected to be done by the maintainers.
+ - It can be done when all the tests (smoke and regression) pass.
+ - And when the PR has an 'Approved' flag from the corresponding maintainer.
+ - If you feel there is a delay, feel free to add a comment, discuss it in the Slack channel, or send an email.
+
+## By contributing to this project, the contributor agrees to the following.
+
+### Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
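A minimal sketch of the workflow described in the new CONTRIBUTING.md above, assuming a hypothetical issue #1234, a fork set up as `origin`, and `upstream` pointing at gluster/glusterfs as in the clone snippet (the branch name and commit subject are illustrative only):

```
# branch named after the issue you are working on
git fetch upstream
git checkout -b issue1234 upstream/devel

# ...edit, build, and run clang-format on the changed files...

# commit with a sign-off and a reference to the issue
git commit -s -m "cli: fix option parsing

Fixes: #1234"

# stay conflict-free with the devel branch before opening the PR
git fetch upstream
git rebase upstream/devel

# push the branch to your fork (./rfc.sh wraps a similar push), then open the PR
git push origin HEAD:issue1234
```

After review, follow-up changes go either as new commits pushed to the same branch or as an amended commit pushed with `--force`, as described in the "Making changes" section.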
diff --git a/MAINTAINERS b/MAINTAINERS
index 7ff1142f735..953e8755fd9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -51,15 +51,10 @@ Descriptions of section entries:
General Project Architects
--------------------------
-M: Jeff Darcy <jeff@pl.atyp.us>
-M: Vijay Bellur <vbellur@redhat.com>
-P: Amar Tumballi <amarts@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
-P: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Shyamsundar Ranganathan <srangana@redhat.com>
-P: Niels de Vos <ndevos@redhat.com>
-P: Xavier Hernandez <xhernandez@datalab.es>
-
+M: Amar Tumballi <amarts@gmail.com>
+M: Xavier Hernandez <xhernandez@redhat.com>
+P: Pranith Karampuri <pranith.karampuri@phonepe.com>
+P: Atin Mukherjee <amukherj@redhat.com>
xlators:
--------
@@ -71,13 +66,14 @@ F: xlators/system/posix-acl/
Arbiter
M: Ravishankar N <ravishankar@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
+P: Pranith Karampuri <pranith.karampuri@phonepe.com>
S: Maintained
F: xlators/features/arbiter/
Automatic File Replication (AFR)
-M: Pranith Karampuri <pkarampu@redhat.com>
-P: Ravishankar N <ravishankar@redhat.com>
+M: Pranith Karampuri <pranith.karampuri@phonepe.com>
+M: Ravishankar N <ravishankar@redhat.com>
+P: Karthik US <ksubrahm@redhat.com>
S: Maintained
F: xlators/cluster/afr/
@@ -87,10 +83,6 @@ P: Atin Mukherjee <amukherj@redhat.com>
S: Maintained
F: xlators/features/barrier
-Block Device
-S: Orphan
-F: xlators/storage/bd/
-
BitRot
M: Kotresh HR <khiremat@redhat.com>
P: Raghavendra Bhat <rabhat@redhat.com>
@@ -104,17 +96,14 @@ S: Maintained
F: xlators/features/changelog/
Distributed Hashing Table (DHT)
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-M: Nithya Balachandran <nbalacha@redhat.com>
P: Susant Palai <spalai@redhat.com>
S: Maintained
F: xlators/cluster/dht/
Erasure Coding
-M: Pranith Karampuri <pkarampu@redhat.com>
-M: Xavier Hernandez <xhernandez@datalab.es>
+M: Pranith Karampuri <pranith.karampuri@phonepe.com>
+M: Xavier Hernandez <xhernandez@redhat.com>
P: Ashish Pandey <aspandey@redhat.com>
-P: Sunil Kumar Acharya <sheggodu@redhat.com>
S: Maintained
F: xlators/cluster/ec/
@@ -124,21 +113,19 @@ S: Maintained
F: xlators/debug/error-gen/
FUSE Bridge
-M: Niels de Vos <ndevos@redhat.com>
-P: Csaba Henk <chenk@redhat.com>
+M: Csaba Henk <chenk@redhat.com>
+P: Niels de Vos <ndevos@redhat.com>
S: Maintained
F: xlators/mount/
Index
-M: Pranith Karampuri <pkarampu@redhat.com>
+M: Pranith Karampuri <pranith.karampuri@phonepe.com>
P: Ravishankar N <ravishankar@redhat.com>
S: Maintained
F: xlators/features/index/
IO Cache
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
-P: Nithya Balachandran <nbalacha@redhat.com>
+P: Mohammed Rafi KC <rafi.kavungal@iternity.com>
S: Maintained
F: xlators/performance/io-cache/
@@ -149,7 +136,7 @@ S: Maintained
F: xlators/debug/io-stats/
IO threads
-M: Pranith Karampuri <pkarampu@redhat.com>
+M: Pranith Karampuri <pranith.karampuri@phonepe.com>
P: Ravishankar N <ravishankar@redhat.com>
S: Maintained
F: xlators/performance/io-threads/
@@ -163,18 +150,17 @@ F: xlators/features/leases/
Locks
M: Krutika Dhananjay <kdhananj@redhat.com>
+P: Xavier Hernandez <xhernandez@redhat.com>
S: Maintained
F: xlators/features/locks/
Marker
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
M: Kotresh HR <khiremat@redhat.com>
-P: Sanoj Unnikrishnan <sunnikri@redhat.com>
S: Maintained
F: xlators/features/marker/
Meta
-M: Mohammed Rafi KC <rkavunga@redhat.com>
+M: Mohammed Rafi KC <rafi.kavungal@iternity.com>
S: Maintained
F: xlators/features/meta/
@@ -186,53 +172,44 @@ F: xlators/performance/md-cache/
Negative-lookup Cache
M: Poornima G <pgurusid@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
+P: Pranith Karampuri <pranith.karampuri@phonepe.com>
S: Maintained
F: xlators/performance/nl-cache/
-NFS
-M: Shreyas Siravara <sshreyas@fb.com>
-M: Jeff Darcy <jeff@pl.atyp.us>
-P: Jiffin Tony Thottan <jthottan@redhat.com>
-P: Soumya Koduri <skoduri@redhat.com>
-S: Maintained
+gNFS
+M: Jiffin Tony Thottan <jthottan@redhat.com>
+P: Xie Changlong <xiechanglong@cmss.chinamobile.com>
+P: Amar Tumballi <amarts@gmail.com>
+S: Odd Fixes
F: xlators/nfs/server/
Open-behind
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Milind Changire <mchangir@redhat.com>
S: Maintained
F: xlators/performance/open-behind/
Posix:
M: Raghavendra Bhat <raghavendra@redhat.com>
+P: Kotresh HR <khiremat@redhat.com>
P: Krutika Dhananjay <kdhananj@redhat.com>
-P: Jiffin Tony Thottan <jthottan@redhat.com>
S: Maintained
F: xlators/storage/posix/
Quick-read
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Milind Changire <mchangir@redhat.com>
S: Maintained
F: xlators/performance/quick-read/
Quota
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Sanoj Unnikrishnan <sunnikri@redhat.com>
M: Shyamsundar Ranganathan <srangana@redhat.com>
+P: Hari Gowtham <hgowtham@redhat.com>
S: Maintained
F: xlators/features/quota/
Read-ahead
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
P: Csaba Henk <chenk@redhat.com>
S: Maintained
F: xlators/performance/read-ahead/
Readdir-ahead
-M: Poornima G <pgurusid@redhat.com>
-P: Krutika Dhananjay <kdhananj@redhat.com>
S: Maintained
F: xlators/performance/readdir-ahead/
@@ -256,7 +233,6 @@ S: Maintained
F: xlators/features/upcall/
Write-behind
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
P: Csaba Henk <chenk@redhat.com>
S: Maintained
F: xlators/performance/write-behind/
@@ -271,45 +247,24 @@ M: Susant Kumar Palai <spalai@redhat.com>
S: Maintained
F: xlators/features/cloudsync/
-Experimental Features:
-----------------------
-
-RIO-Distribution
-M: Shyamsundar Ranganathan <srangana@redhat.com>
-P: Kotresh HR <khiremat@redhat.com>
-P: Susant Palai <spalai@redhat.com>
-S: Maintained
-F: xlators/experimental/dht2/
-
-Journal Based Replication
-M: Jeff Darcy <jeff@pl.atyp.us>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
-S: Maintained
-F: xlators/experimental/fdl/
-F: xlators/experimenta/jbr-client/
-F: xlators/experimental/jbr-server/
-
-
Other bits of code:
-------------------
Doc
M: Humble Chirammal <hchiramm@redhat.com>
M: Raghavendra Talur <rtalur@redhat.com>
-M: Prashanth Pai <ppai@redhat.com>
S: Maintained
F: doc/
Geo Replication
M: Aravinda V K <avishwan@redhat.com>
M: Kotresh HR <khiremat@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
+M: Sunny Kumar <sunkumar@redhat.com>
S: Maintained
F: geo-replication/
Glusterfind
-M: Milind Changire <mchangir@redhat.com>
-P: Aravinda VK <avishwan@redhat.com>
+M: Aravinda VK <avishwan@redhat.com>
S: Maintained
F: tools/glusterfind/
@@ -322,12 +277,12 @@ S: Maintained
F: api/
libglusterfs
-M: Amar Tumballi <amarts@redhat.com>
+M: Amar Tumballi <amarts@gmail.com>
+M: Xavier Hernandez <xhernandez@redhat.com>
M: Jeff Darcy <jeff@pl.atyp.us>
P: Kaleb Keithley <kkeithle@redhat.com>
P: Niels de Vos <ndevos@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
-P: Raghavendra Gowdappa <rgowdapp@redhat.com>
+P: Pranith Karampuri <pranith.karampuri@phonepe.com>
P: Shyamsundar Ranganathan <srangana@redhat.com>
S: Maintained
F: libglusterfs/
@@ -335,43 +290,26 @@ F: libglusterfs/
xxhash
M: Aravinda VK <avishwan@redhat.com>
M: Kotresh HR <khiremat@redhat.com>
-P: Amar Tumballi <amarts@redhat.com>
+P: Yaniv Kaul <ykaul@redhat.com>
S: Maintained
F: contrib/xxhash/
T: https://github.com/Cyan4973/xxHash.git
-Management Daemon - glusterd1
+Management Daemon - glusterd
M: Atin Mukherjee <amukherj@redhat.com>
-M: Samikshan Bairagya <samikshan@gmail.com>
+M: Mohit Agrawal <moagrawa@redhat.com>
+M: Sanju Rakonde <srakonde@redhat.com>
S: Maintained
F: cli/
F: xlators/mgmt/glusterd/
-Management Daemon - glusterd2
-M: Kaushal M <kaushal@redhat.com>
-M: Prashanth Pai <ppai@redhat.com>
-P: Aravinda VK <avishwan@redhat.com>
-S: Maintained
-T: https://github.com/gluster/glusterd2.git
-
Protocol
-M: Kaleb Keithley <kkeithle@redhat.com>
M: Niels de Vos <ndevos@redhat.com>
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
+P: Mohammed Rafi KC <rafi.kavungal@iternity.com>
S: Maintained
F: xlators/protocol/
-RDMA subsystem
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-M: Amar Tumballi <amarts@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
-S: Maintained
-F: rpc/rpc-transport/rdma/
-
Remote Procedure Call subsystem
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Milind Changire <mchangir@redhat.com>
P: Mohit Agrawal <moagrawa@redhat.com>
S: Maintained
F: rpc/rpc-lib/
@@ -379,17 +317,16 @@ F: rpc/xdr/
Snapshot
M: Raghavendra Bhat <raghavendra@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
+P: Mohammed Rafi KC <rafi.kavungal@iternity.com>
P: Sunny Kumar <sunkumar@redhat.com>
S: Maintained
F: xlators/mgmt/glusterd/src/glusterd-snap*
F: extras/snap-scheduler.py
Socket subsystem
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
P: Krutika Dhananjay <kdhananj@redhat.com>
P: Milind Changire <mchangir@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
+P: Mohammed Rafi KC <rafi.kavungal@iternity.com>
P: Mohit Agrawal <moagrawa@redhat.com>
S: Maintained
F: rpc/rpc-transport/socket/
@@ -418,16 +355,18 @@ F: extras/systemd/glustereventsd*
Distribution Specific:
----------------------
Build:
-M: Kaleb Keithley <kkeithle@redhat.com>
M: Niels de Vos <ndevos@redhat.com>
+M: Hari Gowtham <hgowtham@redhat.com>
P: Anoop C S <anoopcs@redhat.com>
-P: Kaushal M <kaushal@redhat.com>
P: Raghavendra Talur <rtalur@redhat.com>
+P: Rinku Kothiya <rkothiya@redhat.com>
S: Maintained
Debian packages on download.gluster.org
M: packaging@gluster.org
M: Kaleb Keithley <kkeithle@redhat.com>
+P: Sheetal Pamecha <spamecha@redhat.com>
+P: Shwetha Acharya <sacharya@redhat.com>
S: Maintained
W: http://download.gluster.org/pub/gluster/glusterfs/LATEST/Debian/Debian.README
T: https://github.com/gluster/glusterfs-debian.git
@@ -435,6 +374,8 @@ T: https://github.com/gluster/glusterfs-debian.git
OpenSuSE
M: packaging@gluster.org
M: Kaleb Keithley <kkeithle@redhat.com>
+P: Sheetal Pamecha <spamecha@redhat.com>
+P: Shwetha Acharya <sacharya@redhat.com>
S: Maintained
W: https://build.opensuse.org/repositories/home:glusterfs
W: https://download.gluster.org/pub/gluster/glusterfs/LATEST/SuSE/SuSE.README
@@ -451,6 +392,8 @@ T: https://github.com/CentOS-Storage-SIG/glusterfs.git
Ubuntu PPA
M: packaging@gluster.org
M: Kaleb Keithley <kkeithle@redhat.com>
+P: Sheetal Pamecha <spamecha@redhat.com>
+P: Shwetha Acharya <sacharya@redhat.com>
S: Maintained
W: https://launchpad.net/~gluster
W: http://download.gluster.org/pub/gluster/glusterfs/LATEST/Ubuntu/Ubuntu.README
@@ -460,22 +403,10 @@ Related projects
----------------
Gluster Block
M: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
-P: Niels de Vos <ndevos@redhat.com>
+M: Xiubo Li <xiubli@redhat.com>
S: Maintained
T: https://github.com/gluster/gluster-block.git
-Gluster Object
-P: Ram Edara <redara@redhat.com>
-P: Saravanakumar Arumugam <sarumuga@redhat.com>
-S: Maintained
-T: https://github.com/gluster/gluster-swift.git
-
-GlusterFS Hadoop HCFS plugin
-S: Orphan
-W: https://github.com/gluster/glusterfs-hadoop/wiki
-T: https://github.com/gluster/glusterfs-hadoop.git
-
GlusterFS core-utils
M: Anoop C S <anoopcs@redhat.com>
S: Maintained
@@ -489,11 +420,6 @@ S: Maintained
T: git://github.com/nfs-ganesha/nfs-ganesha.git
F: src/nfs-ganesha~/src/FSAL/FSAL_GLUSTER/
-Nagios Monitoring
-M: Sahina Bose <sabose@redhat.com>
-S: Maintained
-T: https://github.com/gluster/nagios-plugins-gluster.git
-
QEMU integration
M: Niels de Vos <ndevos@redhat.com>
M: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
@@ -536,12 +462,11 @@ Infrastructure
Platform
M: Michael Scherer <misc@redhat.com>
P: Shyamsundar Ranganathan <srangana@redhat.com>
-P: Amar Tumballi <amarts@redhat.com>
+P: Amar Tumballi <amarts@gmail.com>
Continuous Integration
M: Michael Scherer <misc@redhat.com>
M: Deepshikha Khandelwal <dkhandel@redhat.com>
-P: Kaushal M <kaushal@redhat.com>
P: Niels de Vos <ndevos@redhat.com>
Special Thanks
@@ -549,6 +474,17 @@ Special Thanks
GlusterFS would not be possible without the contributions of:
+
+M: Vijay Bellur <vbellur@redhat.com>
+M: Jeff Darcy <jeff@pl.atyp.us>
+M: Shreyas Siravara <sshreyas@fb.com>
+M: Kaushal M <kaushal@redhat.com>
+M: Nigel Babu
+M: Prashanth Pai
+P: Sanoj Unnikrishnan
+P: Milind Changire <mchangir@redhat.com>
+P: Sunil Kumar Acharya <sheggodu@redhat.com>
+M: Samikshan Bairagya <samikshan@gmail.com>
M: Chris Hertel
M: M. Mohan Kumar <mohan@in.ibm.com>
M: Shishir Gowda <gowda.shishir@gmail.com>
@@ -570,3 +506,5 @@ M: Jay Vyas
M: Luis Pabon
M: Ira Cooper
M: Shwetha Panduranga
+M: Nithya Balachandran
+M: Raghavendra Gowdappa
diff --git a/Makefile.am b/Makefile.am
index e0c795f418f..98ea5c1038d 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -9,8 +9,9 @@ EXTRA_DIST = autogen.sh \
contrib/umountd \
$(shell find $(top_srcdir)/tests -type f -print)
-SUBDIRS = $(ARGP_STANDALONE_DIR) rpc/xdr/gen libglusterfs rpc api xlators \
- glusterfsd $(FUSERMOUNT_SUBDIR) doc extras cli heal \
+
+SUBDIRS = $(ARGP_STANDALONE_DIR) libglusterfs rpc libglusterd api \
+ glusterfsd xlators $(FUSERMOUNT_SUBDIR) doc extras cli heal \
@SYNCDAEMON_SUBDIR@ @UMOUNTD_SUBDIR@ tools events
pkgconfigdir = @pkgconfigdir@
@@ -18,9 +19,13 @@ pkgconfig_DATA = glusterfs-api.pc libgfchangelog.pc
CLEANFILES = glusterfs-api.pc libgfchangelog.pc contrib/umountd/Makefile
+clean-local:
+ find . -name '*.o' -o -name '*.lo' -o -name '.Po' | xargs rm -f
+
gitclean: distclean
find . -name Makefile.in -exec rm -f {} \;
find . -name mount.glusterfs -exec rm -f {} \;
+ find . -name .deps -o -name .libs | xargs rm -rf
rm -fr autom4te.cache
rm -f missing aclocal.m4 config.h.in config.guess config.sub ltmain.sh install-sh configure depcomp
@@ -48,4 +53,3 @@ gen-VERSION:
./build-aux/pkg-version --full \
> $(abs_top_builddir)/$(distdir)/VERSION; \
fi
-
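The `clean-local` hook and the extended `gitclean` target added above are reached through the usual automake entry points; a quick usage sketch from a configured source tree (assuming `./autogen.sh && ./configure` has already been run):

```
# "make clean" now also sweeps stray .o/.lo object files via the clean-local hook
make clean

# "make gitclean" runs distclean and additionally removes generated Makefile.in
# files, .deps/.libs directories and other autotools leftovers
make gitclean
```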
diff --git a/README.md b/README.md
index 92f829e431e..9d68e033782 100644
--- a/README.md
+++ b/README.md
@@ -3,8 +3,7 @@
petabytes. It provides interfaces for object, block and file storage.
## Development
- Contributions to gluster in the form of patches and new feature additions can
- be made by following steps outlined at [Developers Guide](http://docs.gluster.org/en/latest/Developer-guide/Developers-Index/#contributing-to-the-gluster-community).
+ The development workflow is documented in the [Contributors guide](CONTRIBUTING.md).
## Documentation
The Gluster documentation can be found at [Gluster Docs](http://docs.gluster.org).
diff --git a/api/src/README.Symbol_Versions b/api/src/README.Symbol_Versions
index d5cdedd826b..b6ec95f9311 100644
--- a/api/src/README.Symbol_Versions
+++ b/api/src/README.Symbol_Versions
@@ -1,3 +1,3 @@
-See .../doc/gfapi-symbol-versions/gfapi-symbol-versions.md
+See ../../doc/developer-guide/gfapi-symbol-versions.md
diff --git a/api/src/gfapi-messages.h b/api/src/gfapi-messages.h
index 68d12427aea..b9223940416 100644
--- a/api/src/gfapi-messages.h
+++ b/api/src/gfapi-messages.h
@@ -49,6 +49,99 @@ GLFS_MSGID(API, API_MSG_MEM_ACCT_INIT_FAILED, API_MSG_MASTER_XLATOR_INIT_FAILED,
API_MSG_INODE_LINK_FAILED, API_MSG_STATEDUMP_FAILED,
API_MSG_XREADDIRP_R_FAILED, API_MSG_LOCK_INSERT_MERGE_FAILED,
API_MSG_SETTING_LOCK_TYPE_FAILED, API_MSG_INODE_FIND_FAILED,
- API_MSG_FDCTX_SET_FAILED, API_MSG_UPCALL_SYNCOP_FAILED);
+ API_MSG_FDCTX_SET_FAILED, API_MSG_UPCALL_SYNCOP_FAILED,
+ API_MSG_INVALID_ARG, API_MSG_UPCALL_EVENT_NULL_RECEIVED,
+ API_MSG_FLAGS_HANDLE, API_MSG_FDCREATE_FAILED_ON_GRAPH,
+ API_MSG_TRANS_RDMA_DEP, API_MSG_TRANS_NOT_SUPPORTED,
+ API_MSG_FS_NOT_INIT, API_MSG_INVALID_SYSRQ,
+ API_MSG_DECODE_XDR_FAILED, API_MSG_NULL, API_MSG_CALL_NOT_SUCCESSFUL,
+ API_MSG_CALL_NOT_VALID, API_MSG_UNABLE_TO_DEL,
+ API_MSG_REMOTE_HOST_DISCONN, API_MSG_HANDLE_NOT_SET);
+#define API_MSG_ALLOC_FAILED_STR "Upcall allocation failed"
+#define API_MSG_LOCK_INSERT_MERGE_FAILED_STR \
+ "Lock insertion and splitting/merging failed"
+#define API_MSG_SETTING_LOCK_TYPE_FAILED_STR "Setting lock type failed"
+
+#define API_MSG_INVALID_ARG_STR "Invalid"
+#define API_MSG_INVALID_ENTRY_STR "Upcall entry validation failed"
+#define API_MSG_INODE_FIND_FAILED_STR "Unable to find inode entry"
+#define API_MSG_CREATE_HANDLE_FAILED_STR "handle creation failed"
+#define API_MSG_UPCALL_EVENT_NULL_RECEIVED_STR \
+ "Upcall_EVENT_NULL received. Skipping it"
+#define API_MSG_UPCALL_SYNCOP_FAILED_STR "Synctask for upcall failed"
+#define API_MSG_FDCREATE_FAILED_STR "Allocating anonymous fd failed"
+#define API_MSG_XREADDIRP_R_FAILED_STR "glfs_x_readdirp_r failed"
+#define API_MSG_FDCTX_SET_FAILED_STR "Setting fd ctx failed"
+#define API_MSG_FLAGS_HANDLE_STR "arg not set. Flags handled are"
+#define API_MSG_INODE_REFRESH_FAILED_STR "inode refresh failed"
+#define API_MSG_INODE_LINK_FAILED_STR "inode linking failed"
+#define API_MSG_GET_CWD_FAILED_STR "Failed to get cwd"
+#define API_MSG_FGETXATTR_FAILED_STR "fgetxattr failed"
+#define API_MSG_LOCKINFO_KEY_MISSING_STR "missing lockinfo key"
+#define API_MSG_FSYNC_FAILED_STR "fsync() failed"
+#define API_MSG_FDCREATE_FAILED_ON_GRAPH_STR "fd_create failed on graph"
+#define API_MSG_INODE_PATH_FAILED_STR "inode_path failed"
+#define API_MSG_SYNCOP_OPEN_FAILED_STR "syncop_open failed"
+#define API_MSG_LOCK_MIGRATE_FAILED_STR "lock migration failed on graph"
+#define API_MSG_OPENFD_SKIPPED_STR "skipping openfd in graph"
+#define API_MSG_FIRST_LOOKUP_GRAPH_FAILED_STR "first lookup on graph failed"
+#define API_MSG_CWD_GRAPH_REF_FAILED_STR "cwd refresh of graph failed"
+#define API_MSG_SWITCHED_GRAPH_STR "switched to graph"
+#define API_MSG_FSETXATTR_FAILED_STR "fsetxattr failed"
+#define API_MSG_MEM_ACCT_INIT_FAILED_STR "Memory accounting init failed"
+#define API_MSG_MASTER_XLATOR_INIT_FAILED_STR \
+ "master xlator for initialization failed"
+#define API_MSG_GFAPI_XLATOR_INIT_FAILED_STR \
+ "failed to initialize gfapi translator"
+#define API_MSG_VOLFILE_OPEN_FAILED_STR "volume file open failed"
+#define API_MSG_VOL_SPEC_FILE_ERROR_STR "Cannot reach volume specification file"
+#define API_MSG_TRANS_RDMA_DEP_STR \
+ "transport RDMA is deprecated, falling back to tcp"
+#define API_MSG_TRANS_NOT_SUPPORTED_STR \
+ "transport is not supported, possible values tcp|unix"
+#define API_MSG_GLFS_FSOBJ_NULL_STR "fs is NULL"
+#define API_MSG_FS_NOT_INIT_STR "fs is not properly initialized"
+#define API_MSG_FSMUTEX_LOCK_FAILED_STR \
+ "pthread lock on glfs mutex, returned error"
+#define API_MSG_FSMUTEX_UNLOCK_FAILED_STR \
+ "pthread unlock on glfs mutex, returned error"
+#define API_MSG_COND_WAIT_FAILED_STR "cond wait failed"
+#define API_MSG_INVALID_SYSRQ_STR "not a valid sysrq"
+#define API_MSG_GRAPH_CONSTRUCT_FAILED_STR "failed to construct the graph"
+#define API_MSG_API_XLATOR_ERROR_STR \
+ "api master xlator cannot be specified in volume file"
+#define API_MSG_STATEDUMP_FAILED_STR "statedump failed"
+#define API_MSG_DECODE_XDR_FAILED_STR \
+ "Failed to decode xdr response for GF_CBK_STATEDUMP"
+#define API_MSG_NULL_STR "NULL"
+#define API_MSG_XDR_PAYLOAD_FAILED_STR "failed to create XDR payload"
+#define API_MSG_CALL_NOT_SUCCESSFUL_STR \
+ "GET_VOLUME_INFO RPC call is not successful"
+#define API_MSG_XDR_RESPONSE_DECODE_FAILED_STR \
+ "Failed to decode xdr response for GET_VOLUME_INFO"
+#define API_MSG_CALL_NOT_VALID_STR \
+ "Response received for GET_VOLUME_INFO RPC is not valid"
+#define API_MSG_GET_VOLINFO_CBK_FAILED_STR \
+ "In GET_VOLUME_INFO cbk, received error"
+#define API_MSG_FETCH_VOLUUID_FAILED_STR "Unable to fetch volume UUID"
+#define API_MSG_INSUFF_SIZE_STR "Insufficient size passed"
+#define API_MSG_FRAME_CREAT_FAILED_STR "failed to create the frame"
+#define API_MSG_DICT_SET_FAILED_STR "failed to set"
+#define API_MSG_XDR_DECODE_FAILED_STR "XDR decoding error"
+#define API_MSG_GET_VOLFILE_FAILED_STR "failed to get the volume file"
+#define API_MSG_VOLFILE_INFO_STR "No change in volfile, continuing"
+#define API_MSG_UNABLE_TO_DEL_STR "unable to delete file"
+#define API_MSG_WRONG_OPVERSION_STR \
+ "Server is operating at an op-version which is not supported"
+#define API_MSG_DICT_SERIALIZE_FAILED_STR "Failed to serialize dictionary"
+#define API_MSG_REMOTE_HOST_CONN_FAILED_STR "Failed to connect to remote-host"
+#define API_MSG_REMOTE_HOST_DISCONN_STR "disconnected from remote-host"
+#define API_MSG_VOLFILE_SERVER_EXHAUST_STR "Exhausted all volfile servers"
+#define API_MSG_VOLFILE_CONNECTING_STR "connecting to next volfile server"
+#define API_MSG_CREATE_RPC_CLIENT_FAILED_STR "failed to create rpc clnt"
+#define API_MSG_REG_NOTIFY_FUNC_FAILED_STR "failed to register notify function"
+#define API_MSG_REG_CBK_FUNC_FAILED_STR "failed to register callback function"
+#define API_MSG_NEW_GRAPH_STR "New graph coming up"
+#define API_MSG_HANDLE_NOT_SET_STR "handle not set. Flags handled for xstat are"
#endif /* !_GFAPI_MESSAGES_H__ */
diff --git a/api/src/gfapi.aliases b/api/src/gfapi.aliases
index c77d72b0534..bc639e6b99f 100644
--- a/api/src/gfapi.aliases
+++ b/api/src/gfapi.aliases
@@ -196,6 +196,6 @@ _pub_glfs_copy_file_range _glfs_copy_file_range$GFAPI_6.0
_pub_glfs_fsetattr _glfs_fsetattr$GFAPI_6.0
_pub_glfs_setattr _glfs_setattr$GFAPI_6.0
-_pub_glfs_h_creat_open _glfs_h_creat_open@GFAPI_6.6
-
_pub_glfs_set_statedump_path _glfs_set_statedump_path@GFAPI_7.0
+
+_pub_glfs_h_creat_open _glfs_h_creat_open@GFAPI_6.6
diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c
index e6adea5ea9f..6aa3c5602d1 100644
--- a/api/src/glfs-fops.c
+++ b/api/src/glfs-fops.c
@@ -119,8 +119,8 @@ glfs_get_upcall_cache_invalidation(struct gf_upcall *to_up_data,
ca_data = GF_CALLOC(1, sizeof(*ca_data), glfs_mt_upcall_entry_t);
if (!ca_data) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -154,8 +154,8 @@ glfs_get_upcall_lease(struct gf_upcall *to_up_data,
ca_data = GF_CALLOC(1, sizeof(*ca_data), glfs_mt_upcall_entry_t);
if (!ca_data) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -292,6 +292,7 @@ glfs_iatt_to_statx(struct glfs *fs, const struct iatt *iatt,
statx->glfs_st_attributes_mask = 0;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_iatt_from_statx, 6.0)
void
priv_glfs_iatt_from_statx(struct iatt *iatt, const struct glfs_stat *statx)
{
@@ -371,7 +372,6 @@ priv_glfs_iatt_from_statx(struct iatt *iatt, const struct glfs_stat *statx)
iatt->ia_attributes = statx->glfs_st_attributes;
iatt->ia_attributes_mask = statx->glfs_st_attributes_mask;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_iatt_from_statx, 6.0);
void
glfsflags_from_gfapiflags(struct glfs_stat *stat, int *glvalid)
@@ -415,6 +415,7 @@ glfs_loc_unlink(loc_t *loc)
return 0;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_open, 3.4.0)
struct glfs_fd *
pub_glfs_open(struct glfs *fs, const char *path, int flags)
{
@@ -509,8 +510,7 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_open, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_close, 3.4.0)
int
pub_glfs_close(struct glfs_fd *glfd)
{
@@ -565,8 +565,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_close, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lstat, 3.4.0)
int
pub_glfs_lstat(struct glfs *fs, const char *path, struct stat *stat)
{
@@ -607,8 +606,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lstat, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_stat, 3.4.0)
int
pub_glfs_stat(struct glfs *fs, const char *path, struct stat *stat)
{
@@ -649,8 +647,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_stat, 3.4.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_statx, 6.0)
int
priv_glfs_statx(struct glfs *fs, const char *path, const unsigned int mask,
struct glfs_stat *statxbuf)
@@ -704,8 +701,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_statx, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fstat, 3.4.0)
int
pub_glfs_fstat(struct glfs_fd *glfd, struct stat *stat)
{
@@ -754,8 +750,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fstat, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_creat, 3.4.0)
struct glfs_fd *
pub_glfs_creat(struct glfs *fs, const char *path, int flags, mode_t mode)
{
@@ -902,8 +897,6 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_creat, 3.4.0);
-
#ifdef HAVE_SEEK_HOLE
static int
glfs_seek(struct glfs_fd *glfd, off_t offset, int whence)
@@ -957,6 +950,7 @@ out:
}
#endif
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lseek, 3.4.0)
off_t
pub_glfs_lseek(struct glfs_fd *glfd, off_t offset, int whence)
{
@@ -1012,8 +1006,6 @@ invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lseek, 3.4.0);
-
static ssize_t
glfs_preadv_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
off_t offset, int flags, struct glfs_stat *poststat)
@@ -1091,6 +1083,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv, 3.4.0)
ssize_t
pub_glfs_preadv(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
off_t offset, int flags)
@@ -1098,8 +1091,7 @@ pub_glfs_preadv(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
return glfs_preadv_common(glfd, iovec, iovcnt, offset, flags, NULL);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read, 3.4.0)
ssize_t
pub_glfs_read(struct glfs_fd *glfd, void *buf, size_t count, int flags)
{
@@ -1108,6 +1100,11 @@ pub_glfs_read(struct glfs_fd *glfd, void *buf, size_t count, int flags)
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = buf;
iov.iov_len = count;
@@ -1116,8 +1113,7 @@ pub_glfs_read(struct glfs_fd *glfd, void *buf, size_t count, int flags)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_pread34, glfs_pread, 3.4.0)
ssize_t
pub_glfs_pread34(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
int flags)
@@ -1135,8 +1131,7 @@ pub_glfs_pread34(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_pread34, glfs_pread, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread, 6.0)
ssize_t
pub_glfs_pread(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
int flags, struct glfs_stat *poststat)
@@ -1154,21 +1149,23 @@ pub_glfs_pread(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv, 3.4.0)
ssize_t
pub_glfs_readv(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = pub_glfs_preadv(glfd, iov, count, glfd->offset, flags);
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv, 3.4.0);
-
struct glfs_io {
struct glfs_fd *glfd;
int op;
@@ -1370,6 +1367,7 @@ invalid_fs:
return -1;
}
+GFAPI_SYMVER_PUBLIC(glfs_preadv_async34, glfs_preadv_async, 3.4.0)
int
pub_glfs_preadv_async34(struct glfs_fd *glfd, const struct iovec *iovec,
int count, off_t offset, int flags, glfs_io_cbk34 fn,
@@ -1379,8 +1377,7 @@ pub_glfs_preadv_async34(struct glfs_fd *glfd, const struct iovec *iovec,
(void *)fn, data);
}
-GFAPI_SYMVER_PUBLIC(glfs_preadv_async34, glfs_preadv_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv_async, 6.0)
int
pub_glfs_preadv_async(struct glfs_fd *glfd, const struct iovec *iovec,
int count, off_t offset, int flags, glfs_io_cbk fn,
@@ -1390,8 +1387,7 @@ pub_glfs_preadv_async(struct glfs_fd *glfd, const struct iovec *iovec,
_gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_read_async34, glfs_read_async, 3.4.0)
int
pub_glfs_read_async34(struct glfs_fd *glfd, void *buf, size_t count, int flags,
glfs_io_cbk34 fn, void *data)
@@ -1401,6 +1397,11 @@ pub_glfs_read_async34(struct glfs_fd *glfd, void *buf, size_t count, int flags,
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = buf;
iov.iov_len = count;
@@ -1410,8 +1411,7 @@ pub_glfs_read_async34(struct glfs_fd *glfd, void *buf, size_t count, int flags,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_read_async34, glfs_read_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read_async, 6.0)
int
pub_glfs_read_async(struct glfs_fd *glfd, void *buf, size_t count, int flags,
glfs_io_cbk fn, void *data)
@@ -1421,6 +1421,11 @@ pub_glfs_read_async(struct glfs_fd *glfd, void *buf, size_t count, int flags,
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = buf;
iov.iov_len = count;
@@ -1430,8 +1435,7 @@ pub_glfs_read_async(struct glfs_fd *glfd, void *buf, size_t count, int flags,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_pread_async34, glfs_pread_async, 3.4.0)
int
pub_glfs_pread_async34(struct glfs_fd *glfd, void *buf, size_t count,
off_t offset, int flags, glfs_io_cbk34 fn, void *data)
@@ -1450,8 +1454,7 @@ pub_glfs_pread_async34(struct glfs_fd *glfd, void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_pread_async34, glfs_pread_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread_async, 6.0)
int
pub_glfs_pread_async(struct glfs_fd *glfd, void *buf, size_t count,
off_t offset, int flags, glfs_io_cbk fn, void *data)
@@ -1470,34 +1473,40 @@ pub_glfs_pread_async(struct glfs_fd *glfd, void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_readv_async34, glfs_readv_async, 3.4.0)
int
pub_glfs_readv_async34(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags, glfs_io_cbk34 fn, void *data)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = glfs_preadv_async_common(glfd, iov, count, glfd->offset, flags,
_gf_true, (void *)fn, data);
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_readv_async34, glfs_readv_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv_async, 6.0)
int
pub_glfs_readv_async(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags, glfs_io_cbk fn, void *data)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = glfs_preadv_async_common(glfd, iov, count, glfd->offset, flags,
_gf_false, fn, data);
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv_async, 6.0);
-
static ssize_t
glfs_pwritev_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
off_t offset, int flags, struct glfs_stat *prestat,
@@ -1583,6 +1592,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_copy_file_range, 6.0)
ssize_t
pub_glfs_copy_file_range(struct glfs_fd *glfd_in, off64_t *off_in,
struct glfs_fd *glfd_out, off64_t *off_out, size_t len,
@@ -1736,8 +1746,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_copy_file_range, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev, 3.4.0)
ssize_t
pub_glfs_pwritev(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
off_t offset, int flags)
@@ -1745,8 +1754,7 @@ pub_glfs_pwritev(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
return glfs_pwritev_common(glfd, iovec, iovcnt, offset, flags, NULL, NULL);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write, 3.4.0)
ssize_t
pub_glfs_write(struct glfs_fd *glfd, const void *buf, size_t count, int flags)
{
@@ -1755,6 +1763,11 @@ pub_glfs_write(struct glfs_fd *glfd, const void *buf, size_t count, int flags)
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = (void *)buf;
iov.iov_len = count;
@@ -1763,21 +1776,24 @@ pub_glfs_write(struct glfs_fd *glfd, const void *buf, size_t count, int flags)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev, 3.4.0)
ssize_t
pub_glfs_writev(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = pub_glfs_pwritev(glfd, iov, count, glfd->offset, flags);
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_pwrite34, glfs_pwrite, 3.4.0)
ssize_t
pub_glfs_pwrite34(struct glfs_fd *glfd, const void *buf, size_t count,
off_t offset, int flags)
@@ -1795,8 +1811,7 @@ pub_glfs_pwrite34(struct glfs_fd *glfd, const void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_pwrite34, glfs_pwrite, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite, 6.0)
ssize_t
pub_glfs_pwrite(struct glfs_fd *glfd, const void *buf, size_t count,
off_t offset, int flags, struct glfs_stat *prestat,
@@ -1815,8 +1830,6 @@ pub_glfs_pwrite(struct glfs_fd *glfd, const void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite, 6.0);
-
extern glfs_t *
pub_glfs_from_glfd(glfs_fd_t *);
@@ -1935,6 +1948,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_pwritev_async34, glfs_pwritev_async, 3.4.0)
int
pub_glfs_pwritev_async34(struct glfs_fd *glfd, const struct iovec *iovec,
int count, off_t offset, int flags, glfs_io_cbk34 fn,
@@ -1944,8 +1958,7 @@ pub_glfs_pwritev_async34(struct glfs_fd *glfd, const struct iovec *iovec,
_gf_true, (void *)fn, data);
}
-GFAPI_SYMVER_PUBLIC(glfs_pwritev_async34, glfs_pwritev_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev_async, 6.0)
int
pub_glfs_pwritev_async(struct glfs_fd *glfd, const struct iovec *iovec,
int count, off_t offset, int flags, glfs_io_cbk fn,
@@ -1955,8 +1968,7 @@ pub_glfs_pwritev_async(struct glfs_fd *glfd, const struct iovec *iovec,
_gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_write_async34, glfs_write_async, 3.4.0)
int
pub_glfs_write_async34(struct glfs_fd *glfd, const void *buf, size_t count,
int flags, glfs_io_cbk34 fn, void *data)
@@ -1966,6 +1978,11 @@ pub_glfs_write_async34(struct glfs_fd *glfd, const void *buf, size_t count,
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = (void *)buf;
iov.iov_len = count;
@@ -1975,8 +1992,7 @@ pub_glfs_write_async34(struct glfs_fd *glfd, const void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_write_async34, glfs_write_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write_async, 6.0)
int
pub_glfs_write_async(struct glfs_fd *glfd, const void *buf, size_t count,
int flags, glfs_io_cbk fn, void *data)
@@ -1986,6 +2002,11 @@ pub_glfs_write_async(struct glfs_fd *glfd, const void *buf, size_t count,
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = (void *)buf;
iov.iov_len = count;
@@ -1995,8 +2016,7 @@ pub_glfs_write_async(struct glfs_fd *glfd, const void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_pwrite_async34, glfs_pwrite_async, 3.4.0)
int
pub_glfs_pwrite_async34(struct glfs_fd *glfd, const void *buf, int count,
off_t offset, int flags, glfs_io_cbk34 fn, void *data)
@@ -2015,8 +2035,7 @@ pub_glfs_pwrite_async34(struct glfs_fd *glfd, const void *buf, int count,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_pwrite_async34, glfs_pwrite_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite_async, 6.0)
int
pub_glfs_pwrite_async(struct glfs_fd *glfd, const void *buf, int count,
off_t offset, int flags, glfs_io_cbk fn, void *data)
@@ -2035,34 +2054,40 @@ pub_glfs_pwrite_async(struct glfs_fd *glfd, const void *buf, int count,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_writev_async34, glfs_writev_async, 3.4.0)
int
pub_glfs_writev_async34(struct glfs_fd *glfd, const struct iovec *iov,
int count, int flags, glfs_io_cbk34 fn, void *data)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = glfs_pwritev_async_common(glfd, iov, count, glfd->offset, flags,
_gf_true, (void *)fn, data);
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_writev_async34, glfs_writev_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev_async, 6.0)
int
pub_glfs_writev_async(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags, glfs_io_cbk fn, void *data)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = glfs_pwritev_async_common(glfd, iov, count, glfd->offset, flags,
_gf_false, fn, data);
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev_async, 6.0);
-
static int
glfs_fsync_common(struct glfs_fd *glfd, struct glfs_stat *prestat,
struct glfs_stat *poststat)
@@ -2127,14 +2152,14 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_fsync34, glfs_fsync, 3.4.0)
int
pub_glfs_fsync34(struct glfs_fd *glfd)
{
return glfs_fsync_common(glfd, NULL, NULL);
}
-GFAPI_SYMVER_PUBLIC(glfs_fsync34, glfs_fsync, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync, 6.0)
int
pub_glfs_fsync(struct glfs_fd *glfd, struct glfs_stat *prestat,
struct glfs_stat *poststat)
@@ -2142,8 +2167,6 @@ pub_glfs_fsync(struct glfs_fd *glfd, struct glfs_stat *prestat,
return glfs_fsync_common(glfd, prestat, poststat);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync, 6.0);
-
static int
glfs_fsync_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
@@ -2224,6 +2247,7 @@ out:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_fsync_async34, glfs_fsync_async, 3.4.0)
int
pub_glfs_fsync_async34(struct glfs_fd *glfd, glfs_io_cbk34 fn, void *data)
{
@@ -2240,8 +2264,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_fsync_async34, glfs_fsync_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync_async, 6.0)
int
pub_glfs_fsync_async(struct glfs_fd *glfd, glfs_io_cbk fn, void *data)
{
@@ -2258,8 +2281,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync_async, 6.0);
-
static int
glfs_fdatasync_common(struct glfs_fd *glfd, struct glfs_stat *prestat,
struct glfs_stat *poststat)
@@ -2324,14 +2345,14 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_fdatasync34, glfs_fdatasync, 3.4.0)
int
pub_glfs_fdatasync34(struct glfs_fd *glfd)
{
return glfs_fdatasync_common(glfd, NULL, NULL);
}
-GFAPI_SYMVER_PUBLIC(glfs_fdatasync34, glfs_fdatasync, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync, 6.0)
int
pub_glfs_fdatasync(struct glfs_fd *glfd, struct glfs_stat *prestat,
struct glfs_stat *poststat)
@@ -2339,8 +2360,7 @@ pub_glfs_fdatasync(struct glfs_fd *glfd, struct glfs_stat *prestat,
return glfs_fdatasync_common(glfd, prestat, poststat);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_fdatasync_async34, glfs_fdatasync_async, 3.4.0)
int
pub_glfs_fdatasync_async34(struct glfs_fd *glfd, glfs_io_cbk34 fn, void *data)
{
@@ -2357,8 +2377,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_fdatasync_async34, glfs_fdatasync_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync_async, 6.0)
int
pub_glfs_fdatasync_async(struct glfs_fd *glfd, glfs_io_cbk fn, void *data)
{
@@ -2375,8 +2394,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync_async, 6.0);
-
static int
glfs_ftruncate_common(struct glfs_fd *glfd, off_t offset,
struct glfs_stat *prestat, struct glfs_stat *poststat)
@@ -2442,14 +2459,14 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_ftruncate34, glfs_ftruncate, 3.4.0)
int
pub_glfs_ftruncate34(struct glfs_fd *glfd, off_t offset)
{
return glfs_ftruncate_common(glfd, offset, NULL, NULL);
}
-GFAPI_SYMVER_PUBLIC(glfs_ftruncate34, glfs_ftruncate, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate, 6.0)
int
pub_glfs_ftruncate(struct glfs_fd *glfd, off_t offset,
struct glfs_stat *prestat, struct glfs_stat *poststat)
@@ -2457,8 +2474,7 @@ pub_glfs_ftruncate(struct glfs_fd *glfd, off_t offset,
return glfs_ftruncate_common(glfd, offset, prestat, poststat);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_truncate, 3.7.15)
int
pub_glfs_truncate(struct glfs *fs, const char *path, off_t length)
{
@@ -2504,8 +2520,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_truncate, 3.7.15);
-
static int
glfs_ftruncate_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
@@ -2598,6 +2612,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_ftruncate_async34, glfs_ftruncate_async, 3.4.0)
int
pub_glfs_ftruncate_async34(struct glfs_fd *glfd, off_t offset, glfs_io_cbk34 fn,
void *data)
@@ -2606,8 +2621,7 @@ pub_glfs_ftruncate_async34(struct glfs_fd *glfd, off_t offset, glfs_io_cbk34 fn,
data);
}
-GFAPI_SYMVER_PUBLIC(glfs_ftruncate_async34, glfs_ftruncate_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate_async, 6.0)
int
pub_glfs_ftruncate_async(struct glfs_fd *glfd, off_t offset, glfs_io_cbk fn,
void *data)
@@ -2615,8 +2629,7 @@ pub_glfs_ftruncate_async(struct glfs_fd *glfd, off_t offset, glfs_io_cbk fn,
return glfs_ftruncate_async_common(glfd, offset, _gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_access, 3.4.0)
int
pub_glfs_access(struct glfs *fs, const char *path, int mode)
{
@@ -2662,8 +2675,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_access, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_symlink, 3.4.0)
int
pub_glfs_symlink(struct glfs *fs, const char *data, const char *path)
{
@@ -2753,8 +2765,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_symlink, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readlink, 3.4.0)
int
pub_glfs_readlink(struct glfs *fs, const char *path, char *buf, size_t bufsiz)
{
@@ -2811,8 +2822,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readlink, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mknod, 3.4.0)
int
pub_glfs_mknod(struct glfs *fs, const char *path, mode_t mode, dev_t dev)
{
@@ -2902,8 +2912,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mknod, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mkdir, 3.4.0)
int
pub_glfs_mkdir(struct glfs *fs, const char *path, mode_t mode)
{
@@ -2993,8 +3002,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mkdir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unlink, 3.4.0)
int
pub_glfs_unlink(struct glfs *fs, const char *path)
{
@@ -3050,8 +3058,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unlink, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rmdir, 3.4.0)
int
pub_glfs_rmdir(struct glfs *fs, const char *path)
{
@@ -3106,8 +3113,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rmdir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rename, 3.4.0)
int
pub_glfs_rename(struct glfs *fs, const char *oldpath, const char *newpath)
{
@@ -3196,8 +3202,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rename, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_link, 3.4.0)
int
pub_glfs_link(struct glfs *fs, const char *oldpath, const char *newpath)
{
@@ -3283,8 +3288,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_link, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_opendir, 3.4.0)
struct glfs_fd *
pub_glfs_opendir(struct glfs *fs, const char *path)
{
@@ -3365,8 +3369,7 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_opendir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_closedir, 3.4.0)
int
pub_glfs_closedir(struct glfs_fd *glfd)
{
@@ -3387,22 +3390,30 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_closedir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_telldir, 3.4.0)
long
pub_glfs_telldir(struct glfs_fd *fd)
{
+ if (fd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
return fd->offset;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_telldir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_seekdir, 3.4.0)
void
pub_glfs_seekdir(struct glfs_fd *fd, long offset)
{
gf_dirent_t *entry = NULL;
gf_dirent_t *tmp = NULL;
+ if (fd == NULL) {
+ errno = EBADF;
+ return;
+ }
+
if (fd->offset == offset)
return;
@@ -3425,8 +3436,6 @@ pub_glfs_seekdir(struct glfs_fd *fd, long offset)
*/
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_seekdir, 3.4.0);
-
static int
glfs_discard_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno,
@@ -3517,6 +3526,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_discard_async35, glfs_discard_async, 3.5.0)
int
pub_glfs_discard_async35(struct glfs_fd *glfd, off_t offset, size_t len,
glfs_io_cbk34 fn, void *data)
@@ -3525,8 +3535,7 @@ pub_glfs_discard_async35(struct glfs_fd *glfd, off_t offset, size_t len,
data);
}
-GFAPI_SYMVER_PUBLIC(glfs_discard_async35, glfs_discard_async, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard_async, 6.0)
int
pub_glfs_discard_async(struct glfs_fd *glfd, off_t offset, size_t len,
glfs_io_cbk fn, void *data)
@@ -3534,8 +3543,6 @@ pub_glfs_discard_async(struct glfs_fd *glfd, off_t offset, size_t len,
return glfs_discard_async_common(glfd, offset, len, _gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard_async, 6.0);
-
static int
glfs_zerofill_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno,
@@ -3628,6 +3635,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_zerofill_async35, glfs_zerofill_async, 3.5.0)
int
pub_glfs_zerofill_async35(struct glfs_fd *glfd, off_t offset, off_t len,
glfs_io_cbk34 fn, void *data)
@@ -3636,8 +3644,7 @@ pub_glfs_zerofill_async35(struct glfs_fd *glfd, off_t offset, off_t len,
data);
}
-GFAPI_SYMVER_PUBLIC(glfs_zerofill_async35, glfs_zerofill_async, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill_async, 6.0)
int
pub_glfs_zerofill_async(struct glfs_fd *glfd, off_t offset, off_t len,
glfs_io_cbk fn, void *data)
@@ -3645,8 +3652,6 @@ pub_glfs_zerofill_async(struct glfs_fd *glfd, off_t offset, off_t len,
return glfs_zerofill_async_common(glfd, offset, len, _gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill_async, 6.0);
-
void
gf_dirent_to_dirent(gf_dirent_t *gf_dirent, struct dirent *dirent)
{
@@ -3806,6 +3811,7 @@ unlock:
return buf;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus_r, 3.4.0)
int
pub_glfs_readdirplus_r(struct glfs_fd *glfd, struct stat *stat,
struct dirent *ext, struct dirent **res)
@@ -3861,8 +3867,7 @@ invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus_r, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir_r, 3.4.0)
int
pub_glfs_readdir_r(struct glfs_fd *glfd, struct dirent *buf,
struct dirent **res)
@@ -3870,8 +3875,7 @@ pub_glfs_readdir_r(struct glfs_fd *glfd, struct dirent *buf,
return pub_glfs_readdirplus_r(glfd, 0, buf, res);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir_r, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus, 3.5.0)
struct dirent *
pub_glfs_readdirplus(struct glfs_fd *glfd, struct stat *stat)
{
@@ -3885,16 +3889,14 @@ pub_glfs_readdirplus(struct glfs_fd *glfd, struct stat *stat)
return res;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir, 3.5.0)
struct dirent *
pub_glfs_readdir(struct glfs_fd *glfd)
{
return pub_glfs_readdirplus(glfd, NULL);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_statvfs, 3.4.0)
int
pub_glfs_statvfs(struct glfs *fs, const char *path, struct statvfs *buf)
{
@@ -3940,8 +3942,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_statvfs, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setattr, 6.0)
int
pub_glfs_setattr(struct glfs *fs, const char *path, struct glfs_stat *stat,
int follow)
@@ -4001,8 +4002,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setattr, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetattr, 6.0)
int
pub_glfs_fsetattr(struct glfs_fd *glfd, struct glfs_stat *stat)
{
@@ -4055,8 +4055,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetattr, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chmod, 3.4.0)
int
pub_glfs_chmod(struct glfs *fs, const char *path, mode_t mode)
{
@@ -4073,8 +4072,7 @@ pub_glfs_chmod(struct glfs *fs, const char *path, mode_t mode)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chmod, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchmod, 3.4.0)
int
pub_glfs_fchmod(struct glfs_fd *glfd, mode_t mode)
{
@@ -4091,8 +4089,7 @@ pub_glfs_fchmod(struct glfs_fd *glfd, mode_t mode)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchmod, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chown, 3.4.0)
int
pub_glfs_chown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
{
@@ -4117,8 +4114,7 @@ pub_glfs_chown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chown, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lchown, 3.4.0)
int
pub_glfs_lchown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
{
@@ -4143,8 +4139,7 @@ pub_glfs_lchown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lchown, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchown, 3.4.0)
int
pub_glfs_fchown(struct glfs_fd *glfd, uid_t uid, gid_t gid)
{
@@ -4169,8 +4164,7 @@ pub_glfs_fchown(struct glfs_fd *glfd, uid_t uid, gid_t gid)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchown, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_utimens, 3.4.0)
int
pub_glfs_utimens(struct glfs *fs, const char *path,
const struct timespec times[2])
@@ -4190,8 +4184,7 @@ pub_glfs_utimens(struct glfs *fs, const char *path,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_utimens, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lutimens, 3.4.0)
int
pub_glfs_lutimens(struct glfs *fs, const char *path,
const struct timespec times[2])
@@ -4211,8 +4204,7 @@ pub_glfs_lutimens(struct glfs *fs, const char *path,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lutimens, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_futimens, 3.4.0)
int
pub_glfs_futimens(struct glfs_fd *glfd, const struct timespec times[2])
{
@@ -4231,8 +4223,6 @@ pub_glfs_futimens(struct glfs_fd *glfd, const struct timespec times[2])
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_futimens, 3.4.0);
-
int
glfs_getxattr_process(void *value, size_t size, dict_t *xattr, const char *name)
{
@@ -4332,6 +4322,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getxattr, 3.4.0)
ssize_t
pub_glfs_getxattr(struct glfs *fs, const char *path, const char *name,
void *value, size_t size)
@@ -4339,8 +4330,7 @@ pub_glfs_getxattr(struct glfs *fs, const char *path, const char *name,
return glfs_getxattr_common(fs, path, name, value, size, 1);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lgetxattr, 3.4.0)
ssize_t
pub_glfs_lgetxattr(struct glfs *fs, const char *path, const char *name,
void *value, size_t size)
@@ -4348,8 +4338,7 @@ pub_glfs_lgetxattr(struct glfs *fs, const char *path, const char *name,
return glfs_getxattr_common(fs, path, name, value, size, 0);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lgetxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fgetxattr, 3.4.0)
ssize_t
pub_glfs_fgetxattr(struct glfs_fd *glfd, const char *name, void *value,
size_t size)
@@ -4412,8 +4401,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fgetxattr, 3.4.0);
-
int
glfs_listxattr_process(void *value, size_t size, dict_t *xattr)
{
@@ -4497,22 +4484,21 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_listxattr, 3.4.0)
ssize_t
pub_glfs_listxattr(struct glfs *fs, const char *path, void *value, size_t size)
{
return glfs_listxattr_common(fs, path, value, size, 1);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_listxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_llistxattr, 3.4.0)
ssize_t
pub_glfs_llistxattr(struct glfs *fs, const char *path, void *value, size_t size)
{
return glfs_listxattr_common(fs, path, value, size, 0);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_llistxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_flistxattr, 3.4.0)
ssize_t
pub_glfs_flistxattr(struct glfs_fd *glfd, void *value, size_t size)
{
@@ -4562,8 +4548,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_flistxattr, 3.4.0);
-
int
glfs_setxattr_common(struct glfs *fs, const char *path, const char *name,
const void *value, size_t size, int flags, int follow)
@@ -4643,6 +4627,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setxattr, 3.4.0)
int
pub_glfs_setxattr(struct glfs *fs, const char *path, const char *name,
const void *value, size_t size, int flags)
@@ -4650,8 +4635,7 @@ pub_glfs_setxattr(struct glfs *fs, const char *path, const char *name,
return glfs_setxattr_common(fs, path, name, value, size, flags, 1);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lsetxattr, 3.4.0)
int
pub_glfs_lsetxattr(struct glfs *fs, const char *path, const char *name,
const void *value, size_t size, int flags)
@@ -4659,8 +4643,7 @@ pub_glfs_lsetxattr(struct glfs *fs, const char *path, const char *name,
return glfs_setxattr_common(fs, path, name, value, size, flags, 0);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lsetxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetxattr, 3.4.0)
int
pub_glfs_fsetxattr(struct glfs_fd *glfd, const char *name, const void *value,
size_t size, int flags)
@@ -4735,8 +4718,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetxattr, 3.4.0);
-
int
glfs_removexattr_common(struct glfs *fs, const char *path, const char *name,
int follow)
@@ -4787,22 +4768,21 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_removexattr, 3.4.0)
int
pub_glfs_removexattr(struct glfs *fs, const char *path, const char *name)
{
return glfs_removexattr_common(fs, path, name, 1);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_removexattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lremovexattr, 3.4.0)
int
pub_glfs_lremovexattr(struct glfs *fs, const char *path, const char *name)
{
return glfs_removexattr_common(fs, path, name, 0);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lremovexattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fremovexattr, 3.4.0)
int
pub_glfs_fremovexattr(struct glfs_fd *glfd, const char *name)
{
@@ -4845,8 +4825,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fremovexattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fallocate, 3.5.0)
int
pub_glfs_fallocate(struct glfs_fd *glfd, int keep_size, off_t offset,
size_t len)
@@ -4897,8 +4876,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fallocate, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard, 3.5.0)
int
pub_glfs_discard(struct glfs_fd *glfd, off_t offset, size_t len)
{
@@ -4948,8 +4926,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill, 3.5.0)
int
pub_glfs_zerofill(struct glfs_fd *glfd, off_t offset, off_t len)
{
@@ -4997,8 +4974,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chdir, 3.4.0)
int
pub_glfs_chdir(struct glfs *fs, const char *path)
{
@@ -5048,8 +5024,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chdir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchdir, 3.4.0)
int
pub_glfs_fchdir(struct glfs_fd *glfd)
{
@@ -5101,8 +5076,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchdir, 3.4.0);
-
static gf_boolean_t warn_realpath = _gf_true; /* log once */
static char *
@@ -5185,22 +5158,21 @@ invalid_fs:
return retpath;
}
+GFAPI_SYMVER_PUBLIC(glfs_realpath34, glfs_realpath, 3.4.0)
char *
pub_glfs_realpath34(struct glfs *fs, const char *path, char *resolved_path)
{
return glfs_realpath_common(fs, path, resolved_path, _gf_true);
}
-GFAPI_SYMVER_PUBLIC(glfs_realpath34, glfs_realpath, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_realpath, 3.7.17)
char *
pub_glfs_realpath(struct glfs *fs, const char *path, char *resolved_path)
{
return glfs_realpath_common(fs, path, resolved_path, _gf_false);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_realpath, 3.7.17);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getcwd, 3.4.0)
char *
pub_glfs_getcwd(struct glfs *fs, char *buf, size_t n)
{
@@ -5249,8 +5221,6 @@ invalid_fs:
return buf;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getcwd, 3.4.0);
-
static void
gf_flock_to_flock(struct gf_flock *gf_flock, struct flock *flock)
{
@@ -5336,11 +5306,9 @@ glfs_lock_common(struct glfs_fd *glfd, int cmd, struct flock *flock,
if (ret == 0 && (cmd == F_SETLK || cmd == F_SETLKW)) {
ret = fd_lk_insert_and_merge(fd, cmd, &saved_flock);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0,
- API_MSG_LOCK_INSERT_MERGE_FAILED,
- "Lock insertion and splitting/merging failed "
- "on gfid %s",
- uuid_utoa(fd->inode->gfid));
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0,
+ API_MSG_LOCK_INSERT_MERGE_FAILED, "gfid=%s",
+ uuid_utoa(fd->inode->gfid), NULL);
ret = 0;
}
}
@@ -5359,6 +5327,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_file_lock, 4.0.0)
int
pub_glfs_file_lock(struct glfs_fd *glfd, int cmd, struct flock *flock,
glfs_lock_mode_t lk_mode)
@@ -5379,9 +5348,8 @@ pub_glfs_file_lock(struct glfs_fd *glfd, int cmd, struct flock *flock,
* GLFS_LK_MANDATORY */
ret = dict_set_uint32(xdata_in, GF_LOCK_MODE, GF_LK_MANDATORY);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0,
- API_MSG_SETTING_LOCK_TYPE_FAILED,
- "Setting lock type failed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0,
+ API_MSG_SETTING_LOCK_TYPE_FAILED, NULL);
ret = -1;
errno = ENOMEM;
goto out;
@@ -5396,16 +5364,14 @@ out:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_file_lock, 4.0.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_posix_lock, 3.4.0)
int
pub_glfs_posix_lock(struct glfs_fd *glfd, int cmd, struct flock *flock)
{
return glfs_lock_common(glfd, cmd, flock, NULL);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_posix_lock, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fd_set_lkowner, 3.10.7)
int
pub_glfs_fd_set_lkowner(struct glfs_fd *glfd, void *data, int len)
{
@@ -5422,8 +5388,8 @@ pub_glfs_fd_set_lkowner(struct glfs_fd *glfd, void *data, int len)
if ((len <= 0) || (len > GFAPI_MAX_LOCK_OWNER_LEN)) {
errno = EINVAL;
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "Invalid lk_owner len (%d)", len);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
+ "lk_owner len=%d", len, NULL);
goto out;
}
@@ -5441,8 +5407,8 @@ out:
invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fd_set_lkowner, 3.10.7);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_dup, 3.4.0)
struct glfs_fd *
pub_glfs_dup(struct glfs_fd *glfd)
{
@@ -5493,8 +5459,6 @@ invalid_fs:
return dupfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_dup, 3.4.0);
-
static void
glfs_enqueue_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
{
@@ -5507,8 +5471,8 @@ glfs_enqueue_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
u_list = GF_CALLOC(1, sizeof(*u_list), glfs_mt_upcall_entry_t);
if (!u_list) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -5530,8 +5494,7 @@ glfs_enqueue_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
}
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY,
- "Upcall entry validation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY, NULL);
goto out;
}
@@ -5601,9 +5564,9 @@ glfs_recall_lease_fd(struct glfs *fs, struct gf_upcall *up_data)
inode = inode_find(subvol->itable, up_data->gfid);
if (!inode) {
ret = -1;
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INODE_FIND_FAILED,
- "Unable to find inode entry for gfid:%s graph id:%d",
- uuid_utoa(up_data->gfid), subvol->graph->id);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INODE_FIND_FAILED,
+ "gfid=%s", uuid_utoa(up_data->gfid), "graph_id=%d",
+ subvol->graph->id, NULL);
goto out;
}
@@ -5681,8 +5644,8 @@ glfs_recall_lease_upcall(struct glfs *fs, struct glfs_upcall *up_arg,
* the handle and hence will no more be interested in
* the upcall for this particular gfid.
*/
- gf_msg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
- "handle creation of %s failed", uuid_utoa(up_data->gfid));
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
+ "gfid=%s", uuid_utoa(up_data->gfid), NULL);
errno = ESTALE;
goto out;
}
@@ -5771,8 +5734,8 @@ glfs_cbk_upcall_syncop(void *opaque)
up_arg = GLFS_CALLOC(1, sizeof(struct gf_upcall), glfs_release_upcall,
glfs_mt_upcall_entry_t);
if (!up_arg) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -5794,14 +5757,14 @@ glfs_cbk_upcall_syncop(void *opaque)
* send upcall then
*/
if (up_arg->reason == GLFS_UPCALL_EVENT_NULL) {
- gf_msg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_INVALID_ENTRY,
- "Upcall_EVENT_NULL received. Skipping it.");
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno,
+ API_MSG_UPCALL_EVENT_NULL_RECEIVED, NULL);
ret = 0;
GLFS_FREE(up_arg);
goto out;
} else if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY,
- "Upcall entry validation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY, NULL);
+ GLFS_FREE(up_arg);
goto out;
}
@@ -5827,8 +5790,8 @@ gf_copy_cache_invalidation(struct gf_upcall_cache_invalidation *src)
glfs_mt_upcall_entry_t);
if (!dst) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -5858,8 +5821,8 @@ gf_copy_recall_lease(struct gf_upcall_recall_lease *src)
glfs_mt_upcall_entry_t);
if (!dst) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -5887,8 +5850,8 @@ upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data)
args = GF_CALLOC(1, sizeof(struct upcall_syncop_args),
glfs_mt_upcall_entry_t);
if (!args) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall syncop args allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
+ "syncop args", NULL);
goto out;
}
@@ -5924,8 +5887,10 @@ upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data)
return args;
out:
if (ret) {
- GF_FREE(args->upcall_data.client_uid);
- GF_FREE(args);
+ if (args) {
+ GF_FREE(args->upcall_data.client_uid);
+ GF_FREE(args);
+ }
}
return NULL;
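
The hunk above tightens the cleanup path of upcall_syncop_args_init(): when the GF_CALLOC of args fails, the error branch could end up dereferencing args->upcall_data.client_uid through a NULL pointer; it now frees the members only if args was actually allocated. The same guard, reduced to a neutral, self-contained sketch (the struct and function names are invented):

    #include <stdlib.h>
    #include <string.h>

    struct example_args {
        char *client_uid;
    };

    static struct example_args *example_args_init(const char *uid)
    {
        struct example_args *args = calloc(1, sizeof(*args));

        if (!args)
            goto err;

        args->client_uid = strdup(uid);
        if (!args->client_uid)
            goto err;

        return args;
    err:
        if (args) {                  /* args may be NULL when calloc failed */
            free(args->client_uid);
            free(args);
        }
        return NULL;
    }
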
@@ -5954,9 +5919,9 @@ glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
glfs_upcall_syncop_cbk, NULL, args);
/* should we retry incase of failure? */
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_UPCALL_SYNCOP_FAILED,
- "Synctak for Upcall event_type(%d) and gfid(%s) failed",
- upcall_data->event_type, (char *)(upcall_data->gfid));
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_UPCALL_SYNCOP_FAILED,
+ "event_type=%d", upcall_data->event_type, "gfid=%s",
+ (char *)(upcall_data->gfid), NULL);
upcall_syncop_args_free(args);
}
@@ -5978,6 +5943,7 @@ out:
* Otherwise all the upcall events are queued up in a list
* to be read/polled by the applications.
*/
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_process_upcall_event, 3.7.0)
void
priv_glfs_process_upcall_event(struct glfs *fs, void *data)
{
@@ -6045,7 +6011,6 @@ out:
err:
return;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_process_upcall_event, 3.7.0);
ssize_t
glfs_anonymous_pwritev(struct glfs *fs, struct glfs_object *object,
@@ -6084,8 +6049,7 @@ glfs_anonymous_pwritev(struct glfs *fs, struct glfs_object *object,
fd = fd_anonymous(inode);
if (!fd) {
ret = -1;
- gf_msg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED,
- "Allocating anonymous fd failed");
+ gf_smsg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED, NULL);
errno = ENOMEM;
goto out;
}
@@ -6183,8 +6147,7 @@ glfs_anonymous_preadv(struct glfs *fs, struct glfs_object *object,
fd = fd_anonymous(inode);
if (!fd) {
ret = -1;
- gf_msg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED,
- "Allocating anonymous fd failed");
+ gf_smsg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED, NULL);
errno = ENOMEM;
goto out;
}
@@ -6233,6 +6196,7 @@ glfs_release_xreaddirp_stat(void *ptr)
* Given glfd of a directory, this function does readdirp and returns
* xstat along with dirents.
*/
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_r, 3.11.0)
int
pub_glfs_xreaddirplus_r(struct glfs_fd *glfd, uint32_t flags,
struct glfs_xreaddirp_stat **xstat_p,
@@ -6327,8 +6291,8 @@ out:
GF_REF_PUT(glfd);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_WARNING, errno, API_MSG_XREADDIRP_R_FAILED,
- "glfs_x_readdirp_r failed - reason (%s)", strerror(errno));
+ gf_smsg(THIS->name, GF_LOG_WARNING, errno, API_MSG_XREADDIRP_R_FAILED,
+ "reason=%s", strerror(errno), NULL);
if (xstat)
GLFS_FREE(xstat);
@@ -6341,24 +6305,23 @@ out:
invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_r, 3.11.0);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_stat, 3.11.0)
struct stat *
pub_glfs_xreaddirplus_get_stat(struct glfs_xreaddirp_stat *xstat)
{
GF_VALIDATE_OR_GOTO("glfs_xreaddirplus_get_stat", xstat, out);
if (!xstat->flags_handled & GFAPI_XREADDIRP_STAT)
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "GFAPI_XREADDIRP_STAT is not set. Flags"
- "handled for xstat(%p) are (%x)",
- xstat, xstat->flags_handled);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_FLAGS_HANDLE,
+ "GFAPI_XREADDIRP_STAT"
+ "xstat=%p",
+ xstat, "handles=%x", xstat->flags_handled, NULL);
return &xstat->st;
out:
return NULL;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_stat, 3.11.0);
void
gf_lease_to_glfs_lease(struct gf_lease *gf_lease, struct glfs_lease *lease)
@@ -6378,6 +6341,7 @@ glfs_lease_to_gf_lease(struct glfs_lease *lease, struct gf_lease *gf_lease)
memcpy(gf_lease->lease_id, lease->lease_id, LEASE_ID_SIZE);
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lease, 4.0.0)
int
pub_glfs_lease(struct glfs_fd *glfd, struct glfs_lease *lease,
glfs_recall_cbk fn, void *data)
@@ -6458,8 +6422,8 @@ pub_glfs_lease(struct glfs_fd *glfd, struct glfs_lease *lease,
if (ret == 0) {
ret = fd_ctx_set(glfd->fd, subvol, (uint64_t)(long)glfd);
if (ret) {
- gf_msg(subvol->name, GF_LOG_ERROR, ENOMEM, API_MSG_FDCTX_SET_FAILED,
- "Setting fd ctx failed for fd(%p)", glfd->fd);
+ gf_smsg(subvol->name, GF_LOG_ERROR, ENOMEM,
+ API_MSG_FDCTX_SET_FAILED, "fd=%p", glfd->fd, NULL);
goto out;
}
glfd->cbk = fn;
@@ -6479,5 +6443,3 @@ out:
invalid_fs:
return ret;
}
-
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lease, 4.0.0);
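
That closes the glfs-fops.c portion, and its pattern is uniform throughout the file: every GFAPI_SYMVER_PUBLIC*/GFAPI_SYMVER_PRIVATE* marker moves from a trailing statement (with a semicolon) after the function body to a bare marker immediately before the definition. The reason becomes clear in the glfs-internal.h hunk further down, where the marker may expand to a function attribute and therefore has to sit in front of the symbol it versions. A condensed before/after sketch with a made-up function:

    /* before this patch: version mapping emitted after the definition */
    int
    pub_glfs_example(struct glfs *fs)
    {
        return 0;
    }
    GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_example, 3.4.0);

    /* after this patch: the marker leads the definition and carries no
     * semicolon of its own */
    GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_example, 3.4.0)
    int
    pub_glfs_example(struct glfs *fs)
    {
        return 0;
    }
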
diff --git a/api/src/glfs-handleops.c b/api/src/glfs-handleops.c
index cdf368379d6..53c2ee896f9 100644
--- a/api/src/glfs-handleops.c
+++ b/api/src/glfs-handleops.c
@@ -60,6 +60,7 @@ glfs_iatt_from_stat(struct stat *stat, int valid, struct iatt *iatt,
return;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_lookupat, 3.7.4)
struct glfs_object *
pub_glfs_h_lookupat(struct glfs *fs, struct glfs_object *parent,
const char *path, struct stat *stat, int follow)
@@ -126,8 +127,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_lookupat, 3.7.4);
-
+GFAPI_SYMVER_PUBLIC(glfs_h_lookupat34, glfs_h_lookupat, 3.4.2)
struct glfs_object *
pub_glfs_h_lookupat34(struct glfs *fs, struct glfs_object *parent,
const char *path, struct stat *stat)
@@ -135,8 +135,7 @@ pub_glfs_h_lookupat34(struct glfs *fs, struct glfs_object *parent,
return pub_glfs_h_lookupat(fs, parent, path, stat, 0);
}
-GFAPI_SYMVER_PUBLIC(glfs_h_lookupat34, glfs_h_lookupat, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_statfs, 3.7.0)
int
pub_glfs_h_statfs(struct glfs *fs, struct glfs_object *object,
struct statvfs *statvfs)
@@ -194,8 +193,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_statfs, 3.7.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_stat, 3.4.2)
int
pub_glfs_h_stat(struct glfs *fs, struct glfs_object *object, struct stat *stat)
{
@@ -259,8 +257,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_stat, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_getattrs, 3.4.2)
int
pub_glfs_h_getattrs(struct glfs *fs, struct glfs_object *object,
struct stat *stat)
@@ -317,8 +314,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_getattrs, 3.4.2);
-
int
glfs_h_getxattrs_common(struct glfs *fs, struct glfs_object *object,
dict_t **xattr, const char *name,
@@ -380,6 +375,7 @@ out:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_getxattrs, 3.5.1)
int
pub_glfs_h_getxattrs(struct glfs *fs, struct glfs_object *object,
const char *name, void *value, size_t size)
@@ -416,8 +412,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_getxattrs, 3.5.1);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_setattrs, 3.4.2)
int
pub_glfs_h_setattrs(struct glfs *fs, struct glfs_object *object,
struct stat *stat, int valid)
@@ -480,8 +475,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_setattrs, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_setxattrs, 3.5.0)
int
pub_glfs_h_setxattrs(struct glfs *fs, struct glfs_object *object,
const char *name, const void *value, size_t size,
@@ -568,8 +562,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_setxattrs, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_removexattrs, 3.5.1)
int
pub_glfs_h_removexattrs(struct glfs *fs, struct glfs_object *object,
const char *name)
@@ -626,8 +619,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_removexattrs, 3.5.1);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_open, 3.4.2)
struct glfs_fd *
pub_glfs_h_open(struct glfs *fs, struct glfs_object *object, int flags)
{
@@ -727,8 +719,7 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_open, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat, 3.4.2)
struct glfs_object *
pub_glfs_h_creat(struct glfs *fs, struct glfs_object *parent, const char *path,
int flags, mode_t mode, struct stat *stat)
@@ -840,8 +831,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat_open, 6.6)
struct glfs_object *
pub_glfs_h_creat_open(struct glfs *fs, struct glfs_object *parent,
const char *path, int flags, mode_t mode,
@@ -975,8 +965,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat_open, 6.6);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_mkdir, 3.4.2)
struct glfs_object *
pub_glfs_h_mkdir(struct glfs *fs, struct glfs_object *parent, const char *path,
mode_t mode, struct stat *stat)
@@ -1074,8 +1063,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_mkdir, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_mknod, 3.4.2)
struct glfs_object *
pub_glfs_h_mknod(struct glfs *fs, struct glfs_object *parent, const char *path,
mode_t mode, dev_t dev, struct stat *stat)
@@ -1172,8 +1160,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_mknod, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_unlink, 3.4.2)
int
pub_glfs_h_unlink(struct glfs *fs, struct glfs_object *parent, const char *path)
{
@@ -1244,8 +1231,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_unlink, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_opendir, 3.4.2)
struct glfs_fd *
pub_glfs_h_opendir(struct glfs *fs, struct glfs_object *object)
{
@@ -1327,8 +1313,7 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_opendir, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_access, 3.6.0)
int
pub_glfs_h_access(struct glfs *fs, struct glfs_object *object, int mask)
{
@@ -1385,8 +1370,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_access, 3.6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_extract_handle, 3.4.2)
ssize_t
pub_glfs_h_extract_handle(struct glfs_object *object, unsigned char *handle,
int len)
@@ -1417,8 +1401,7 @@ out:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_extract_handle, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_create_from_handle, 3.4.2)
struct glfs_object *
pub_glfs_h_create_from_handle(struct glfs *fs, unsigned char *handle, int len,
struct stat *stat)
@@ -1495,9 +1478,9 @@ pub_glfs_h_create_from_handle(struct glfs *fs, unsigned char *handle, int len,
ret = syncop_lookup(subvol, &loc, &iatt, 0, 0, 0);
DECODE_SYNCOP_ERR(ret);
if (ret) {
- gf_msg(subvol->name, GF_LOG_WARNING, errno,
- API_MSG_INODE_REFRESH_FAILED, "inode refresh of %s failed: %s",
- uuid_utoa(loc.gfid), strerror(errno));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno,
+ API_MSG_INODE_REFRESH_FAILED, "gfid=%s", uuid_utoa(loc.gfid),
+ "error=%s", strerror(errno), NULL);
goto out;
}
@@ -1508,8 +1491,8 @@ pub_glfs_h_create_from_handle(struct glfs *fs, unsigned char *handle, int len,
}
inode_lookup(newinode);
} else {
- gf_msg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
- "inode linking of %s failed", uuid_utoa(loc.gfid));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
+ "gfid=%s", uuid_utoa(loc.gfid), NULL);
goto out;
}
@@ -1541,8 +1524,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_create_from_handle, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_close, 3.4.2)
int
pub_glfs_h_close(struct glfs_object *object)
{
@@ -1555,8 +1537,7 @@ pub_glfs_h_close(struct glfs_object *object)
return 0;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_close, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_truncate, 3.4.2)
int
pub_glfs_h_truncate(struct glfs *fs, struct glfs_object *object, off_t offset)
{
@@ -1616,8 +1597,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_truncate, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_symlink, 3.4.2)
struct glfs_object *
pub_glfs_h_symlink(struct glfs *fs, struct glfs_object *parent,
const char *name, const char *data, struct stat *stat)
@@ -1716,8 +1696,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_symlink, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_readlink, 3.4.2)
int
pub_glfs_h_readlink(struct glfs *fs, struct glfs_object *object, char *buf,
size_t bufsiz)
@@ -1782,8 +1761,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_readlink, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_link, 3.4.2)
int
pub_glfs_h_link(struct glfs *fs, struct glfs_object *linksrc,
struct glfs_object *parent, const char *name)
@@ -1880,8 +1858,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_link, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_rename, 3.4.2)
int
pub_glfs_h_rename(struct glfs *fs, struct glfs_object *olddir,
const char *oldname, struct glfs_object *newdir,
@@ -1991,8 +1968,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_rename, 3.4.2);
-
/*
* Given a handle/gfid, find if the corresponding inode is present in
* the inode table. If yes create and return the corresponding glfs_object.
@@ -2097,8 +2072,8 @@ glfs_h_poll_cache_invalidation(struct glfs *fs, struct glfs_upcall *up_arg,
* the handle and hence will no more be interested in
* the upcall for this particular gfid.
*/
- gf_msg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
- "handle creation of %s failed", uuid_utoa(upcall_data->gfid));
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
+ "gfid=%s", uuid_utoa(upcall_data->gfid), NULL);
errno = ESTALE;
goto out;
}
@@ -2121,9 +2096,9 @@ glfs_h_poll_cache_invalidation(struct glfs *fs, struct glfs_upcall *up_arg,
p_object = glfs_h_find_handle(fs, ca_data->p_stat.ia_gfid,
GFAPI_HANDLE_LENGTH);
if (!p_object) {
- gf_msg(THIS->name, GF_LOG_DEBUG, errno,
- API_MSG_CREATE_HANDLE_FAILED, "handle creation of %s failed",
- uuid_utoa(ca_data->p_stat.ia_gfid));
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno,
+ API_MSG_CREATE_HANDLE_FAILED, "gfid=%s",
+ uuid_utoa(ca_data->p_stat.ia_gfid), NULL);
errno = ESTALE;
goto out;
}
@@ -2137,9 +2112,9 @@ glfs_h_poll_cache_invalidation(struct glfs *fs, struct glfs_upcall *up_arg,
oldp_object = glfs_h_find_handle(fs, ca_data->oldp_stat.ia_gfid,
GFAPI_HANDLE_LENGTH);
if (!oldp_object) {
- gf_msg(THIS->name, GF_LOG_DEBUG, errno,
- API_MSG_CREATE_HANDLE_FAILED, "handle creation of %s failed",
- uuid_utoa(ca_data->oldp_stat.ia_gfid));
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno,
+ API_MSG_CREATE_HANDLE_FAILED, "gfid=%s",
+ uuid_utoa(ca_data->oldp_stat.ia_gfid), NULL);
errno = ESTALE;
/* By the time we receive upcall old parent_dir may
* have got removed. We still need to send upcall
@@ -2200,6 +2175,7 @@ glfs_release_upcall(void *ptr)
* calling glfs_fini(..). Hence making an assumption that 'fs' & ctx structures
* cannot be freed while in this routine.
*/
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_poll_upcall, 3.7.16)
int
pub_glfs_h_poll_upcall(struct glfs *fs, struct glfs_upcall **up_arg)
{
@@ -2317,8 +2293,6 @@ err:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_poll_upcall, 3.7.16);
-
static gf_boolean_t log_upcall370 = _gf_true; /* log once */
/* The old glfs_h_poll_upcall interface requires intimate knowledge of the
@@ -2332,6 +2306,7 @@ static gf_boolean_t log_upcall370 = _gf_true; /* log once */
*
* WARNING: this function will be removed in the future.
*/
+GFAPI_SYMVER_PUBLIC(glfs_h_poll_upcall370, glfs_h_poll_upcall, 3.7.0)
int
pub_glfs_h_poll_upcall370(struct glfs *fs, struct glfs_callback_arg *up_arg)
{
@@ -2399,12 +2374,11 @@ out:
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_h_poll_upcall370, glfs_h_poll_upcall, 3.7.0);
-
#ifdef HAVE_ACL_LIBACL_H
#include <glusterfs/glusterfs-acl.h>
#include <acl/libacl.h>
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_set, 3.7.0)
int
pub_glfs_h_acl_set(struct glfs *fs, struct glfs_object *object,
const acl_type_t type, const acl_t acl)
@@ -2453,6 +2427,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_get, 3.7.0)
acl_t
pub_glfs_h_acl_get(struct glfs *fs, struct glfs_object *object,
const acl_type_t type)
@@ -2507,6 +2482,7 @@ invalid_fs:
return acl;
}
#else /* !HAVE_ACL_LIBACL_H */
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_get, 3.7.0)
acl_t
pub_glfs_h_acl_get(struct glfs *fs, struct glfs_object *object,
const acl_type_t type)
@@ -2515,6 +2491,7 @@ pub_glfs_h_acl_get(struct glfs *fs, struct glfs_object *object,
return NULL;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_set, 3.7.0)
int
pub_glfs_h_acl_set(struct glfs *fs, struct glfs_object *object,
const acl_type_t type, const acl_t acl)
@@ -2523,10 +2500,9 @@ pub_glfs_h_acl_set(struct glfs *fs, struct glfs_object *object,
return -1;
}
#endif
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_set, 3.7.0);
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_get, 3.7.0);
/* The API to perform read using anonymous fd */
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_anonymous_read, 3.7.0)
ssize_t
pub_glfs_h_anonymous_read(struct glfs *fs, struct glfs_object *object,
const void *buf, size_t count, off_t offset)
@@ -2550,9 +2526,8 @@ pub_glfs_h_anonymous_read(struct glfs *fs, struct glfs_object *object,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_anonymous_read, 3.7.0);
-
/* The API to perform write using anonymous fd */
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_anonymous_write, 3.7.0)
ssize_t
pub_glfs_h_anonymous_write(struct glfs *fs, struct glfs_object *object,
const void *buf, size_t count, off_t offset)
@@ -2576,8 +2551,7 @@ pub_glfs_h_anonymous_write(struct glfs *fs, struct glfs_object *object,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_anonymous_write, 3.7.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_object_copy, 3.11.0)
struct glfs_object *
pub_glfs_object_copy(struct glfs_object *src)
{
@@ -2588,9 +2562,8 @@ pub_glfs_object_copy(struct glfs_object *src)
object = GF_CALLOC(1, sizeof(struct glfs_object), glfs_mt_glfs_object_t);
if (object == NULL) {
errno = ENOMEM;
- gf_msg(THIS->name, GF_LOG_WARNING, errno, API_MSG_CREATE_HANDLE_FAILED,
- "glfs_dup_object for gfid-%s failed",
- uuid_utoa(src->inode->gfid));
+ gf_smsg(THIS->name, GF_LOG_WARNING, errno, API_MSG_CREATE_HANDLE_FAILED,
+ "glfs_dup_object gfid=%s", uuid_utoa(src->inode->gfid), NULL);
return NULL;
}
@@ -2600,26 +2573,25 @@ pub_glfs_object_copy(struct glfs_object *src)
out:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_object_copy, 3.11.0);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_object, 3.11.0)
struct glfs_object *
pub_glfs_xreaddirplus_get_object(struct glfs_xreaddirp_stat *xstat)
{
GF_VALIDATE_OR_GOTO("glfs_xreaddirplus_get_object", xstat, out);
if (!(xstat->flags_handled & GFAPI_XREADDIRP_HANDLE))
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "GFAPI_XREADDIRP_HANDLE is not set. Flags"
- "handled for xstat(%p) are (%x)",
- xstat, xstat->flags_handled);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_HANDLE_NOT_SET,
+ "GFAPI_XREADDIRP_HANDLE xstat=%p", xstat, "handle=%x",
+ xstat->flags_handled, NULL);
return xstat->object;
out:
return NULL;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_object, 3.11.0);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_lease, 4.0.0)
int
pub_glfs_h_lease(struct glfs *fs, struct glfs_object *object,
struct glfs_lease *lease)
@@ -2681,5 +2653,3 @@ out:
invalid_fs:
return ret;
}
-
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_lease, 4.0.0);
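
The glfs-handleops.c hunks repeat the two conversions seen above: symbol-version markers hoisted in front of their functions, and gf_msg() calls with free-form text replaced by gf_smsg() calls that pass NULL-terminated key=value fragments and leave the prose to the message-id definition. The shape of that conversion, isolated into a sketch (API_MSG_EXAMPLE_FAILED and gfid_str are placeholders, not identifiers from the tree):

    /* old form: message text formatted at the call site */
    gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_EXAMPLE_FAILED,
           "operation on gfid %s failed: %s", gfid_str, strerror(errno));

    /* new form, as used throughout this patch: only structured fields,
     * terminated by NULL */
    gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_EXAMPLE_FAILED,
            "gfid=%s", gfid_str, "error=%s", strerror(errno), NULL);
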
diff --git a/api/src/glfs-internal.h b/api/src/glfs-internal.h
index 55401b2910e..7cc3b18a104 100644
--- a/api/src/glfs-internal.h
+++ b/api/src/glfs-internal.h
@@ -16,6 +16,7 @@
#include <glusterfs/upcall-utils.h>
#include "glfs-handles.h"
#include <glusterfs/refcount.h>
+#include <glusterfs/syncop.h>
#define GLFS_SYMLINK_MAX_FOLLOW 2048
@@ -80,25 +81,40 @@
#ifndef GFAPI_PRIVATE
#define GFAPI_PRIVATE(sym, ver) /**/
#endif
+#if __GNUC__ >= 10
#define GFAPI_SYMVER_PUBLIC_DEFAULT(fn, ver) \
- asm(".symver pub_" STR(fn) ", " STR(fn) "@@GFAPI_" STR(ver))
+ __attribute__((__symver__(STR(fn) "@@GFAPI_" STR(ver))))
#define GFAPI_SYMVER_PRIVATE_DEFAULT(fn, ver) \
- asm(".symver priv_" STR(fn) ", " STR(fn) "@@GFAPI_PRIVATE_" STR(ver))
+ __attribute__((__symver__(STR(fn) "@@GFAPI_PRIVATE_" STR(ver))))
#define GFAPI_SYMVER_PUBLIC(fn1, fn2, ver) \
- asm(".symver pub_" STR(fn1) ", " STR(fn2) "@GFAPI_" STR(ver))
+ __attribute__((__symver__(STR(fn2) "@GFAPI_" STR(ver))))
#define GFAPI_SYMVER_PRIVATE(fn1, fn2, ver) \
- asm(".symver priv_" STR(fn1) ", " STR(fn2) "@GFAPI_PRIVATE_" STR(ver))
+ __attribute__((__symver__(STR(fn2) "@GFAPI_PRIVATE_" STR(ver))))
+
+#else
+#define GFAPI_SYMVER_PUBLIC_DEFAULT(fn, ver) \
+ asm(".symver pub_" STR(fn) ", " STR(fn) "@@GFAPI_" STR(ver));
+
+#define GFAPI_SYMVER_PRIVATE_DEFAULT(fn, ver) \
+ asm(".symver priv_" STR(fn) ", " STR(fn) "@@GFAPI_PRIVATE_" STR(ver));
+
+#define GFAPI_SYMVER_PUBLIC(fn1, fn2, ver) \
+ asm(".symver pub_" STR(fn1) ", " STR(fn2) "@GFAPI_" STR(ver));
+
+#define GFAPI_SYMVER_PRIVATE(fn1, fn2, ver) \
+ asm(".symver priv_" STR(fn1) ", " STR(fn2) "@GFAPI_PRIVATE_" STR(ver));
+#endif
#define STR(str) #str
#else
#ifndef GFAPI_PUBLIC
-#define GFAPI_PUBLIC(sym, ver) __asm("_" __STRING(sym) "$GFAPI_" __STRING(ver))
+#define GFAPI_PUBLIC(sym, ver) __asm("_" __STRING(sym) "$GFAPI_" __STRING(ver));
#endif
#ifndef GFAPI_PRIVATE
#define GFAPI_PRIVATE(sym, ver) \
- __asm("_" __STRING(sym) "$GFAPI_PRIVATE_" __STRING(ver))
+ __asm("_" __STRING(sym) "$GFAPI_PRIVATE_" __STRING(ver));
#endif
#define GFAPI_SYMVER_PUBLIC_DEFAULT(fn, dotver) /**/
#define GFAPI_SYMVER_PRIVATE_DEFAULT(fn, dotver) /**/
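
Under GCC 10 and newer the four GFAPI_SYMVER_* macros now rely on the compiler's __symver__ attribute; older compilers keep the inline .symver assembler form, with the trailing semicolon folded into the macro so call sites no longer need one. Roughly, a single marker now expands as follows (the function name is hypothetical):

    /* GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_example, 3.4.0) with GCC >= 10:
     * an attribute attached to the definition that follows it. */
    __attribute__((__symver__("glfs_example@@GFAPI_3.4.0")))
    int
    pub_glfs_example(struct glfs *fs)
    {
        return 0;
    }

    /* The same marker on an older compiler: a top-level .symver directive. */
    asm(".symver pub_glfs_example, glfs_example@@GFAPI_3.4.0");
    int
    pub_glfs_example(struct glfs *fs)
    {
        return 0;
    }
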
@@ -207,6 +223,7 @@ struct glfs {
glfs_upcall_cbk up_cbk; /* upcall cbk function to be registered */
void *up_data; /* Opaque data provided by application
* during upcall registration */
+ struct list_head waitq; /* waiting synctasks */
};
/* This enum is used to maintain the state of glfd. In case of async fops
@@ -442,6 +459,34 @@ glfs_process_upcall_event(struct glfs *fs, void *data)
THIS = glfd->fd->inode->table->xl->ctx->master; \
} while (0)
+#define __GLFS_LOCK_WAIT(fs) \
+ do { \
+ struct synctask *task = NULL; \
+ \
+ task = synctask_get(); \
+ \
+ if (task) { \
+ list_add_tail(&task->waitq, &fs->waitq); \
+ pthread_mutex_unlock(&fs->mutex); \
+ synctask_yield(task, NULL); \
+ pthread_mutex_lock(&fs->mutex); \
+ } else { \
+ /* non-synctask */ \
+ pthread_cond_wait(&fs->cond, &fs->mutex); \
+ } \
+ } while (0)
+
+#define __GLFS_SYNCTASK_WAKE(fs) \
+ do { \
+ struct synctask *waittask = NULL; \
+ \
+ while (!list_empty(&fs->waitq)) { \
+ waittask = list_entry(fs->waitq.next, struct synctask, waitq); \
+ list_del_init(&waittask->waitq); \
+ synctask_wake(waittask); \
+ } \
+ } while (0)
+
/*
By default all lock attempts from user context must
use glfs_lock() and glfs_unlock(). This allows
@@ -466,10 +511,10 @@ glfs_lock(struct glfs *fs, gf_boolean_t wait_for_migration)
pthread_mutex_lock(&fs->mutex);
while (!fs->init)
- pthread_cond_wait(&fs->cond, &fs->mutex);
+ __GLFS_LOCK_WAIT(fs);
while (wait_for_migration && fs->migration_in_progress)
- pthread_cond_wait(&fs->cond, &fs->mutex);
+ __GLFS_LOCK_WAIT(fs);
return 0;
}
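
The new waitq in struct glfs and the __GLFS_LOCK_WAIT / __GLFS_SYNCTASK_WAKE macros make glfs_lock() safe to call from synctask context: a synctask that has to wait now parks itself on fs->waitq and yields, so the syncenv worker running it is not stuck inside pthread_cond_wait(); ordinary threads keep the condition-variable path. The waker side, visible in the glfs-resolve.c hunks further down, broadcasts the condition variable for plain threads and then resumes the parked tasks. Schematically (both halves run under fs->mutex; this is only an outline of code already shown in this patch):

    /* waiter, e.g. glfs_lock() while graph migration is in progress */
    while (wait_for_migration && fs->migration_in_progress)
        __GLFS_LOCK_WAIT(fs);            /* parks a synctask, or cond-waits */

    /* waker, e.g. __glfs_first_lookup() once migration completes */
    fs->migration_in_progress = 0;
    pthread_cond_broadcast(&fs->cond);   /* wakes plain threads */
    __GLFS_SYNCTASK_WAKE(fs);            /* resumes parked synctasks */
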
diff --git a/api/src/glfs-master.c b/api/src/glfs-master.c
index 4bc69d1f3d8..100dcc16cc0 100644
--- a/api/src/glfs-master.c
+++ b/api/src/glfs-master.c
@@ -75,9 +75,10 @@ notify(xlator_t *this, int event, void *data, ...)
switch (event) {
case GF_EVENT_GRAPH_NEW:
- gf_msg(this->name, GF_LOG_INFO, 0, API_MSG_NEW_GRAPH,
- "New graph %s (%d) coming up",
- uuid_utoa((unsigned char *)graph->graph_uuid), graph->id);
+ gf_smsg(this->name, GF_LOG_INFO, 0, API_MSG_NEW_GRAPH,
+ "graph-uuid=%s",
+ uuid_utoa((unsigned char *)graph->graph_uuid), "id=%d",
+ graph->id, NULL);
break;
case GF_EVENT_CHILD_UP:
pthread_mutex_lock(&fs->mutex);
@@ -120,9 +121,8 @@ mem_acct_init(xlator_t *this)
ret = xlator_mem_acct_init(this, glfs_mt_end + 1);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, ENOMEM, API_MSG_MEM_ACCT_INIT_FAILED,
- "Failed to initialise "
- "memory accounting");
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, API_MSG_MEM_ACCT_INIT_FAILED,
+ NULL);
return ret;
}
diff --git a/api/src/glfs-mgmt.c b/api/src/glfs-mgmt.c
index 66a13432f38..7c82b8cd162 100644
--- a/api/src/glfs-mgmt.c
+++ b/api/src/glfs-mgmt.c
@@ -46,16 +46,15 @@ glfs_process_volfp(struct glfs *fs, FILE *fp)
ctx = fs->ctx;
graph = glusterfs_graph_construct(fp);
if (!graph) {
- gf_msg("glfs", GF_LOG_ERROR, errno, API_MSG_GRAPH_CONSTRUCT_FAILED,
- "failed to construct the graph");
+ gf_smsg("glfs", GF_LOG_ERROR, errno, API_MSG_GRAPH_CONSTRUCT_FAILED,
+ NULL);
goto out;
}
for (trav = graph->first; trav; trav = trav->next) {
if (strcmp(trav->type, "mount/api") == 0) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_API_XLATOR_ERROR,
- "api master xlator cannot be specified "
- "in volume file");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_API_XLATOR_ERROR,
+ NULL);
goto out;
}
}
@@ -120,32 +119,28 @@ mgmt_cbk_statedump(struct rpc_clnt *rpc, void *mydata, void *data)
this = mydata;
if (!this) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_STATEDUMP_FAILED,
- "NULL mydata");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_NULL, "mydata", NULL);
errno = EINVAL;
goto out;
}
fs = this->private;
if (!fs) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_STATEDUMP_FAILED,
- "NULL glfs");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_NULL, "glfs", NULL);
errno = EINVAL;
goto out;
}
iov = (struct iovec *)data;
if (!iov) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_STATEDUMP_FAILED,
- "NULL iovec data");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_NULL, "iovec data", NULL);
errno = EINVAL;
goto out;
}
ret = xdr_to_generic(*iov, &target_pid, (xdrproc_t)xdr_gf_statedump);
if (ret < 0) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_STATEDUMP_FAILED,
- "Failed to decode xdr response for GF_CBK_STATEDUMP");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_DECODE_XDR_FAILED, NULL);
goto out;
}
@@ -156,22 +151,21 @@ mgmt_cbk_statedump(struct rpc_clnt *rpc, void *mydata, void *data)
ret = glfs_sysrq(fs, GLFS_SYSRQ_STATEDUMP);
if (ret < 0) {
- gf_msg("glfs", GF_LOG_INFO, 0, API_MSG_STATEDUMP_FAILED,
- "statedump failed");
+ gf_smsg("glfs", GF_LOG_INFO, 0, API_MSG_STATEDUMP_FAILED, NULL);
}
}
out:
return ret;
}
-rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
- [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec},
- [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY,
- mgmt_cbk_event},
- [GF_CBK_STATEDUMP] = {"STATEDUMP", GF_CBK_STATEDUMP, mgmt_cbk_statedump},
+static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
+ [GF_CBK_FETCHSPEC] = {"FETCHSPEC", mgmt_cbk_spec, GF_CBK_FETCHSPEC},
+ [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", mgmt_cbk_event,
+ GF_CBK_EVENT_NOTIFY},
+ [GF_CBK_STATEDUMP] = {"STATEDUMP", mgmt_cbk_statedump, GF_CBK_STATEDUMP},
};
-struct rpcclnt_cb_program mgmt_cbk_prog = {
+static struct rpcclnt_cb_program mgmt_cbk_prog = {
.progname = "GlusterFS Callback",
.prognum = GLUSTER_CBK_PROGRAM,
.progver = GLUSTER_CBK_VERSION,
@@ -179,7 +173,7 @@ struct rpcclnt_cb_program mgmt_cbk_prog = {
.numactors = GF_CBK_MAXVALUE,
};
-char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+static char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_NULL] = "NULL",
[GF_HNDSK_SETVOLUME] = "SETVOLUME",
[GF_HNDSK_GETSPEC] = "GETSPEC",
@@ -188,7 +182,7 @@ char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_GET_VOLUME_INFO] = "GETVOLUMEINFO",
};
-rpc_clnt_prog_t clnt_handshake_prog = {
+static rpc_clnt_prog_t clnt_handshake_prog = {
.progname = "GlusterFS Handshake",
.prognum = GLUSTER_HNDSK_PROGRAM,
.progver = GLUSTER_HNDSK_VERSION,
@@ -230,8 +224,8 @@ mgmt_submit_request(void *req, call_frame_t *frame, glusterfs_ctx_t *ctx,
/* Create the xdr payload */
ret = xdr_serialize_generic(iov, req, xdrproc);
if (ret == -1) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, API_MSG_XDR_PAYLOAD_FAILED,
- "failed to create XDR payload");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, API_MSG_XDR_PAYLOAD_FAILED,
+ NULL);
goto out;
}
iov.iov_len = ret;
@@ -261,7 +255,6 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = 0;
char *volume_id_str = NULL;
dict_t *dict = NULL;
- char key[1024] = {0};
gf_get_volume_info_rsp rsp = {
0,
};
@@ -275,8 +268,8 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
args = frame->local;
if (!ctx) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "NULL context");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_NULL,
+ "context", NULL);
errno = EINVAL;
ret = -1;
goto out;
@@ -285,8 +278,8 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
fs = ((xlator_t *)ctx->master)->private;
if (-1 == req->rpc_status) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "GET_VOLUME_INFO RPC call is not successful");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL,
+ API_MSG_CALL_NOT_SUCCESSFUL, NULL);
errno = EINVAL;
ret = -1;
goto out;
@@ -295,9 +288,8 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_get_volume_info_rsp);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_ERROR, 0,
- API_MSG_XDR_RESPONSE_DECODE_FAILED,
- "Failed to decode xdr response for GET_VOLUME_INFO");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, 0,
+ API_MSG_XDR_RESPONSE_DECODE_FAILED, NULL);
goto out;
}
@@ -313,9 +305,8 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
if (!rsp.dict.dict_len) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "Response received for "
- "GET_VOLUME_INFO RPC call is not valid");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_CALL_NOT_VALID,
+ NULL);
ret = -1;
errno = EINVAL;
goto out;
@@ -336,8 +327,7 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- snprintf(key, sizeof(key), "volume_id");
- ret = dict_get_str(dict, key, &volume_id_str);
+ ret = dict_get_str_sizen(dict, "volume_id", &volume_id_str);
if (ret) {
errno = EINVAL;
goto out;
@@ -353,11 +343,9 @@ out:
}
if (ret) {
- gf_msg(frame->this->name, GF_LOG_ERROR, errno,
- API_MSG_GET_VOLINFO_CBK_FAILED,
- "In GET_VOLUME_INFO "
- "cbk, received error: %s",
- strerror(errno));
+ gf_smsg(frame->this->name, GF_LOG_ERROR, errno,
+ API_MSG_GET_VOLINFO_CBK_FAILED, "error=%s", strerror(errno),
+ NULL);
}
if (dict)
@@ -376,6 +364,7 @@ out:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_get_volumeid, 3.5.0)
int
pub_glfs_get_volumeid(struct glfs *fs, char *volid, size_t size)
{
@@ -399,9 +388,8 @@ pub_glfs_get_volumeid(struct glfs *fs, char *volid, size_t size)
glfs_get_volume_info(fs);
if (gf_uuid_is_null(fs->vol_uuid)) {
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_FETCH_VOLUUID_FAILED,
- "Unable to fetch "
- "volume UUID");
+ gf_smsg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_FETCH_VOLUUID_FAILED,
+ NULL);
goto out;
}
@@ -413,8 +401,7 @@ done:
}
if (size < uuid_size) {
- gf_msg(THIS->name, GF_LOG_ERROR, ERANGE, API_MSG_INSUFF_SIZE,
- "Insufficient size passed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ERANGE, API_MSG_INSUFF_SIZE, NULL);
errno = ERANGE;
goto out;
}
@@ -432,8 +419,6 @@ invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_get_volumeid, 3.5.0);
-
int
glfs_get_volume_info(struct glfs *fs)
{
@@ -447,8 +432,7 @@ glfs_get_volume_info(struct glfs *fs)
ctx = fs->ctx;
frame = create_frame(THIS, ctx->pool);
if (!frame) {
- gf_msg("glfs", GF_LOG_ERROR, ENOMEM, API_MSG_FRAME_CREAT_FAILED,
- "failed to create the frame");
+ gf_smsg("glfs", GF_LOG_ERROR, ENOMEM, API_MSG_FRAME_CREAT_FAILED, NULL);
ret = -1;
goto out;
}
@@ -504,8 +488,8 @@ glfs_get_volume_info_rpc(call_frame_t *frame, xlator_t *this, struct glfs *fs)
flags = (int32_t)GF_GET_VOLUME_UUID; // ctx->flags;
ret = dict_set_int32(dict, "flags", flags);
if (ret) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
- "failed to set flags");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL,
+ API_MSG_DICT_SET_FAILED, "flags", NULL);
goto out;
}
@@ -575,8 +559,8 @@ glfs_mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
ctx = frame->this->ctx;
if (!ctx) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "NULL context");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_NULL,
+ "context", NULL);
errno = EINVAL;
ret = -1;
goto out;
@@ -592,16 +576,15 @@ glfs_mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_ERROR, 0, API_MSG_XDR_DECODE_FAILED,
- "XDR decoding error");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, 0, API_MSG_XDR_DECODE_FAILED,
+ NULL);
ret = -1;
goto out;
}
if (-1 == rsp.op_ret) {
- gf_msg(frame->this->name, GF_LOG_ERROR, rsp.op_errno,
- API_MSG_GET_VOLFILE_FAILED,
- "failed to get the 'volume file' from server");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, rsp.op_errno,
+ API_MSG_GET_VOLFILE_FAILED, "from server", NULL);
ret = -1;
errno = rsp.op_errno;
goto out;
@@ -650,8 +633,7 @@ volfile:
if ((size == fs->oldvollen) &&
(memcmp(fs->oldvolfile, rsp.spec, size) == 0)) {
pthread_mutex_unlock(&fs->mutex);
- gf_msg(frame->this->name, GF_LOG_INFO, 0, API_MSG_VOLFILE_INFO,
- "No change in volfile, continuing");
+ gf_smsg(frame->this->name, GF_LOG_INFO, 0, API_MSG_VOLFILE_INFO, NULL);
goto out;
}
pthread_mutex_unlock(&fs->mutex);
@@ -668,8 +650,8 @@ volfile:
*/
ret = sys_unlink(template);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_INFO, 0, API_MSG_VOLFILE_INFO,
- "Unable to delete file: %s", template);
+ gf_smsg(frame->this->name, GF_LOG_INFO, 0, API_MSG_UNABLE_TO_DEL,
+ "template=%s", template, NULL);
ret = 0;
}
@@ -731,9 +713,7 @@ out:
// Stop if server is running at an unsupported op-version
if (ENOTSUP == ret) {
- gf_msg("mgmt", GF_LOG_ERROR, ENOTSUP, API_MSG_WRONG_OPVERSION,
- "Server is operating at an op-version which is not "
- "supported");
+ gf_smsg("mgmt", GF_LOG_ERROR, ENOTSUP, API_MSG_WRONG_OPVERSION, NULL);
errno = ENOTSUP;
glfs_init_done(fs, -1);
}
@@ -742,9 +722,8 @@ out:
/* Do it only for the first time */
/* Failed to get the volume file, something wrong,
restart the process */
- gf_msg("glfs-mgmt", GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "failed to fetch volume file (key:%s)",
- ctx->cmd_args.volfile_id);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, EINVAL, API_MSG_GET_VOLFILE_FAILED,
+ "key=%s", ctx->cmd_args.volfile_id, NULL);
if (!need_retry) {
if (!errno)
errno = EINVAL;
@@ -767,7 +746,7 @@ glfs_volfile_fetch(struct glfs *fs)
gf_getspec_req req = {
0,
};
- int ret = 0;
+ int ret = -1;
call_frame_t *frame = NULL;
glusterfs_ctx_t *ctx = NULL;
dict_t *dict = NULL;
@@ -775,14 +754,11 @@ glfs_volfile_fetch(struct glfs *fs)
ctx = fs->ctx;
cmd_args = &ctx->cmd_args;
- frame = create_frame(THIS, ctx->pool);
-
req.key = cmd_args->volfile_id;
req.flags = 0;
dict = dict_new();
if (!dict) {
- ret = -1;
goto out;
}
@@ -790,15 +766,15 @@ glfs_volfile_fetch(struct glfs *fs)
// decision
ret = dict_set_int32(dict, "min-op-version", GD_OP_VERSION_MIN);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
- "Failed to set min-op-version in request dict");
+ gf_smsg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
+ "min-op-version", NULL);
goto out;
}
ret = dict_set_int32(dict, "max-op-version", GD_OP_VERSION_MAX);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
- "Failed to set max-op-version in request dict");
+ gf_smsg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
+ "max-op-version", NULL);
goto out;
}
@@ -810,8 +786,14 @@ glfs_volfile_fetch(struct glfs *fs)
ret = dict_allocate_and_serialize(dict, &req.xdata.xdata_val,
&req.xdata.xdata_len);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, API_MSG_DICT_SERIALIZE_FAILED,
- "Failed to serialize dictionary");
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, API_MSG_DICT_SERIALIZE_FAILED,
+ NULL);
+ goto out;
+ }
+
+ frame = create_frame(THIS, ctx->pool);
+ if (!frame) {
+ ret = -1;
goto out;
}
@@ -852,15 +834,13 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
case RPC_CLNT_DISCONNECT:
if (!ctx->active) {
if (rpc_trans->connect_failed)
- gf_msg("glfs-mgmt", GF_LOG_ERROR, 0,
- API_MSG_REMOTE_HOST_CONN_FAILED,
- "failed to connect to remote-host: %s",
- ctx->cmd_args.volfile_server);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, 0,
+ API_MSG_REMOTE_HOST_CONN_FAILED, "server=%s",
+ ctx->cmd_args.volfile_server, NULL);
else
- gf_msg("glfs-mgmt", GF_LOG_INFO, 0,
- API_MSG_REMOTE_HOST_CONN_FAILED,
- "disconnected from remote-host: %s",
- ctx->cmd_args.volfile_server);
+ gf_smsg("glfs-mgmt", GF_LOG_INFO, 0,
+ API_MSG_REMOTE_HOST_CONN_FAILED, "server=%s",
+ ctx->cmd_args.volfile_server, NULL);
if (!rpc->disabled) {
/*
@@ -875,9 +855,8 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
server = ctx->cmd_args.curr_server;
if (server->list.next == &ctx->cmd_args.volfile_servers) {
errno = ENOTCONN;
- gf_msg("glfs-mgmt", GF_LOG_INFO, ENOTCONN,
- API_MSG_VOLFILE_SERVER_EXHAUST,
- "Exhausted all volfile servers");
+ gf_smsg("glfs-mgmt", GF_LOG_INFO, ENOTCONN,
+ API_MSG_VOLFILE_SERVER_EXHAUST, NULL);
glfs_init_done(fs, -1);
break;
}
@@ -890,10 +869,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
ret = dict_set_str(rpc_trans->options, "transport-type",
server->transport);
if (ret != 0) {
- gf_msg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
- API_MSG_DICT_SET_FAILED,
- "failed to set transport-type: %s",
- server->transport);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
+ API_MSG_DICT_SET_FAILED, "transport-type=%s",
+ server->transport, NULL);
errno = ENOTCONN;
glfs_init_done(fs, -1);
break;
@@ -904,10 +882,10 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
"transport.socket.connect-path",
server->volfile_server);
if (ret != 0) {
- gf_msg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
- API_MSG_DICT_SET_FAILED,
- "failed to set socket.connect-path: %s",
- server->volfile_server);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
+ API_MSG_DICT_SET_FAILED,
+ "socket.connect-path=%s",
+ server->volfile_server, NULL);
errno = ENOTCONN;
glfs_init_done(fs, -1);
break;
@@ -922,9 +900,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
ret = dict_set_int32(rpc_trans->options, "remote-port",
server->port);
if (ret != 0) {
- gf_msg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
- API_MSG_DICT_SET_FAILED,
- "failed to set remote-port: %d", server->port);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
+ API_MSG_DICT_SET_FAILED, "remote-port=%d",
+ server->port, NULL);
errno = ENOTCONN;
glfs_init_done(fs, -1);
break;
@@ -933,10 +911,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
ret = dict_set_str(rpc_trans->options, "remote-host",
server->volfile_server);
if (ret != 0) {
- gf_msg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
- API_MSG_DICT_SET_FAILED,
- "failed to set remote-host: %s",
- server->volfile_server);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
+ API_MSG_DICT_SET_FAILED, "remote-host=%s",
+ server->volfile_server, NULL);
errno = ENOTCONN;
glfs_init_done(fs, -1);
break;
@@ -949,10 +926,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
"transport.socket.connect-path");
}
- gf_msg("glfs-mgmt", GF_LOG_INFO, 0, API_MSG_VOLFILE_CONNECTING,
- "connecting to next volfile server %s"
- " at port %d with transport: %s",
- server->volfile_server, server->port, server->transport);
+ gf_smsg("glfs-mgmt", GF_LOG_INFO, 0, API_MSG_VOLFILE_CONNECTING,
+ "server=%s", server->volfile_server, "port=%d",
+ server->port, "transport=%s", server->transport, NULL);
}
break;
case RPC_CLNT_CONNECT:
@@ -960,9 +936,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
if (ret && (ctx->active == NULL)) {
/* Do it only for the first time */
/* Exit the process.. there are some wrong options */
- gf_msg("glfs-mgmt", GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "failed to fetch volume file (key:%s)",
- ctx->cmd_args.volfile_id);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, EINVAL,
+ API_MSG_GET_VOLFILE_FAILED, "key=%s",
+ ctx->cmd_args.volfile_id, NULL);
errno = EINVAL;
glfs_init_done(fs, -1);
}
@@ -1037,30 +1013,25 @@ glfs_mgmt_init(struct glfs *fs)
if (ret)
goto out;
- if (sys_access(SECURE_ACCESS_FILE, F_OK) == 0) {
- ctx->secure_mgmt = 1;
- ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
- }
-
rpc = rpc_clnt_new(options, THIS, THIS->name, 8);
if (!rpc) {
ret = -1;
- gf_msg(THIS->name, GF_LOG_WARNING, 0, API_MSG_CREATE_RPC_CLIENT_FAILED,
- "failed to create rpc clnt");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, API_MSG_CREATE_RPC_CLIENT_FAILED,
+ NULL);
goto out;
}
ret = rpc_clnt_register_notify(rpc, mgmt_rpc_notify, THIS);
if (ret) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, API_MSG_REG_NOTIFY_FUNC_FAILED,
- "failed to register notify function");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, API_MSG_REG_NOTIFY_FUNC_FAILED,
+ NULL);
goto out;
}
ret = rpcclnt_cbk_program_register(rpc, &mgmt_cbk_prog, THIS);
if (ret) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, API_MSG_REG_CBK_FUNC_FAILED,
- "failed to register callback function");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, API_MSG_REG_CBK_FUNC_FAILED,
+ NULL);
goto out;
}
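
Besides the gf_smsg conversion, the glfs-mgmt.c changes give the callback and handshake tables internal linkage, drop the snprintf of a constant dict key in favour of dict_get_str_sizen(), and rework glfs_volfile_fetch() so that ret starts out as -1 and create_frame() is called only after the request dictionary has been built, with its result now checked for NULL. The error-handling shape of that last change, reduced to a self-contained sketch with placeholder types:

    #include <stdlib.h>

    struct fake_req   { int serialized; };
    struct fake_frame { int unused; };

    static int volfile_fetch_sketch(void)
    {
        int ret = -1;                            /* pessimistic default */
        struct fake_req *req = calloc(1, sizeof(*req));
        struct fake_frame *frame = NULL;

        if (!req)
            goto out;                            /* ret is already -1 */

        req->serialized = 1;                     /* build the request first */

        frame = calloc(1, sizeof(*frame));       /* allocate the frame last */
        if (!frame)
            goto out;                            /* ... and check it */

        ret = 0;                                 /* submit would happen here */
    out:
        free(frame);
        free(req);
        return ret;
    }
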
diff --git a/api/src/glfs-resolve.c b/api/src/glfs-resolve.c
index a79f4905749..8a393ecb464 100644
--- a/api/src/glfs-resolve.c
+++ b/api/src/glfs-resolve.c
@@ -65,6 +65,9 @@ __glfs_first_lookup(struct glfs *fs, xlator_t *subvol)
fs->migration_in_progress = 0;
pthread_cond_broadcast(&fs->cond);
+ /* wake up other waiting tasks */
+ __GLFS_SYNCTASK_WAKE(fs);
+
return ret;
}
@@ -116,9 +119,9 @@ glfs_refresh_inode_safe(xlator_t *subvol, inode_t *oldinode,
DECODE_SYNCOP_ERR(ret);
if (ret) {
- gf_msg(subvol->name, GF_LOG_WARNING, errno,
- API_MSG_INODE_REFRESH_FAILED, "inode refresh of %s failed: %s",
- uuid_utoa(oldinode->gfid), strerror(errno));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno,
+ API_MSG_INODE_REFRESH_FAILED, "gfid=%s",
+ uuid_utoa(oldinode->gfid), "err=%s", strerror(errno), NULL);
loc_wipe(&loc);
return NULL;
}
@@ -129,9 +132,8 @@ glfs_refresh_inode_safe(xlator_t *subvol, inode_t *oldinode,
inode_ctx_set(newinode, THIS, &ctx_value);
inode_lookup(newinode);
} else {
- gf_msg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
- "inode linking of %s failed",
- uuid_utoa((unsigned char *)&iatt.ia_gfid));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
+ "gfid=%s", uuid_utoa((unsigned char *)&iatt.ia_gfid), NULL);
}
loc_wipe(&loc);
@@ -154,9 +156,13 @@ __glfs_refresh_inode(struct glfs *fs, xlator_t *subvol, inode_t *inode,
fs->migration_in_progress = 0;
pthread_cond_broadcast(&fs->cond);
+ /* wake up other waiting tasks */
+ __GLFS_SYNCTASK_WAKE(fs);
+
return newinode;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_loc_touchup, 3.4.0)
int
priv_glfs_loc_touchup(loc_t *loc)
{
@@ -171,8 +177,6 @@ priv_glfs_loc_touchup(loc_t *loc)
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_loc_touchup, 3.4.0);
-
int
glfs_resolve_symlink(struct glfs *fs, xlator_t *subvol, inode_t *inode,
char **lpath)
@@ -335,6 +339,7 @@ glfs_resolve_component(struct glfs *fs, xlator_t *subvol, inode_t *parent,
if (temp_parent) {
inode_unref(loc.parent);
loc.parent = temp_parent;
+ gf_uuid_copy(loc.pargfid, temp_parent->gfid);
inode_find_directory_name(loc.inode, &loc.name);
}
@@ -345,10 +350,12 @@ glfs_resolve_component(struct glfs *fs, xlator_t *subvol, inode_t *parent,
if (temp_parent) {
inode_unref(loc.parent);
loc.parent = temp_parent;
+ gf_uuid_copy(loc.pargfid, temp_parent->gfid);
inode_find_directory_name(loc.inode, &loc.name);
} else if (__is_root_gfid(loc.inode->gfid)) {
inode_unref(loc.parent);
loc.parent = inode_ref(loc.inode);
+ gf_uuid_copy(loc.pargfid, loc.inode->gfid);
loc.name = ".";
} else {
inode_unref(loc.inode);
@@ -439,9 +446,8 @@ glfs_resolve_component(struct glfs *fs, xlator_t *subvol, inode_t *parent,
inode = inode_link(loc.inode, loc.parent, component, &ciatt);
if (!inode) {
- gf_msg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
- "inode linking of %s failed",
- uuid_utoa((unsigned char *)&ciatt.ia_gfid));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
+ "gfid=%s", uuid_utoa((unsigned char *)&ciatt.ia_gfid), NULL);
goto out;
} else if (inode == loc.inode)
inode_ctx_set(inode, THIS, &ctx_value);
@@ -460,6 +466,7 @@ out:
return inode;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_resolve_at, 3.4.0)
int
priv_glfs_resolve_at(struct glfs *fs, xlator_t *subvol, inode_t *at,
const char *origpath, loc_t *loc, struct iatt *iatt,
@@ -610,8 +617,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_resolve_at, 3.4.0);
-
int
glfs_resolve_path(struct glfs *fs, xlator_t *subvol, const char *origpath,
loc_t *loc, struct iatt *iatt, int follow, int reval)
@@ -625,8 +630,8 @@ glfs_resolve_path(struct glfs *fs, xlator_t *subvol, const char *origpath,
cwd = glfs_cwd_get(fs);
if (NULL == cwd) {
- gf_msg(subvol->name, GF_LOG_WARNING, EIO, API_MSG_GET_CWD_FAILED,
- "Failed to get cwd");
+ gf_smsg(subvol->name, GF_LOG_WARNING, EIO, API_MSG_GET_CWD_FAILED,
+ NULL);
errno = EIO;
goto out;
}
@@ -640,6 +645,7 @@ out:
return ret;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_resolve, 3.7.0)
int
priv_glfs_resolve(struct glfs *fs, xlator_t *subvol, const char *origpath,
loc_t *loc, struct iatt *iatt, int reval)
@@ -650,7 +656,6 @@ priv_glfs_resolve(struct glfs *fs, xlator_t *subvol, const char *origpath,
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_resolve, 3.7.0);
int
glfs_lresolve(struct glfs *fs, xlator_t *subvol, const char *origpath,
@@ -680,28 +685,27 @@ glfs_migrate_fd_locks_safe(struct glfs *fs, xlator_t *oldsubvol, fd_t *oldfd,
NULL, NULL);
DECODE_SYNCOP_ERR(ret);
if (ret < 0) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FGETXATTR_FAILED,
- "fgetxattr (%s) failed (%s) on graph %s (%d)",
- uuid_utoa_r(oldfd->inode->gfid, uuid1), strerror(errno),
- graphid_str(oldsubvol), oldsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FGETXATTR_FAILED,
+ "gfid=%s", uuid_utoa_r(oldfd->inode->gfid, uuid1), "err=%s",
+ strerror(errno), "subvol=%s", graphid_str(oldsubvol), "id=%d",
+ oldsubvol->graph->id, NULL);
goto out;
}
if (!dict_get(lockinfo, GF_XATTR_LOCKINFO_KEY)) {
- gf_msg(fs->volname, GF_LOG_WARNING, 0, API_MSG_LOCKINFO_KEY_MISSING,
- "missing lockinfo key (%s) on graph %s (%d)",
- uuid_utoa_r(oldfd->inode->gfid, uuid1), graphid_str(oldsubvol),
- oldsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, 0, API_MSG_LOCKINFO_KEY_MISSING,
+ "gfid=%s", uuid_utoa_r(oldfd->inode->gfid, uuid1), "subvol=%s",
+ graphid_str(oldsubvol), "id=%d", oldsubvol->graph->id, NULL);
goto out;
}
ret = syncop_fsetxattr(newsubvol, newfd, lockinfo, 0, NULL, NULL);
DECODE_SYNCOP_ERR(ret);
if (ret < 0) {
- gf_msg(fs->volname, GF_LOG_WARNING, 0, API_MSG_FSETXATTR_FAILED,
- "fsetxattr (%s) failed (%s) on graph %s (%d)",
- uuid_utoa_r(newfd->inode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, 0, API_MSG_FSETXATTR_FAILED,
+ "gfid=%s", uuid_utoa_r(newfd->inode->gfid, uuid1), "err=%s",
+ strerror(errno), "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
out:
@@ -722,6 +726,7 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
0,
};
char uuid1[64];
+ dict_t *xdata = NULL;
oldinode = oldfd->inode;
oldsubvol = oldinode->table->xl;
@@ -730,32 +735,43 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
return fd_ref(oldfd);
if (!oldsubvol->switched) {
- ret = syncop_fsync(oldsubvol, oldfd, 0, NULL, NULL, NULL, NULL);
+ xdata = dict_new();
+ if (!xdata || dict_set_int8(xdata, "last-fsync", 1)) {
+ gf_smsg(fs->volname, GF_LOG_WARNING, ENOMEM, API_MSG_FSYNC_FAILED,
+ "err=%s", "last-fsync set failed", "gfid=%s",
+ uuid_utoa_r(oldfd->inode->gfid, uuid1), "subvol=%s",
+ graphid_str(oldsubvol), "id=%d", oldsubvol->graph->id,
+ NULL);
+ }
+
+ ret = syncop_fsync(oldsubvol, oldfd, 0, NULL, NULL, xdata, NULL);
DECODE_SYNCOP_ERR(ret);
if (ret) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FSYNC_FAILED,
- "fsync() failed "
- "(%s) on %s graph %s (%d)",
- strerror(errno), uuid_utoa_r(oldfd->inode->gfid, uuid1),
- graphid_str(oldsubvol), oldsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FSYNC_FAILED,
+ "err=%s", strerror(errno), "gfid=%s",
+ uuid_utoa_r(oldfd->inode->gfid, uuid1), "subvol=%s",
+ graphid_str(oldsubvol), "id=%d", oldsubvol->graph->id,
+ NULL);
}
}
newinode = glfs_refresh_inode_safe(newsubvol, oldinode, _gf_false);
if (!newinode) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_INODE_REFRESH_FAILED,
- "inode (%s) refresh failed (%s) on graph %s (%d)",
- uuid_utoa_r(oldinode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno,
+ API_MSG_INODE_REFRESH_FAILED, "gfid=%s",
+ uuid_utoa_r(oldinode->gfid, uuid1), "err=%s", strerror(errno),
+ "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
newfd = fd_create(newinode, getpid());
if (!newfd) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FDCREATE_FAILED,
- "fd_create (%s) failed (%s) on graph %s (%d)",
- uuid_utoa_r(newinode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno,
+ API_MSG_FDCREATE_FAILED_ON_GRAPH, "gfid=%s",
+ uuid_utoa_r(newinode->gfid, uuid1), "err=%s", strerror(errno),
+ "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
@@ -763,8 +779,7 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
ret = inode_path(oldfd->inode, NULL, (char **)&loc.path);
if (ret < 0) {
- gf_msg(fs->volname, GF_LOG_INFO, 0, API_MSG_INODE_PATH_FAILED,
- "inode_path failed");
+ gf_smsg(fs->volname, GF_LOG_INFO, 0, API_MSG_INODE_PATH_FAILED, NULL);
goto out;
}
@@ -780,21 +795,21 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
loc_wipe(&loc);
if (ret) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_SYNCOP_OPEN_FAILED,
- "syncop_open%s (%s) failed (%s) on graph %s (%d)",
- IA_ISDIR(oldinode->ia_type) ? "dir" : "",
- uuid_utoa_r(newinode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno, API_MSG_SYNCOP_OPEN_FAILED,
+ "type=%s", IA_ISDIR(oldinode->ia_type) ? "dir" : "", "gfid=%s",
+ uuid_utoa_r(newinode->gfid, uuid1), "err=%s", strerror(errno),
+ "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
ret = glfs_migrate_fd_locks_safe(fs, oldsubvol, oldfd, newsubvol, newfd);
if (ret) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_LOCK_MIGRATE_FAILED,
- "lock migration (%s) failed (%s) on graph %s (%d)",
- uuid_utoa_r(newinode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno, API_MSG_LOCK_MIGRATE_FAILED,
+ "gfid=%s", uuid_utoa_r(newinode->gfid, uuid1), "err=%s",
+ strerror(errno), "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
@@ -809,6 +824,9 @@ out:
newfd = NULL;
}
+ if (xdata)
+ dict_unref(xdata);
+
return newfd;
}
@@ -829,6 +847,9 @@ __glfs_migrate_fd(struct glfs *fs, xlator_t *newsubvol, struct glfs_fd *glfd)
fs->migration_in_progress = 0;
pthread_cond_broadcast(&fs->cond);
+ /* wake up other waiting tasks */
+ __GLFS_SYNCTASK_WAKE(fs);
+
return newfd;
}
@@ -875,9 +896,9 @@ __glfs_migrate_openfds(struct glfs *fs, xlator_t *subvol)
list_for_each_entry(glfd, &fs->openfds, openfds)
{
if (gf_uuid_is_null(glfd->fd->inode->gfid)) {
- gf_msg(fs->volname, GF_LOG_INFO, 0, API_MSG_OPENFD_SKIPPED,
- "skipping openfd %p/%p in graph %s (%d)", glfd, glfd->fd,
- graphid_str(subvol), subvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_INFO, 0, API_MSG_OPENFD_SKIPPED,
+ "glfd=%p", glfd, "glfd->fd=%p", glfd->fd, "subvol=%s",
+ graphid_str(subvol), "id=%d", subvol->graph->id, NULL);
/* create in progress, defer */
continue;
}
@@ -912,10 +933,10 @@ __glfs_active_subvol(struct glfs *fs)
ret = __glfs_first_lookup(fs, new_subvol);
if (ret) {
- gf_msg(fs->volname, GF_LOG_INFO, errno,
- API_MSG_FIRST_LOOKUP_GRAPH_FAILED,
- "first lookup on graph %s (%d) failed (%s)",
- graphid_str(new_subvol), new_subvol->graph->id, strerror(errno));
+ gf_smsg(fs->volname, GF_LOG_INFO, errno,
+ API_MSG_FIRST_LOOKUP_GRAPH_FAILED, "subvol=%s",
+ graphid_str(new_subvol), "id=%d", new_subvol->graph->id,
+ "err=%s", strerror(errno), NULL);
return NULL;
}
@@ -924,11 +945,11 @@ __glfs_active_subvol(struct glfs *fs)
if (!new_cwd) {
char buf1[64];
- gf_msg(fs->volname, GF_LOG_INFO, errno,
- API_MSG_CWD_GRAPH_REF_FAILED,
- "cwd refresh of %s graph %s (%d) failed (%s)",
- uuid_utoa_r(fs->cwd->gfid, buf1), graphid_str(new_subvol),
- new_subvol->graph->id, strerror(errno));
+ gf_smsg(fs->volname, GF_LOG_INFO, errno,
+ API_MSG_CWD_GRAPH_REF_FAILED, "buf=%s",
+ uuid_utoa_r(fs->cwd->gfid, buf1), "subvol=%s",
+ graphid_str(new_subvol), "id=%d", new_subvol->graph->id,
+ "err=%s", strerror(errno), NULL);
return NULL;
}
}
@@ -949,13 +970,13 @@ __glfs_active_subvol(struct glfs *fs)
inode_unref(new_cwd);
}
- gf_msg(fs->volname, GF_LOG_INFO, 0, API_MSG_SWITCHED_GRAPH,
- "switched to graph %s (%d)", graphid_str(new_subvol),
- new_subvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_INFO, 0, API_MSG_SWITCHED_GRAPH, "subvol=%s",
+ graphid_str(new_subvol), "id=%d", new_subvol->graph->id, NULL);
return new_subvol;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_subvol_done, 3.4.0)
void
priv_glfs_subvol_done(struct glfs *fs, xlator_t *subvol)
{
@@ -983,8 +1004,7 @@ priv_glfs_subvol_done(struct glfs *fs, xlator_t *subvol)
}
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_subvol_done, 3.4.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_active_subvol, 3.4.0)
xlator_t *
priv_glfs_active_subvol(struct glfs *fs)
{
@@ -1012,8 +1032,6 @@ priv_glfs_active_subvol(struct glfs *fs)
return subvol;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_active_subvol, 3.4.0);
-
int
__glfs_cwd_set(struct glfs *fs, inode_t *inode)
{
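
Besides the gf_smsg() conversions, the glfs-resolve.c hunks above move the GFAPI_SYMVER_*_DEFAULT() macros from after each function (with a trailing semicolon) to immediately before its definition, wake waiting synctasks after clearing migration_in_progress, and copy the new parent's GFID into loc.pargfid whenever loc.parent is swapped. That last point is easy to miss, so here is a small, self-contained sketch of why the cached parent GFID must follow the parent pointer; the demo types below are stand-ins, not the real inode_t/loc_t.

#include <stdio.h>
#include <string.h>

/* Minimal stand-ins, only to show that loc.pargfid must be refreshed
 * whenever loc.parent is replaced (as the hunks above now do with
 * gf_uuid_copy()). */
typedef struct { unsigned char gfid[16]; } demo_inode;
typedef struct { demo_inode *parent; unsigned char pargfid[16]; } demo_loc;

static void
demo_set_parent(demo_loc *loc, demo_inode *new_parent)
{
    loc->parent = new_parent;
    /* Keep the cached parent GFID in sync with the new parent. */
    memcpy(loc->pargfid, new_parent->gfid, sizeof(loc->pargfid));
}

int
main(void)
{
    demo_inode old_parent = {{0}}, new_parent = {{0}};
    new_parent.gfid[0] = 0xab;

    demo_loc loc = {.parent = &old_parent};
    demo_set_parent(&loc, &new_parent);
    printf("pargfid[0]=0x%02x\n", loc.pargfid[0]); /* prints ab */
    return 0;
}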
diff --git a/api/src/glfs.c b/api/src/glfs.c
index 98054b6bdb6..b4bf1423f6d 100644
--- a/api/src/glfs.c
+++ b/api/src/glfs.c
@@ -69,8 +69,8 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ret = xlator_mem_acct_init(THIS, glfs_mt_end + 1);
if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_MEM_ACCT_INIT_FAILED,
- "Memory accounting init failed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_MEM_ACCT_INIT_FAILED,
+ NULL);
return ret;
}
@@ -189,10 +189,8 @@ create_master(struct glfs *fs)
goto err;
if (xlator_set_type(master, "mount/api") == -1) {
- gf_msg("glfs", GF_LOG_ERROR, 0, API_MSG_MASTER_XLATOR_INIT_FAILED,
- "master xlator "
- "for %s initialization failed",
- fs->volname);
+ gf_smsg("glfs", GF_LOG_ERROR, 0, API_MSG_MASTER_XLATOR_INIT_FAILED,
+ "name=%s", fs->volname, NULL);
goto err;
}
@@ -204,8 +202,8 @@ create_master(struct glfs *fs)
ret = xlator_init(master);
if (ret) {
- gf_msg("glfs", GF_LOG_ERROR, 0, API_MSG_GFAPI_XLATOR_INIT_FAILED,
- "failed to initialize gfapi translator");
+ gf_smsg("glfs", GF_LOG_ERROR, 0, API_MSG_GFAPI_XLATOR_INIT_FAILED,
+ NULL);
goto err;
}
@@ -231,9 +229,8 @@ get_volfp(struct glfs *fs)
cmd_args = &fs->ctx->cmd_args;
if ((specfp = fopen(cmd_args->volfile, "r")) == NULL) {
- gf_msg("glfs", GF_LOG_ERROR, errno, API_MSG_VOLFILE_OPEN_FAILED,
- "volume file %s open failed: %s", cmd_args->volfile,
- strerror(errno));
+ gf_smsg("glfs", GF_LOG_ERROR, errno, API_MSG_VOLFILE_OPEN_FAILED,
+ "file=%s", cmd_args->volfile, "err=%s", strerror(errno), NULL);
return NULL;
}
@@ -254,6 +251,11 @@ glfs_volumes_init(struct glfs *fs)
if (!vol_assigned(cmd_args))
return -1;
+ if (sys_access(SECURE_ACCESS_FILE, F_OK) == 0) {
+ fs->ctx->secure_mgmt = 1;
+ fs->ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
+ }
+
if (cmd_args->volfile_server) {
ret = glfs_mgmt_init(fs);
goto out;
@@ -262,8 +264,8 @@ glfs_volumes_init(struct glfs *fs)
fp = get_volfp(fs);
if (!fp) {
- gf_msg("glfs", GF_LOG_ERROR, ENOENT, API_MSG_VOL_SPEC_FILE_ERROR,
- "Cannot reach volume specification file");
+ gf_smsg("glfs", GF_LOG_ERROR, ENOENT, API_MSG_VOL_SPEC_FILE_ERROR,
+ NULL);
ret = -1;
goto out;
}
@@ -278,6 +280,7 @@ out:
///////////////////////////////////////////////////////////////////////////////
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_xlator_option, 3.4.0)
int
pub_glfs_set_xlator_option(struct glfs *fs, const char *xlator, const char *key,
const char *value)
@@ -327,8 +330,7 @@ invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_xlator_option, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unset_volfile_server, 3.5.1)
int
pub_glfs_unset_volfile_server(struct glfs *fs, const char *transport,
const char *host, const int port)
@@ -369,6 +371,8 @@ pub_glfs_unset_volfile_server(struct glfs *fs, const char *transport,
list_for_each_entry_safe(server, tmp, &cmd_args->curr_server->list, list)
{
+ if (!server->volfile_server || !server->transport)
+ continue;
if ((!strcmp(server->volfile_server, host) &&
!strcmp(server->transport, transport_val) &&
(server->port == port_val))) {
@@ -386,8 +390,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unset_volfile_server, 3.5.1);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_volfile_server, 3.4.0)
int
pub_glfs_set_volfile_server(struct glfs *fs, const char *transport,
const char *host, int port)
@@ -420,14 +423,11 @@ pub_glfs_set_volfile_server(struct glfs *fs, const char *transport,
server_transport = gf_strdup(transport);
} else if (!strcmp(transport, "rdma")) {
server_transport = gf_strdup(GF_DEFAULT_VOLFILE_TRANSPORT);
- gf_msg("glfs", GF_LOG_WARNING, EINVAL, API_MSG_INVALID_ENTRY,
- "transport RDMA is deprecated, "
- "falling back to tcp");
+ gf_smsg("glfs", GF_LOG_WARNING, EINVAL, API_MSG_TRANS_RDMA_DEP,
+ NULL);
} else {
- gf_msg("glfs", GF_LOG_TRACE, EINVAL, API_MSG_INVALID_ENTRY,
- "transport %s is not supported, "
- "possible values tcp|unix",
- transport);
+ gf_smsg("glfs", GF_LOG_TRACE, EINVAL, API_MSG_TRANS_NOT_SUPPORTED,
+ "transport=%s", transport, NULL);
goto out;
}
} else {
@@ -469,8 +469,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_volfile_server, 3.4.0);
-
/* *
* Used to free the arguments allocated by glfs_set_volfile_server()
*/
@@ -513,6 +511,7 @@ glfs_free_xlator_options(cmd_args_t *cmd_args)
}
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsuid, 3.4.2)
int
pub_glfs_setfsuid(uid_t fsuid)
{
@@ -522,8 +521,7 @@ pub_glfs_setfsuid(uid_t fsuid)
return syncopctx_setfsuid(&fsuid);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsuid, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsgid, 3.4.2)
int
pub_glfs_setfsgid(gid_t fsgid)
{
@@ -533,8 +531,7 @@ pub_glfs_setfsgid(gid_t fsgid)
return syncopctx_setfsgid(&fsgid);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsgid, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsgroups, 3.4.2)
int
pub_glfs_setfsgroups(size_t size, const gid_t *list)
{
@@ -544,8 +541,7 @@ pub_glfs_setfsgroups(size_t size, const gid_t *list)
return syncopctx_setfsgroups(size, list);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsgroups, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsleaseid, 4.0.0)
int
pub_glfs_setfsleaseid(glfs_leaseid_t leaseid)
{
@@ -567,8 +563,6 @@ pub_glfs_setfsleaseid(glfs_leaseid_t leaseid)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsleaseid, 4.0.0);
-
int
get_fop_attr_glfd(dict_t **fop_attr, struct glfs_fd *glfd)
{
@@ -656,14 +650,19 @@ unset_fop_attr(dict_t **fop_attr)
*fop_attr = NULL;
}
}
+
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_from_glfd, 3.4.0)
struct glfs *
pub_glfs_from_glfd(struct glfs_fd *glfd)
{
+ if (glfd == NULL) {
+ errno = EBADF;
+ return NULL;
+ }
+
return glfd->fs;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_from_glfd, 3.4.0);
-
static void
glfs_fd_destroy(struct glfs_fd *glfd)
{
@@ -741,6 +740,7 @@ glfs_new_fs(const char *volname)
INIT_LIST_HEAD(&fs->openfds);
INIT_LIST_HEAD(&fs->upcall_list);
+ INIT_LIST_HEAD(&fs->waitq);
PTHREAD_MUTEX_INIT(&fs->mutex, NULL, fs->pthread_flags, GLFS_INIT_MUTEX,
err);
@@ -811,21 +811,40 @@ unlock:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_new, 3.4.0)
struct glfs *
pub_glfs_new(const char *volname)
{
+ if (!volname) {
+ errno = EINVAL;
+ return NULL;
+ }
+
struct glfs *fs = NULL;
+ int i = 0;
int ret = -1;
glusterfs_ctx_t *ctx = NULL;
xlator_t *old_THIS = NULL;
char pname[16] = "";
char msg[32] = "";
- if (!volname) {
+ if (volname[0] == '/' || volname[0] == '-') {
+ if (strncmp(volname, "/snaps/", 7) == 0) {
+ goto label;
+ }
errno = EINVAL;
return NULL;
}
+ for (i = 0; i < strlen(volname); i++) {
+ if (!isalnum(volname[i]) && (volname[i] != '_') &&
+ (volname[i] != '-')) {
+ errno = EINVAL;
+ return NULL;
+ }
+ }
+
+label:
/*
* Do this as soon as possible in case something else depends on
* pool allocations.
@@ -899,8 +918,7 @@ out:
return fs;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_new, 3.4.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_new_from_ctx, 3.7.0)
struct glfs *
priv_glfs_new_from_ctx(glusterfs_ctx_t *ctx)
{
@@ -919,8 +937,7 @@ out:
return fs;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_new_from_ctx, 3.7.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_free_from_ctx, 3.7.0)
void
priv_glfs_free_from_ctx(struct glfs *fs)
{
@@ -956,8 +973,7 @@ priv_glfs_free_from_ctx(struct glfs *fs)
FREE(fs);
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_free_from_ctx, 3.7.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_volfile, 3.4.0)
int
pub_glfs_set_volfile(struct glfs *fs, const char *volfile)
{
@@ -974,8 +990,7 @@ pub_glfs_set_volfile(struct glfs *fs, const char *volfile)
return 0;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_volfile, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_logging, 3.4.0)
int
pub_glfs_set_logging(struct glfs *fs, const char *logfile, int loglevel)
{
@@ -1013,8 +1028,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_logging, 3.4.0);
-
int
glfs_init_wait(struct glfs *fs)
{
@@ -1033,14 +1046,14 @@ glfs_init_wait(struct glfs *fs)
return ret;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_init_done, 3.4.0)
void
priv_glfs_init_done(struct glfs *fs, int ret)
{
glfs_init_cbk init_cbk;
if (!fs) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_GLFS_FSOBJ_NULL,
- "fs is NULL");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_GLFS_FSOBJ_NULL, NULL);
goto out;
}
@@ -1064,8 +1077,6 @@ out:
return;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_init_done, 3.4.0);
-
int
glfs_init_common(struct glfs *fs)
{
@@ -1093,8 +1104,7 @@ glfs_init_async(struct glfs *fs, glfs_init_cbk cbk)
int ret = -1;
if (!fs || !fs->ctx) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "fs is not properly initialized.");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_FS_NOT_INIT, NULL);
errno = EINVAL;
return ret;
}
@@ -1106,6 +1116,7 @@ glfs_init_async(struct glfs *fs, glfs_init_cbk cbk)
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_init, 3.4.0)
int
pub_glfs_init(struct glfs *fs)
{
@@ -1114,8 +1125,7 @@ pub_glfs_init(struct glfs *fs)
DECLARE_OLD_THIS;
if (!fs || !fs->ctx) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "fs is not properly initialized.");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_FS_NOT_INIT, NULL);
errno = EINVAL;
return ret;
}
@@ -1139,8 +1149,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_init, 3.4.0);
-
static int
glusterfs_ctx_destroy(glusterfs_ctx_t *ctx)
{
@@ -1218,6 +1226,7 @@ glusterfs_ctx_destroy(glusterfs_ctx_t *ctx)
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fini, 3.4.0)
int
pub_glfs_fini(struct glfs *fs)
{
@@ -1229,6 +1238,7 @@ pub_glfs_fini(struct glfs *fs)
call_pool_t *call_pool = NULL;
int fs_init = 0;
int err = -1;
+ struct synctask *waittask = NULL;
DECLARE_OLD_THIS;
@@ -1250,6 +1260,13 @@ pub_glfs_fini(struct glfs *fs)
call_pool = fs->ctx->pool;
+ /* Wake up any suspended synctasks */
+ while (!list_empty(&fs->waitq)) {
+ waittask = list_entry(fs->waitq.next, struct synctask, waitq);
+ list_del_init(&waittask->waitq);
+ synctask_wake(waittask);
+ }
+
while (countdown--) {
/* give some time for background frames to finish */
pthread_mutex_lock(&fs->mutex);
@@ -1268,7 +1285,7 @@ pub_glfs_fini(struct glfs *fs)
}
}
pthread_mutex_unlock(&fs->mutex);
- usleep(100000);
+ gf_nanosleep(100000 * GF_US_IN_NS);
}
/* leaked frames may exist, we ignore */
@@ -1301,10 +1318,8 @@ pub_glfs_fini(struct glfs *fs)
graph = subvol->graph;
err = pthread_mutex_lock(&fs->mutex);
if (err != 0) {
- gf_msg("glfs", GF_LOG_ERROR, err, API_MSG_FSMUTEX_LOCK_FAILED,
- "pthread lock on glfs mutex, "
- "returned error: (%s)",
- strerror(err));
+ gf_smsg("glfs", GF_LOG_ERROR, err, API_MSG_FSMUTEX_LOCK_FAILED,
+ "error=%s", strerror(err), NULL);
goto fail;
}
/* check and wait for CHILD_DOWN for active subvol*/
@@ -1312,19 +1327,17 @@ pub_glfs_fini(struct glfs *fs)
while (graph->used) {
err = pthread_cond_wait(&fs->child_down_cond, &fs->mutex);
if (err != 0)
- gf_msg("glfs", GF_LOG_INFO, err,
- API_MSG_COND_WAIT_FAILED,
- "%s cond wait failed %s", subvol->name,
- strerror(err));
+ gf_smsg("glfs", GF_LOG_INFO, err,
+ API_MSG_COND_WAIT_FAILED, "name=%s",
+ subvol->name, "err=%s", strerror(err), NULL);
}
}
err = pthread_mutex_unlock(&fs->mutex);
if (err != 0) {
- gf_msg("glfs", GF_LOG_ERROR, err, API_MSG_FSMUTEX_UNLOCK_FAILED,
- "pthread unlock on glfs mutex, "
- "returned error: (%s)",
- strerror(err));
+ gf_smsg("glfs", GF_LOG_ERROR, err,
+ API_MSG_FSMUTEX_UNLOCK_FAILED, "error=%s",
+ strerror(err), NULL);
goto fail;
}
}
@@ -1404,8 +1417,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fini, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_get_volfile, 3.6.0)
ssize_t
pub_glfs_get_volfile(struct glfs *fs, void *buf, size_t len)
{
@@ -1431,8 +1443,7 @@ invalid_fs:
return res;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_get_volfile, 3.6.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_ipc, 3.12.0)
int
priv_glfs_ipc(struct glfs *fs, int opcode, void *xd_in, void **xd_out)
{
@@ -1460,8 +1471,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_ipc, 3.12.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_setfspid, 6.1)
int
priv_glfs_setfspid(struct glfs *fs, pid_t pid)
{
@@ -1475,107 +1485,104 @@ priv_glfs_setfspid(struct glfs *fs, pid_t pid)
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_setfspid, 6.1);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_free, 3.7.16)
void
pub_glfs_free(void *ptr)
{
GLFS_FREE(ptr);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_free, 3.7.16);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_fs, 3.7.16)
struct glfs *
pub_glfs_upcall_get_fs(struct glfs_upcall *arg)
{
return arg->fs;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_fs, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_reason, 3.7.16)
enum glfs_upcall_reason
pub_glfs_upcall_get_reason(struct glfs_upcall *arg)
{
return arg->reason;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_reason, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_event, 3.7.16)
void *
pub_glfs_upcall_get_event(struct glfs_upcall *arg)
{
return arg->event;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_event, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_object, 3.7.16)
struct glfs_object *
pub_glfs_upcall_inode_get_object(struct glfs_upcall_inode *arg)
{
return arg->object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_object, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_flags, 3.7.16)
uint64_t
pub_glfs_upcall_inode_get_flags(struct glfs_upcall_inode *arg)
{
return arg->flags;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_flags, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_stat, 3.7.16)
struct stat *
pub_glfs_upcall_inode_get_stat(struct glfs_upcall_inode *arg)
{
return &arg->buf;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_stat, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_expire, 3.7.16)
uint64_t
pub_glfs_upcall_inode_get_expire(struct glfs_upcall_inode *arg)
{
return arg->expire_time_attr;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_expire, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_pobject, 3.7.16)
struct glfs_object *
pub_glfs_upcall_inode_get_pobject(struct glfs_upcall_inode *arg)
{
return arg->p_object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_pobject, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_pstat, 3.7.16)
struct stat *
pub_glfs_upcall_inode_get_pstat(struct glfs_upcall_inode *arg)
{
return &arg->p_buf;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_pstat, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_oldpobject, 3.7.16)
struct glfs_object *
pub_glfs_upcall_inode_get_oldpobject(struct glfs_upcall_inode *arg)
{
return arg->oldp_object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_oldpobject, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_oldpstat, 3.7.16)
struct stat *
pub_glfs_upcall_inode_get_oldpstat(struct glfs_upcall_inode *arg)
{
return &arg->oldp_buf;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_oldpstat, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_lease_get_object, 4.1.6)
struct glfs_object *
pub_glfs_upcall_lease_get_object(struct glfs_upcall_lease *arg)
{
return arg->object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_lease_get_object, 4.1.6);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_lease_get_lease_type, 4.1.6)
uint32_t
pub_glfs_upcall_lease_get_lease_type(struct glfs_upcall_lease *arg)
{
return arg->lease_type;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_lease_get_lease_type, 4.1.6);
/* definitions of the GLFS_SYSRQ_* chars are in glfs.h */
static struct glfs_sysrq_help {
@@ -1585,6 +1592,7 @@ static struct glfs_sysrq_help {
{GLFS_SYSRQ_STATEDUMP, "(S)tatedump"},
{0, NULL}};
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_sysrq, 3.10.0)
int
pub_glfs_sysrq(struct glfs *fs, char sysrq)
{
@@ -1624,8 +1632,8 @@ pub_glfs_sysrq(struct glfs *fs, char sysrq)
gf_proc_dump_info(SIGUSR1, ctx);
break;
default:
- gf_msg("glfs", GF_LOG_ERROR, ENOTSUP, API_MSG_INVALID_ENTRY,
- "'%c' is not a valid sysrq", sysrq);
+ gf_smsg("glfs", GF_LOG_ERROR, ENOTSUP, API_MSG_INVALID_SYSRQ,
+ "sysrq=%c", sysrq, NULL);
errno = ENOTSUP;
ret = -1;
}
@@ -1633,8 +1641,7 @@ out:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_sysrq, 3.10.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_register, 3.13.0)
int
pub_glfs_upcall_register(struct glfs *fs, uint32_t event_list,
glfs_upcall_cbk cbk, void *data)
@@ -1656,8 +1663,8 @@ pub_glfs_upcall_register(struct glfs *fs, uint32_t event_list,
if ((event_list != GLFS_EVENT_ANY) && (event_list & ~up_events)) {
errno = EINVAL;
ret = -1;
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "invalid event_list (0x%08x)", event_list);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
+ "event_list=(0x%08x)", event_list, NULL);
goto out;
}
@@ -1690,8 +1697,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_register, 3.13.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_unregister, 3.13.0)
int
pub_glfs_upcall_unregister(struct glfs *fs, uint32_t event_list)
{
@@ -1709,8 +1715,8 @@ pub_glfs_upcall_unregister(struct glfs *fs, uint32_t event_list)
if ((event_list != GLFS_EVENT_ANY) && (event_list & ~up_events)) {
errno = EINVAL;
ret = -1;
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "invalid event_list (0x%08x)", event_list);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
+ "event_list=(0x%08x)", event_list, NULL);
goto out;
}
@@ -1738,8 +1744,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_unregister, 3.13.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_statedump_path, 7.0)
int
pub_glfs_set_statedump_path(struct glfs *fs, const char *path)
{
@@ -1799,5 +1804,3 @@ err:
invalid_fs:
return -1;
}
-
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_statedump_path, 7.0);
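
In the glfs.c hunks above, pub_glfs_new() now validates the volume name before any allocation (plain names must be alphanumeric plus '_' and '-'; a leading '/' is accepted only for "/snaps/" paths), glfs_fini() drains and wakes any synctasks parked on the new fs->waitq list, and the shutdown busy-wait uses gf_nanosleep() instead of usleep(). A rough, self-contained rendering of the name check follows; demo_valid_volname() is illustrative only and, unlike the patched code, also rejects empty names.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Approximates the validation added to pub_glfs_new(): allow "/snaps/..."
 * names, otherwise require [A-Za-z0-9_-] and forbid a leading '/' or '-'. */
static int
demo_valid_volname(const char *volname)
{
    size_t i;

    if (volname == NULL || volname[0] == '\0')
        return 0;
    if (volname[0] == '/' || volname[0] == '-')
        return strncmp(volname, "/snaps/", 7) == 0;
    for (i = 0; i < strlen(volname); i++) {
        if (!isalnum((unsigned char)volname[i]) && volname[i] != '_' &&
            volname[i] != '-')
            return 0;
    }
    return 1;
}

int
main(void)
{
    printf("%d %d %d\n", demo_valid_volname("gv0"),
           demo_valid_volname("/snaps/snap1/gv0"),
           demo_valid_volname("bad name"));   /* prints 1 1 0 */
    return 0;
}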
diff --git a/build-aux/pkg-version b/build-aux/pkg-version
index 7c57c639a5c..17ceab70c03 100755
--- a/build-aux/pkg-version
+++ b/build-aux/pkg-version
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# To override version/release from git,
# create VERSION file containing text with version/release
diff --git a/cli/src/Makefile.am b/cli/src/Makefile.am
index 3e7511fe1b7..16063f27c7f 100644
--- a/cli/src/Makefile.am
+++ b/cli/src/Makefile.am
@@ -5,6 +5,7 @@ gluster_SOURCES = cli.c registry.c input.c cli-cmd.c cli-rl.c cli-cmd-global.c \
cli-cmd-system.c cli-cmd-misc.c cli-xml-output.c cli-quotad-client.c cli-cmd-snapshot.c
gluster_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_LDADD) \
+ $(top_builddir)/libglusterd/src/libglusterd.la \
$(RLLIBS) $(top_builddir)/rpc/xdr/src/libgfxdr.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
$(XML_LIBS)
@@ -13,13 +14,14 @@ gluster_LDFLAGS = $(GF_LDFLAGS)
noinst_HEADERS = cli.h cli-mem-types.h cli-cmd.h cli-quotad-client.h
AM_CPPFLAGS = $(GF_CPPFLAGS) \
- -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src\
- -I$(top_srcdir)/rpc/xdr/src\
- -I$(top_builddir)/rpc/xdr/src\
+ -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_srcdir)/libglusterd/src \
+ -I$(top_builddir)/rpc/xdr/src \
-DDATADIR=\"$(localstatedir)\" \
-DCONFDIR=\"$(sysconfdir)/glusterfs\" \
-DGSYNCD_PREFIX=\"$(GLUSTERFS_LIBEXECDIR)\"\
- -DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE) -DSBIN_DIR=\"$(sbindir)\"
+ -DGLFSHEAL_PREFIX=\"$(GLUSTERFS_LIBEXECDIR)\"\
+ -DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE)
AM_CFLAGS = -Wall $(GF_CFLAGS) $(XML_CFLAGS)
@@ -28,5 +30,8 @@ CLEANFILES =
$(top_builddir)/libglusterfs/src/libglusterfs.la:
$(MAKE) -C $(top_builddir)/libglusterfs/src/ all
+$(top_builddir)/libglusterd/src/libglusterd.la:
+ $(MAKE) -C $(top_builddir)/libglusterd/src/ all
+
install-data-hook:
$(mkdir_p) $(DESTDIR)$(localstatedir)/run/gluster
diff --git a/cli/src/cli-cmd-global.c b/cli/src/cli-cmd-global.c
index d0729ac1f0a..2c9a5f01bb1 100644
--- a/cli/src/cli-cmd-global.c
+++ b/cli/src/cli-cmd-global.c
@@ -27,8 +27,6 @@
#include <glusterfs/syscall.h>
#include <glusterfs/common-utils.h>
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
int
cli_cmd_global_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
@@ -36,6 +34,10 @@ int
cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount);
+int
+cli_cmd_ganesha_cbk(struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount);
+
struct cli_cmd global_cmds[] = {
{
"global help",
@@ -48,6 +50,11 @@ struct cli_cmd global_cmds[] = {
cli_cmd_get_state_cbk,
"Get local state representation of mentioned daemon",
},
+ {
+ "nfs-ganesha {enable| disable} ",
+ cli_cmd_ganesha_cbk,
+ "Enable/disable NFS-Ganesha support",
+ },
{NULL, NULL, NULL}};
int
@@ -89,8 +96,9 @@ out:
}
int
-cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
- const char **words, int wordcount)
+cli_cmd_ganesha_cbk(struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+
{
int sent = 0;
int parse_error = 0;
@@ -101,10 +109,53 @@ cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
cli_local_t *local = NULL;
char *op_errstr = NULL;
+ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GANESHA];
+
frame = create_frame(THIS, THIS->ctx->pool);
if (!frame)
goto out;
+ ret = cli_cmd_ganesha_parse(state, words, wordcount, &options, &op_errstr);
+ if (ret) {
+ if (op_errstr) {
+ cli_err("%s", op_errstr);
+ GF_FREE(op_errstr);
+ } else
+ cli_usage_out(word->pattern);
+ parse_error = 1;
+ goto out;
+ }
+
+ CLI_LOCAL_INIT(local, words, frame, options);
+
+ if (proc->fn) {
+ ret = proc->fn(frame, THIS, options);
+ }
+
+out:
+ if (ret) {
+ cli_cmd_sent_status_get(&sent);
+ if ((sent == 0) && (parse_error == 0))
+ cli_out("Setting global option failed");
+ }
+
+ CLI_STACK_DESTROY(frame);
+ return ret;
+}
+
+int
+cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int sent = 0;
+ int parse_error = 0;
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *options = NULL;
+ cli_local_t *local = NULL;
+ char *op_errstr = NULL;
+
ret = cli_cmd_get_state_parse(state, words, wordcount, &options,
&op_errstr);
@@ -120,6 +171,12 @@ cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_STATE];
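
The cli-cmd-global.c hunk above adds the `gluster nfs-ganesha enable|disable` command and, like the peer, snapshot, system and volume callbacks further down, creates the call frame only after the command line has parsed cleanly, so a usage error exits before anything has been allocated. A toy illustration of that ordering is below, with plain malloc()/free() standing in for create_frame()/CLI_STACK_DESTROY(); it is not the CLI code itself.

#include <stdio.h>
#include <stdlib.h>

/* Parse and validate first; allocate the expensive resource only once the
 * input is known to be good, and release it on every exit path. */
static int
demo_cmd(int argc)
{
    int ret = -1;
    void *frame = NULL;

    if (argc != 2) {                  /* usage error: nothing to clean up */
        fprintf(stderr, "usage: cmd <arg>\n");
        goto out;
    }

    frame = malloc(64);               /* allocated only after validation */
    if (!frame)
        goto out;

    ret = 0;                          /* ... issue the request using frame ... */
out:
    free(frame);                      /* free(NULL) is a no-op */
    return ret;
}

int
main(void)
{
    return demo_cmd(2);
}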
diff --git a/cli/src/cli-cmd-misc.c b/cli/src/cli-cmd-misc.c
index 120d4ab69b5..e961d88da86 100644
--- a/cli/src/cli-cmd-misc.c
+++ b/cli/src/cli-cmd-misc.c
@@ -18,10 +18,6 @@
#include "cli-mem-types.h"
#include "protocol-common.h"
-extern struct rpc_clnt *global_rpc;
-
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
extern struct cli_cmd volume_cmds[];
extern struct cli_cmd bitrot_cmds[];
extern struct cli_cmd quota_cmds[];
@@ -57,7 +53,7 @@ int
cli_cmd_display_help(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount)
{
- struct cli_cmd *cmd[] = {
+ static struct cli_cmd *cmd[] = {
cli_misc_cmds, cli_probe_cmds, volume_cmds, bitrot_cmds,
quota_cmds, snapshot_cmds, global_cmds, NULL};
struct cli_cmd *cmd_ind = NULL;
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index decdd10cb50..34620b4a31b 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -26,7 +26,7 @@
#define MAX_SNAP_DESCRIPTION_LEN 1024
-struct snap_config_opt_vals_ snap_confopt_vals[] = {
+static struct snap_config_opt_vals_ snap_confopt_vals[] = {
{.op_name = "snap-max-hard-limit",
.question = "Changing snapshot-max-hard-limit "
"will limit the creation of new snapshots "
@@ -567,9 +567,9 @@ cli_cmd_volume_create_parse(struct cli_state *state, const char **words,
char *bricks = NULL;
char *ta_brick = NULL;
int32_t brick_count = 0;
- char *opwords[] = {"replica", "stripe", "transport",
- "disperse", "redundancy", "disperse-data",
- "arbiter", "thin-arbiter", NULL};
+ static char *opwords[] = {"replica", "stripe", "transport",
+ "disperse", "redundancy", "disperse-data",
+ "arbiter", "thin-arbiter", NULL};
char *w = NULL;
int op_count = 0;
@@ -675,7 +675,8 @@ cli_cmd_volume_create_parse(struct cli_state *state, const char **words,
index += 2;
} else if (!strcmp(words[index], "thin-arbiter")) {
ret = gf_string2int(words[index + 1], &thin_arbiter_count);
- if ((ret == -1) || (thin_arbiter_count != 1)) {
+ if ((ret == -1) || (thin_arbiter_count != 1) ||
+ (replica_count != 2)) {
cli_err(
"For thin-arbiter "
"configuration, "
@@ -1194,19 +1195,19 @@ cli_cmd_quota_parse(const char **words, int wordcount, dict_t **options)
};
int64_t value = 0;
gf_quota_type type = GF_QUOTA_OPTION_TYPE_NONE;
- char *opwords[] = {"enable",
- "disable",
- "limit-usage",
- "remove",
- "list",
- "alert-time",
- "soft-timeout",
- "hard-timeout",
- "default-soft-limit",
- "limit-objects",
- "list-objects",
- "remove-objects",
- NULL};
+ static char *opwords[] = {"enable",
+ "disable",
+ "limit-usage",
+ "remove",
+ "list",
+ "alert-time",
+ "soft-timeout",
+ "hard-timeout",
+ "default-soft-limit",
+ "limit-objects",
+ "list-objects",
+ "remove-objects",
+ NULL};
char *w = NULL;
uint32_t time = 0;
double percent = 0;
@@ -1622,6 +1623,11 @@ cli_add_key_group(dict_t *dict, char *key, char *value, char **op_errstr)
}
goto out;
}
+
+ /* Treat lines that start with "#" as comments */
+ if ('#' == line[0])
+ continue;
+
opt_count++;
tok_key = strtok_r(line, "=", &saveptr);
tok_val = strtok_r(NULL, "\r\n", &saveptr);
@@ -1847,7 +1853,7 @@ cli_cmd_volume_add_brick_parse(struct cli_state *state, const char **words,
int ret = -1;
int brick_count = 0, brick_index = 0;
char *bricks = NULL;
- char *opwords_cl[] = {"replica", "stripe", NULL};
+ static char *opwords_cl[] = {"replica", "stripe", NULL};
gf1_cluster_type type = GF_CLUSTER_TYPE_NONE;
int count = 1;
int arbiter_count = 0;
@@ -2007,8 +2013,9 @@ cli_cmd_volume_remove_brick_parse(struct cli_state *state, const char **words,
int32_t j = 0;
char *tmp_brick = NULL;
char *tmp_brick1 = NULL;
- char *type_opword[] = {"replica", NULL};
- char *opwords[] = {"start", "commit", "stop", "status", "force", NULL};
+ static char *type_opword[] = {"replica", NULL};
+ static char *opwords[] = {"start", "commit", "stop",
+ "status", "force", NULL};
char *w = NULL;
int32_t command = GF_OP_CMD_NONE;
long count = 0;
@@ -2579,8 +2586,6 @@ cli_cmd_log_rotate_parse(const char **words, int wordcount, dict_t **options)
if (strcmp("rotate", words[3]) == 0)
volname = (char *)words[2];
- else if (strcmp("rotate", words[2]) == 0)
- volname = (char *)words[3];
GF_ASSERT(volname);
ret = dict_set_str(dict, "volname", volname);
@@ -2856,8 +2861,8 @@ out:
}
int32_t
-cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options,
- char **errstr)
+cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **options, char **errstr)
{
int32_t ret = -1;
dict_t *dict = NULL;
@@ -2867,13 +2872,16 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options,
unsigned slavei = 0;
unsigned glob = 0;
unsigned cmdi = 0;
- char *opwords[] = {"create", "status", "start", "stop", "config",
- "force", "delete", "ssh-port", "no-verify", "push-pem",
- "detail", "pause", "resume", NULL};
+ static char *opwords[] = {"create", "status", "start", "stop",
+ "config", "force", "delete", "ssh-port",
+ "no-verify", "push-pem", "detail", "pause",
+ "resume", NULL};
char *w = NULL;
char *save_ptr = NULL;
char *slave_temp = NULL;
char *token = NULL;
+ gf_answer_t answer = GF_ANSWER_NO;
+ const char *question = NULL;
GF_ASSERT(words);
GF_ASSERT(options);
@@ -3060,16 +3068,36 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options,
}
if (!ret)
ret = dict_set_int32(dict, "type", type);
- if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG)
+ if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG) {
+ if (!strcmp((char *)words[wordcount - 2], "ignore-deletes") &&
+ !strcmp((char *)words[wordcount - 1], "true")) {
+ question =
+ "There exists ~15 seconds delay for the option to take"
+ " effect from stime of the corresponding brick. Please"
+ " check the log for the time, the option is effective."
+ " Proceed";
+
+ answer = cli_cmd_get_confirmation(state, question);
+
+ if (GF_ANSWER_NO == answer) {
+ gf_log("cli", GF_LOG_INFO,
+ "Operation "
+ "cancelled, exiting");
+ *errstr = gf_strdup("Aborted by user.");
+ ret = -1;
+ goto out;
+ }
+ }
+
ret = config_parse(words, wordcount, dict, cmdi, glob);
+ }
out:
if (slave_temp)
GF_FREE(slave_temp);
- if (ret) {
- if (dict)
- dict_unref(dict);
- } else
+ if (ret && dict)
+ dict_unref(dict);
+ else
*options = dict;
return ret;
@@ -3086,7 +3114,7 @@ cli_cmd_volume_profile_parse(const char **words, int wordcount,
gf1_cli_info_op info_op = GF_CLI_INFO_NONE;
gf_boolean_t is_peek = _gf_false;
- char *opwords[] = {"start", "stop", "info", NULL};
+ static char *opwords[] = {"start", "stop", "info", NULL};
char *w = NULL;
GF_ASSERT(words);
@@ -3187,8 +3215,9 @@ cli_cmd_volume_top_parse(const char **words, int wordcount, dict_t **options)
int count = 0;
gf_boolean_t nfs = _gf_false;
char *delimiter = NULL;
- char *opwords[] = {"open", "read", "write", "opendir", "readdir",
- "read-perf", "write-perf", "clear", NULL};
+ static char *opwords[] = {"open", "read", "write",
+ "opendir", "readdir", "read-perf",
+ "write-perf", "clear", NULL};
char *w = NULL;
GF_ASSERT(words);
@@ -3367,9 +3396,9 @@ cli_cmd_get_statusop(const char *arg)
int i = 0;
uint32_t ret = GF_CLI_STATUS_NONE;
char *w = NULL;
- char *opwords[] = {"detail", "mem", "clients", "fd", "inode",
- "callpool", "tasks", "client-list", NULL};
- struct {
+ static char *opwords[] = {"detail", "mem", "clients", "fd", "inode",
+ "callpool", "tasks", "client-list", NULL};
+ static struct {
char *opname;
uint32_t opcode;
} optable[] = {{"detail", GF_CLI_STATUS_DETAIL},
@@ -3567,9 +3596,9 @@ out:
gf_boolean_t
cli_cmd_validate_dumpoption(const char *arg, char **option)
{
- char *opwords[] = {"all", "nfs", "mem", "iobuf", "callpool",
- "priv", "fd", "inode", "history", "inodectx",
- "fdctx", "quotad", NULL};
+ static char *opwords[] = {"all", "nfs", "mem", "iobuf", "callpool",
+ "priv", "fd", "inode", "history", "inodectx",
+ "fdctx", "quotad", NULL};
char *w = NULL;
w = str_getunamb(arg, opwords);
@@ -3877,8 +3906,6 @@ heal_command_type_get(const char *command)
[GF_SHD_OP_HEAL_INDEX] = NULL,
[GF_SHD_OP_HEAL_FULL] = "full",
[GF_SHD_OP_INDEX_SUMMARY] = "info",
- [GF_SHD_OP_HEALED_FILES] = NULL,
- [GF_SHD_OP_HEAL_FAILED_FILES] = NULL,
[GF_SHD_OP_SPLIT_BRAIN_FILES] = NULL,
[GF_SHD_OP_STATISTICS] = "statistics",
[GF_SHD_OP_STATISTICS_HEAL_COUNT] = NULL,
@@ -5220,24 +5247,25 @@ cli_cmd_snapshot_parse(const char **words, int wordcount, dict_t **options,
dict_t *dict = NULL;
gf1_cli_snapshot type = GF_SNAP_OPTION_TYPE_NONE;
char *w = NULL;
- char *opwords[] = {"create", "delete", "restore", "activate",
- "deactivate", "list", "status", "config",
- "info", "clone", NULL};
- char *invalid_snapnames[] = {"description", "force", "volume", "all", NULL};
- char *invalid_volnames[] = {"volume",
- "type",
- "subvolumes",
- "option",
- "end-volume",
- "all",
- "volume_not_in_ring",
- "description",
- "force",
- "snap-max-hard-limit",
- "snap-max-soft-limit",
- "auto-delete",
- "activate-on-create",
- NULL};
+ static char *opwords[] = {"create", "delete", "restore", "activate",
+ "deactivate", "list", "status", "config",
+ "info", "clone", NULL};
+ static char *invalid_snapnames[] = {"description", "force", "volume", "all",
+ NULL};
+ static char *invalid_volnames[] = {"volume",
+ "type",
+ "subvolumes",
+ "option",
+ "end-volume",
+ "all",
+ "volume_not_in_ring",
+ "description",
+ "force",
+ "snap-max-hard-limit",
+ "snap-max-soft-limit",
+ "auto-delete",
+ "activate-on-create",
+ NULL};
GF_ASSERT(words);
GF_ASSERT(options);
@@ -5557,16 +5585,18 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
int32_t ret = -1;
char *w = NULL;
char *volname = NULL;
- char *opwords[] = {
- "enable", "disable", "scrub-throttle", "scrub-frequency", "scrub",
- "signing-time", NULL};
- char *scrub_throt_values[] = {"lazy", "normal", "aggressive", NULL};
- char *scrub_freq_values[] = {"hourly", "daily", "weekly", "biweekly",
- "monthly", "minute", NULL};
- char *scrub_values[] = {"pause", "resume", "status", "ondemand", NULL};
+ static char *opwords[] = {"enable", "disable", "scrub-throttle",
+ "scrub-frequency", "scrub", "signing-time",
+ "signer-threads", NULL};
+ static char *scrub_throt_values[] = {"lazy", "normal", "aggressive", NULL};
+ static char *scrub_freq_values[] = {
+ "hourly", "daily", "weekly", "biweekly", "monthly", "minute", NULL};
+ static char *scrub_values[] = {"pause", "resume", "status", "ondemand",
+ NULL};
dict_t *dict = NULL;
gf_bitrot_type type = GF_BITROT_OPTION_TYPE_NONE;
int32_t expiry_time = 0;
+ int32_t signer_th_count = 0;
GF_ASSERT(words);
GF_ASSERT(options);
@@ -5747,6 +5777,31 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
}
goto set_type;
}
+ } else if (!strcmp(words[3], "signer-threads")) {
+ if (!words[4]) {
+ cli_err(
+ "Missing signer-thread value for bitrot "
+ "option");
+ ret = -1;
+ goto out;
+ } else {
+ type = GF_BITROT_OPTION_TYPE_SIGNER_THREADS;
+
+ signer_th_count = strtol(words[4], NULL, 0);
+ if (signer_th_count < 1) {
+ cli_err("signer-thread count should not be less than 1");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_uint32(dict, "signer-threads",
+ (unsigned int)signer_th_count);
+ if (ret) {
+ cli_out("Failed to set dict for bitrot");
+ goto out;
+ }
+ goto set_type;
+ }
} else {
cli_err(
"Invalid option %s for bitrot. Please enter valid "
@@ -5755,7 +5810,6 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
ret = -1;
goto out;
}
-
set_type:
ret = dict_set_int32(dict, "type", type);
if (ret < 0)
@@ -5772,3 +5826,121 @@ out:
return ret;
}
+
+/* Parsing global option for NFS-Ganesha config
+ * gluster nfs-ganesha enable/disable */
+
+int32_t
+cli_cmd_ganesha_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **options, char **op_errstr)
+{
+ dict_t *dict = NULL;
+ int ret = -1;
+ char *key = NULL;
+ char *value = NULL;
+ char *w = NULL;
+ static char *opwords[] = {"enable", "disable", NULL};
+ const char *question = NULL;
+ gf_answer_t answer = GF_ANSWER_NO;
+
+ GF_ASSERT(words);
+ GF_ASSERT(options);
+
+ dict = dict_new();
+
+ if (!dict)
+ goto out;
+
+ if (wordcount != 2)
+ goto out;
+
+ key = (char *)words[0];
+ value = (char *)words[1];
+
+ if (!key || !value) {
+ cli_out("Usage : nfs-ganesha <enable/disable>");
+ ret = -1;
+ goto out;
+ }
+
+ ret = gf_strip_whitespace(value, strlen(value));
+ if (ret == -1)
+ goto out;
+
+ if (strcmp(key, "nfs-ganesha")) {
+ gf_asprintf(op_errstr,
+ "Global option: error: ' %s '"
+ "is not a valid global option.",
+ key);
+ ret = -1;
+ goto out;
+ }
+
+ w = str_getunamb(value, opwords);
+ if (!w) {
+ cli_out(
+ "Invalid global option \n"
+ "Usage : nfs-ganesha <enable/disable>");
+ ret = -1;
+ goto out;
+ }
+
+ if (strcmp(value, "enable") == 0) {
+ question =
+ "Enabling NFS-Ganesha requires Gluster-NFS to be "
+ "disabled across the trusted pool. Do you "
+ "still want to continue?\n";
+ } else if (strcmp(value, "disable") == 0) {
+ question =
+ "Disabling NFS-Ganesha will tear down the entire "
+ "ganesha cluster across the trusted pool. Do you "
+ "still want to continue?\n";
+ } else {
+ ret = -1;
+ goto out;
+ }
+ answer = cli_cmd_get_confirmation(state, question);
+ if (GF_ANSWER_NO == answer) {
+ gf_log("cli", GF_LOG_ERROR,
+ "Global operation "
+ "cancelled, exiting");
+ ret = -1;
+ goto out;
+ }
+ cli_out("This will take a few minutes to complete. Please wait ..");
+
+ ret = dict_set_str(dict, "key", key);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "dict set on key failed");
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "value", value);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "dict set on value failed");
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "globalname", "All");
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "dict set on global"
+ " key failed.");
+ goto out;
+ }
+
+ ret = dict_set_int32(dict, "hold_global_locks", _gf_true);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "dict set on global key "
+ "failed.");
+ goto out;
+ }
+
+ *options = dict;
+out:
+ if (ret)
+ dict_unref(dict);
+
+ return ret;
+}
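
Most of the cli-cmd-parser.c changes above mark the option-word tables static, so each table is built once rather than re-initialized on the stack on every call; the remaining hunks tighten parsing (thin-arbiter now also requires replica 2, '#' lines in group files are skipped, `ignore-deletes true` asks for confirmation, and bitrot gains a signer-threads count). Below is a minimal stand-in showing the static table plus lookup shape; demo_opwords/demo_lookup are illustrative names, not the parser's str_getunamb() helper.

#include <stdio.h>
#include <string.h>

/* One copy of the table for the whole program, NULL-terminated like the
 * opwords arrays in the hunks above. */
static const char *demo_opwords[] = {"enable", "disable", NULL};

static const char *
demo_lookup(const char *arg)
{
    int i;

    for (i = 0; demo_opwords[i]; i++)
        if (strcmp(arg, demo_opwords[i]) == 0)
            return demo_opwords[i];
    return NULL;
}

int
main(void)
{
    printf("%s\n", demo_lookup("enable") ? "matched" : "no match");
    return 0;
}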
diff --git a/cli/src/cli-cmd-peer.c b/cli/src/cli-cmd-peer.c
index e42a1139b87..084998701d8 100644
--- a/cli/src/cli-cmd-peer.c
+++ b/cli/src/cli-cmd-peer.c
@@ -20,10 +20,6 @@
#include "protocol-common.h"
#include <glusterfs/events.h>
-extern struct rpc_clnt *global_rpc;
-
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
int
cli_cmd_peer_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
@@ -48,10 +44,6 @@ cli_cmd_peer_probe_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_PROBE];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
dict = dict_new();
if (!dict)
goto out;
@@ -77,6 +69,12 @@ cli_cmd_peer_probe_cbk(struct cli_state *state, struct cli_cmd_word *word,
}
*/
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -127,10 +125,6 @@ cli_cmd_peer_deprobe_cbk(struct cli_state *state, struct cli_cmd_word *word,
"want to proceed?";
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DEPROBE];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
dict = dict_new();
ret = dict_set_str(dict, "hostname", (char *)words[2]);
@@ -162,6 +156,12 @@ cli_cmd_peer_deprobe_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
diff --git a/cli/src/cli-cmd-snapshot.c b/cli/src/cli-cmd-snapshot.c
index 814ab82f6eb..859d6b2e40d 100644
--- a/cli/src/cli-cmd-snapshot.c
+++ b/cli/src/cli-cmd-snapshot.c
@@ -17,8 +17,6 @@
#include "cli-cmd.h"
#include "cli-mem-types.h"
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
int
cli_cmd_snapshot_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
@@ -36,12 +34,6 @@ cli_cmd_snapshot_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SNAP];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (frame == NULL) {
- ret = -1;
- goto out;
- }
-
/* Parses the command entered by the user */
ret = cli_cmd_snapshot_parse(words, wordcount, &options, state);
if (ret) {
@@ -55,6 +47,12 @@ cli_cmd_snapshot_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (frame == NULL) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn)
diff --git a/cli/src/cli-cmd-system.c b/cli/src/cli-cmd-system.c
index cb3a9ea7484..801e8f4efed 100644
--- a/cli/src/cli-cmd-system.c
+++ b/cli/src/cli-cmd-system.c
@@ -18,10 +18,6 @@
#include "cli-mem-types.h"
#include "protocol-common.h"
-extern struct rpc_clnt *global_rpc;
-
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
int
cli_cmd_system_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
@@ -43,25 +39,26 @@ cli_cmd_getspec_cbk(struct cli_state *state, struct cli_cmd_word *word,
call_frame_t *frame = NULL;
dict_t *dict = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
+ if (wordcount != 3) {
+ cli_usage_out(word->pattern);
goto out;
+ }
dict = dict_new();
if (!dict)
goto out;
- if (wordcount != 3) {
- cli_usage_out(word->pattern);
- goto out;
- }
-
ret = dict_set_str(dict, "volid", (char *)words[2]);
if (ret)
goto out;
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GETSPEC];
if (proc->fn) {
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
ret = proc->fn(frame, THIS, dict);
}
@@ -74,6 +71,7 @@ out:
if (dict)
dict_unref(dict);
+ CLI_STACK_DESTROY(frame);
return ret;
}
@@ -86,25 +84,26 @@ cli_cmd_pmap_b2p_cbk(struct cli_state *state, struct cli_cmd_word *word,
call_frame_t *frame = NULL;
dict_t *dict = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
+ if (wordcount != 4) {
+ cli_usage_out(word->pattern);
goto out;
+ }
dict = dict_new();
if (!dict)
goto out;
- if (wordcount != 4) {
- cli_usage_out(word->pattern);
- goto out;
- }
-
ret = dict_set_str(dict, "brick", (char *)words[3]);
if (ret)
goto out;
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_PMAP_PORTBYBRICK];
if (proc->fn) {
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
ret = proc->fn(frame, THIS, dict);
}
@@ -116,6 +115,8 @@ out:
if (dict)
dict_unref(dict);
+
+ CLI_STACK_DESTROY(frame);
return ret;
}
@@ -175,6 +176,7 @@ make_seq_dict(int argc, char **argv)
{
char index[] = "4294967296"; // 1<<32
int i = 0;
+ int len;
int ret = 0;
dict_t *dict = dict_new();
@@ -182,8 +184,8 @@ make_seq_dict(int argc, char **argv)
return NULL;
for (i = 0; i < argc; i++) {
- snprintf(index, sizeof(index), "%d", i);
- ret = dict_set_str(dict, index, argv[i]);
+ len = snprintf(index, sizeof(index), "%d", i);
+ ret = dict_set_strn(dict, index, len, argv[i]);
if (ret == -1)
break;
}
@@ -395,7 +397,7 @@ out:
return ret;
}
-struct cli_cmd cli_system_cmds[] = {
+static struct cli_cmd cli_system_cmds[] = {
{"system:: getspec <VOLNAME>", cli_cmd_getspec_cbk,
"fetch the volume file for the volume <VOLNAME>"},
@@ -439,6 +441,7 @@ cli_cmd_sys_exec_cbk(struct cli_state *state, struct cli_cmd_word *word,
char *tmp = NULL;
int ret = -1;
int i = -1;
+ int len;
int cmd_args_count = 0;
int in_cmd_args_count = 0;
rpc_clnt_procedure_t *proc = NULL;
@@ -451,15 +454,16 @@ cli_cmd_sys_exec_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
- dict = dict_new();
- if (!dict)
- goto out;
-
command = strtok_r((char *)words[2], " ", &saveptr);
if (command == NULL) {
gf_log("cli", GF_LOG_ERROR, "Failed to parse command");
goto out;
}
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
do {
tmp = strtok_r(NULL, " ", &saveptr);
if (tmp) {
@@ -487,9 +491,9 @@ cli_cmd_sys_exec_cbk(struct cli_state *state, struct cli_cmd_word *word,
for (i = 1; i <= cmd_args_count; i++) {
in_cmd_args_count++;
- snprintf(cmd_arg_name, sizeof(cmd_arg_name), "cmd_arg_%d",
- in_cmd_args_count);
- ret = dict_set_str(dict, cmd_arg_name, (char *)words[2 + i]);
+ len = snprintf(cmd_arg_name, sizeof(cmd_arg_name), "cmd_arg_%d",
+ in_cmd_args_count);
+ ret = dict_set_strn(dict, cmd_arg_name, len, (char *)words[2 + i]);
if (ret) {
gf_log("", GF_LOG_ERROR, "Unable to set %s in dict", cmd_arg_name);
goto out;
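
The cli-cmd-system.c hunks above also switch dict_set_str() to dict_set_strn(), passing the length that snprintf() already returned instead of measuring the key again. A small, self-contained sketch of reusing that return value (no GlusterFS dict involved, just the length-handling idea):

#include <stdio.h>

int
main(void)
{
    char key[32];
    int len;

    len = snprintf(key, sizeof(key), "cmd_arg_%d", 3);
    if (len < 0 || (size_t)len >= sizeof(key))
        return 1;                      /* formatting error or truncation */

    printf("key=%s len=%d\n", key, len); /* length already known, no strlen() */
    return 0;
}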
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index f454b097aa7..f238851586e 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -28,10 +28,6 @@
#include <glusterfs/common-utils.h>
#include <glusterfs/events.h>
-extern struct rpc_clnt *global_rpc;
-extern struct rpc_clnt *global_quotad_rpc;
-
-extern rpc_clnt_prog_t *cli_rpc_prog;
extern rpc_clnt_prog_t cli_quotad_clnt;
static int
@@ -65,10 +61,6 @@ cli_cmd_volume_info_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if ((wordcount == 2) || (wordcount == 3 && !strcmp(words[2], "all"))) {
ctx.flags = GF_CLI_GET_NEXT_VOLUME;
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_NEXT_VOLUME];
@@ -91,6 +83,10 @@ cli_cmd_volume_info_cbk(struct cli_state *state, struct cli_cmd_word *word,
if (!local)
goto out;
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
local->get_vol.flags = ctx.flags;
if (ctx.volname)
local->get_vol.volname = gf_strdup(ctx.volname);
@@ -216,10 +212,6 @@ cli_cmd_volume_create_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CREATE_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_create_parse(state, words, wordcount, &options,
&bricks);
@@ -245,6 +237,12 @@ cli_cmd_volume_create_cbk(struct cli_state *state, struct cli_cmd_word *word,
}
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -287,14 +285,6 @@ cli_cmd_volume_delete_cbk(struct cli_state *state, struct cli_cmd_word *word,
"Do you want to continue?";
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DELETE_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
- dict = dict_new();
- if (!dict)
- goto out;
-
if (wordcount != 3) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -303,6 +293,10 @@ cli_cmd_volume_delete_cbk(struct cli_state *state, struct cli_cmd_word *word,
volname = (char *)words[2];
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
ret = dict_set_str(dict, "volname", volname);
if (ret) {
gf_log(THIS->name, GF_LOG_WARNING, "dict set failed");
@@ -324,6 +318,12 @@ cli_cmd_volume_delete_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -359,30 +359,15 @@ cli_cmd_volume_start_cbk(struct cli_state *state, struct cli_cmd_word *word,
int flags = 0;
cli_local_t *local = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount < 3 || wordcount > 4) {
cli_usage_out(word->pattern);
parse_error = 1;
goto out;
}
- dict = dict_new();
- if (!dict) {
- goto out;
- }
-
if (!words[2])
goto out;
- ret = dict_set_str(dict, "volname", (char *)words[2]);
- if (ret) {
- gf_log(THIS->name, GF_LOG_ERROR, "dict set failed");
- goto out;
- }
-
if (wordcount == 4) {
if (!strcmp("force", words[3])) {
flags |= GF_CLI_FLAG_OP_FORCE;
@@ -393,6 +378,18 @@ cli_cmd_volume_start_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
}
+
+ dict = dict_new();
+ if (!dict) {
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "volname", (char *)words[2]);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "dict set failed");
+ goto out;
+ }
+
ret = dict_set_int32(dict, "flags", flags);
if (ret) {
gf_log(THIS->name, GF_LOG_ERROR, "dict set failed");
@@ -401,6 +398,12 @@ cli_cmd_volume_start_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_START_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -487,10 +490,6 @@ cli_cmd_volume_stop_cbk(struct cli_state *state, struct cli_cmd_word *word,
"Stopping volume will make its data inaccessible. "
"Do you want to continue?";
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount < 3 || wordcount > 4) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -541,6 +540,12 @@ cli_cmd_volume_stop_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STOP_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -577,20 +582,16 @@ cli_cmd_volume_rename_cbk(struct cli_state *state, struct cli_cmd_word *word,
int sent = 0;
int parse_error = 0;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
- dict = dict_new();
- if (!dict)
- goto out;
-
if (wordcount != 4) {
cli_usage_out(word->pattern);
parse_error = 1;
goto out;
}
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
ret = dict_set_str(dict, "old-volname", (char *)words[2]);
if (ret)
@@ -604,6 +605,11 @@ cli_cmd_volume_rename_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RENAME_VOLUME];
if (proc->fn) {
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
ret = proc->fn(frame, THIS, dict);
}
@@ -642,10 +648,6 @@ cli_cmd_volume_defrag_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
#endif
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_defrag_parse(words, wordcount, &dict);
if (ret) {
@@ -655,6 +657,12 @@ cli_cmd_volume_defrag_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DEFRAG_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -703,10 +711,6 @@ cli_cmd_volume_reset_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RESET_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_reset_parse(words, wordcount, &options);
if (ret) {
cli_usage_out(word->pattern);
@@ -714,6 +718,12 @@ cli_cmd_volume_reset_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -818,10 +828,6 @@ cli_cmd_volume_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SET_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_set_parse(state, words, wordcount, &options,
&op_errstr);
if (ret) {
@@ -835,6 +841,12 @@ cli_cmd_volume_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -1042,10 +1054,6 @@ cli_cmd_volume_add_brick_cbk(struct cli_state *state, struct cli_cmd_word *word,
"filesystem operations on the volume after the change. Do you "
"really want to continue with 'stripe' count option ? ";
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_add_brick_parse(state, words, wordcount, &options, 0);
if (ret) {
cli_usage_out(word->pattern);
@@ -1090,6 +1098,12 @@ cli_cmd_volume_add_brick_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ADD_BRICK];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -1291,12 +1305,6 @@ cli_cmd_quota_handle_list_all(const char **words, dict_t *options)
goto out;
}
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame) {
- ret = -1;
- goto out;
- }
-
volname_dup = gf_strdup(volname);
if (!volname_dup) {
ret = -1;
@@ -1328,6 +1336,12 @@ cli_cmd_quota_handle_list_all(const char **words, dict_t *options)
if (ret)
goto out;
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, xdata);
proc = &cli_quotad_clnt.proctable[GF_AGGREGATOR_GETLIMIT];
@@ -1740,10 +1754,6 @@ cli_cmd_volume_remove_brick_cbk(struct cli_state *state,
int32_t command = GF_OP_CMD_NONE;
char *question = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_remove_brick_parse(state, words, wordcount, &options,
&need_question, &brick_count,
&command);
@@ -1809,6 +1819,12 @@ cli_cmd_volume_remove_brick_cbk(struct cli_state *state,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REMOVE_BRICK];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -1855,10 +1871,6 @@ cli_cmd_volume_reset_brick_cbk(struct cli_state *state,
#endif
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RESET_BRICK];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_reset_brick_parse(words, wordcount, &options);
if (ret) {
@@ -1877,6 +1889,12 @@ cli_cmd_volume_reset_brick_cbk(struct cli_state *state,
}
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -1923,10 +1941,6 @@ cli_cmd_volume_replace_brick_cbk(struct cli_state *state,
#endif
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REPLACE_BRICK];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_replace_brick_parse(words, wordcount, &options);
if (ret) {
@@ -1935,6 +1949,12 @@ cli_cmd_volume_replace_brick_cbk(struct cli_state *state,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2030,8 +2050,7 @@ cli_cmd_log_rotate_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
- if (!((strcmp("rotate", words[2]) == 0) ||
- (strcmp("rotate", words[3]) == 0))) {
+ if (!(strcmp("rotate", words[3]) == 0)) {
cli_usage_out(word->pattern);
parse_error = 1;
goto out;
@@ -2039,6 +2058,10 @@ cli_cmd_log_rotate_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LOG_ROTATE];
+ ret = cli_cmd_log_rotate_parse(words, wordcount, &options);
+ if (ret)
+ goto out;
+
frame = create_frame(THIS, THIS->ctx->pool);
if (!frame) {
gf_log(THIS->name, GF_LOG_ERROR, "failed to create frame");
@@ -2046,10 +2069,6 @@ cli_cmd_log_rotate_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
- ret = cli_cmd_log_rotate_parse(words, wordcount, &options);
- if (ret)
- goto out;
-
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2151,13 +2170,7 @@ cli_cmd_volume_gsync_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GSYNC_SET];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (frame == NULL) {
- ret = -1;
- goto out;
- }
-
- ret = cli_cmd_gsync_set_parse(words, wordcount, &options, &errstr);
+ ret = cli_cmd_gsync_set_parse(state, words, wordcount, &options, &errstr);
if (ret) {
if (errstr) {
cli_err("%s", errstr);
@@ -2169,6 +2182,12 @@ cli_cmd_volume_gsync_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (frame == NULL) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn)
@@ -2540,7 +2559,7 @@ cli_launch_glfs_heal(int heal_op, dict_t *options)
runinit(&runner);
ret = dict_get_str(options, "volname", &volname);
- runner_add_args(&runner, SBIN_DIR "/glfsheal", volname, NULL);
+ runner_add_args(&runner, GLFSHEAL_PREFIX "/glfsheal", volname, NULL);
runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
switch (heal_op) {
@@ -2615,9 +2634,6 @@ cli_cmd_volume_heal_cbk(struct cli_state *state, struct cli_cmd_word *word,
int heal_op = 0;
this = THIS;
- frame = create_frame(this, this->ctx->pool);
- if (!frame)
- goto out;
if (wordcount < 3) {
cli_usage_out(word->pattern);
@@ -2644,6 +2660,12 @@ cli_cmd_volume_heal_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_HEAL_VOLUME];
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2678,10 +2700,6 @@ cli_cmd_volume_statedump_cbk(struct cli_state *state, struct cli_cmd_word *word,
int parse_error = 0;
cli_local_t *local = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount < 3) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -2707,6 +2725,12 @@ cli_cmd_volume_statedump_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STATEDUMP_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2734,12 +2758,11 @@ cli_cmd_volume_list_cbk(struct cli_state *state, struct cli_cmd_word *word,
rpc_clnt_procedure_t *proc = NULL;
int sent = 0;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LIST_VOLUME];
if (proc->fn) {
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
ret = proc->fn(frame, THIS, NULL);
}
@@ -2768,10 +2791,6 @@ cli_cmd_volume_clearlocks_cbk(struct cli_state *state,
int parse_error = 0;
cli_local_t *local = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount < 7 || wordcount > 8) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -2798,6 +2817,12 @@ cli_cmd_volume_clearlocks_cbk(struct cli_state *state,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CLRLOCKS_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2828,10 +2853,6 @@ cli_cmd_volume_barrier_cbk(struct cli_state *state, struct cli_cmd_word *word,
int parse_error = 0;
cli_local_t *local = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount != 4) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -2853,6 +2874,12 @@ cli_cmd_volume_barrier_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_BARRIER_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn)
@@ -2887,10 +2914,6 @@ cli_cmd_volume_getopt_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
options = dict_new();
if (!options)
goto out;
@@ -2905,6 +2928,12 @@ cli_cmd_volume_getopt_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOL_OPT];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn)
@@ -2934,6 +2963,16 @@ struct cli_cmd bitrot_cmds[] = {
{"volume bitrot <VOLNAME> {enable|disable}", NULL, /*cli_cmd_bitrot_cbk,*/
"Enable/disable bitrot for volume <VOLNAME>"},
+ {"volume bitrot <VOLNAME> signing-time <time-in-secs>",
+ NULL, /*cli_cmd_bitrot_cbk,*/
+ "Waiting time for an object after last fd is closed to start signing "
+ "process"},
+
+ {"volume bitrot <VOLNAME> signer-threads <count>",
+ NULL, /*cli_cmd_bitrot_cbk,*/
+ "Number of signing process threads. Usually set to number of available "
+ "cores"},
+
{"volume bitrot <VOLNAME> scrub-throttle {lazy|normal|aggressive}",
NULL, /*cli_cmd_bitrot_cbk,*/
"Set the speed of the scrubber for volume <VOLNAME>"},
@@ -2949,6 +2988,8 @@ struct cli_cmd bitrot_cmds[] = {
"the scrubber. ondemand starts the scrubber immediately."},
{"volume bitrot <VOLNAME> {enable|disable}\n"
+ "volume bitrot <VOLNAME> signing-time <time-in-secs>\n"
+ "volume bitrot <VOLNAME> signer-threads <count>\n"
"volume bitrot <volname> scrub-throttle {lazy|normal|aggressive}\n"
"volume bitrot <volname> scrub-frequency {hourly|daily|weekly|biweekly"
"|monthly}\n"
@@ -3043,18 +3084,14 @@ struct cli_cmd volume_cmds[] = {
{"volume set <VOLNAME> <KEY> <VALUE>", cli_cmd_volume_set_cbk,
"set options for volume <VOLNAME>"},
- {"volume set <VOLNAME> group <GROUP>", cli_cmd_volume_set_cbk,
- "This option can be used for setting multiple pre-defined volume options"
- "where group_name is a file under /var/lib/glusterd/groups containing one"
- "key, value pair per line"},
+ {"volume set <VOLNAME> group <GROUP>", cli_cmd_volume_set_cbk,
+ "This option can be used for setting multiple pre-defined volume options "
+ "where group_name is a file under /var/lib/glusterd/groups containing one "
+ "key value pair per line"},
{"volume log <VOLNAME> rotate [BRICK]", cli_cmd_log_rotate_cbk,
"rotate the log file for corresponding volume/brick"},
- {"volume log rotate <VOLNAME> [BRICK]", cli_cmd_log_rotate_cbk,
- "rotate the log file for corresponding volume/brick"
- " NOTE: This is an old syntax, will be deprecated from next release."},
-
{"volume sync <HOSTNAME> [all|<VOLNAME>]", cli_cmd_sync_volume_cbk,
"sync the volume information from a peer"},
@@ -3077,8 +3114,8 @@ struct cli_cmd volume_cmds[] = {
cli_cmd_volume_profile_cbk, "volume profile operations"},
{"volume top <VOLNAME> {open|read|write|opendir|readdir|clear} [nfs|brick "
- "<brick>] [list-cnt <value>] |\n"
- "volume top <VOLNAME> {read-perf|write-perf} [bs <size> count <count>] "
+ "<brick>] [list-cnt <value>] | "
+ "{read-perf|write-perf} [bs <size> count <count>] "
"[brick <brick>] [list-cnt <value>]",
cli_cmd_volume_top_cbk, "volume top operations"},
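
A pattern repeated across the cli-cmd-volume.c hunks above: create_frame() moves from the top of each callback to just before CLI_LOCAL_INIT(), after argument parsing and dict construction have succeeded, and the allocation failure path now sets ret = -1 explicitly instead of falling through with a stale value. The apparent intent is to validate cheap things first and allocate the frame last, so nothing has to be unwound when the command line turns out to be invalid. A rough sketch of that shape in plain C (parse_args(), frame_new() and frame_free() are placeholders, not GlusterFS calls):

#include <stdlib.h>

struct frame { int unused; };

/* Illustrative helpers only. */
static int parse_args(int argc, char **argv) { (void)argv; return (argc >= 2) ? 0 : -1; }
static struct frame *frame_new(void) { return calloc(1, sizeof(struct frame)); }
static void frame_free(struct frame *f) { free(f); }

static int
run_command(int argc, char **argv)
{
    struct frame *frame = NULL;
    int ret = -1;

    /* 1. Cheap validation first: nothing to clean up if it fails. */
    ret = parse_args(argc, argv);
    if (ret)
        goto out;

    /* 2. Allocate the heavier resource only once the request is known
     *    to be well formed, and report failure explicitly. */
    frame = frame_new();
    if (!frame) {
        ret = -1;
        goto out;
    }

    /* ... submit the request using frame ... */
    ret = 0;

out:
    frame_free(frame);
    return ret;
}

int
main(int argc, char **argv)
{
    return run_command(argc, argv) ? 1 : 0;
}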
diff --git a/cli/src/cli-cmd.c b/cli/src/cli-cmd.c
index 2ee8b1b4968..2d458b16a56 100644
--- a/cli/src/cli-cmd.c
+++ b/cli/src/cli-cmd.c
@@ -28,11 +28,7 @@ static pthread_cond_t conn = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t conn_mutex = PTHREAD_MUTEX_INITIALIZER;
int cli_op_ret = 0;
-int connected = 0;
-
-int
-cli_cmd_log_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
- const char **words, int wordcount);
+static gf_boolean_t connected = _gf_false;
static unsigned
cli_cmd_needs_connection(struct cli_cmd_word *word)
@@ -236,18 +232,6 @@ out:
}
int
-cli_cmd_cond_init()
-{
- pthread_mutex_init(&cond_mutex, NULL);
- pthread_cond_init(&cond, NULL);
-
- pthread_mutex_init(&conn_mutex, NULL);
- pthread_cond_init(&conn, NULL);
-
- return 0;
-}
-
-int
cli_cmd_lock()
{
pthread_mutex_lock(&cond_mutex);
@@ -344,19 +328,32 @@ cli_cmd_await_connected(unsigned conn_timo)
}
int32_t
-cli_cmd_broadcast_connected()
+cli_cmd_broadcast_connected(gf_boolean_t status)
{
pthread_mutex_lock(&conn_mutex);
{
- connected = 1;
+ connected = status;
pthread_cond_broadcast(&conn);
}
-
pthread_mutex_unlock(&conn_mutex);
return 0;
}
+gf_boolean_t
+cli_cmd_connected(void)
+{
+ gf_boolean_t status;
+
+ pthread_mutex_lock(&conn_mutex);
+ {
+ status = connected;
+ }
+ pthread_mutex_unlock(&conn_mutex);
+
+ return status;
+}
+
int
cli_cmd_submit(struct rpc_clnt *rpc, void *req, call_frame_t *frame,
rpc_clnt_prog_t *prog, int procnum, struct iobref *iobref,
@@ -366,7 +363,8 @@ cli_cmd_submit(struct rpc_clnt *rpc, void *req, call_frame_t *frame,
unsigned timeout = 0;
if ((GLUSTER_CLI_PROFILE_VOLUME == procnum) ||
- (GLUSTER_CLI_HEAL_VOLUME == procnum))
+ (GLUSTER_CLI_HEAL_VOLUME == procnum) ||
+ (GLUSTER_CLI_GANESHA == procnum))
timeout = cli_ten_minutes_timeout;
else
timeout = cli_default_conn_timeout;
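
The cli-cmd.c changes above turn the bare global int connected into a flag that is only touched under conn_mutex: cli_cmd_broadcast_connected() now takes the new status and wakes waiters, and the new cli_cmd_connected() reads the flag under the same lock. A self-contained sketch of that shape with plain pthreads (gf_boolean_t replaced by int; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t conn_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t conn = PTHREAD_COND_INITIALIZER;
static int connected = 0;

/* Writer: update the flag and wake anyone waiting on a state change. */
static void
broadcast_connected(int status)
{
    pthread_mutex_lock(&conn_mutex);
    connected = status;
    pthread_cond_broadcast(&conn);
    pthread_mutex_unlock(&conn_mutex);
}

/* Reader: take the same lock so the flag is never observed mid-update. */
static int
is_connected(void)
{
    int status;

    pthread_mutex_lock(&conn_mutex);
    status = connected;
    pthread_mutex_unlock(&conn_mutex);

    return status;
}

int
main(void)
{
    broadcast_connected(1);
    printf("connected: %d\n", is_connected());
    return 0;
}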
diff --git a/cli/src/cli-cmd.h b/cli/src/cli-cmd.h
index b6bc5659936..c1c068c7085 100644
--- a/cli/src/cli-cmd.h
+++ b/cli/src/cli-cmd.h
@@ -102,9 +102,6 @@ int
cli_cmd_broadcast_response(int32_t status);
int
-cli_cmd_cond_init();
-
-int
cli_cmd_lock();
int
diff --git a/cli/src/cli-quotad-client.c b/cli/src/cli-quotad-client.c
index 52ab97ee815..772b8f75bd9 100644
--- a/cli/src/cli-quotad-client.c
+++ b/cli/src/cli-quotad-client.c
@@ -10,9 +10,6 @@
#include "cli-quotad-client.h"
-extern struct rpc_clnt global_quotad_rpc;
-extern struct rpc_clnt_program cli_quotad_clnt;
-
int
cli_quotad_submit_request(void *req, call_frame_t *frame, rpc_clnt_prog_t *prog,
int procnum, struct iobref *iobref, xlator_t *this,
@@ -60,7 +57,7 @@ cli_quotad_submit_request(void *req, call_frame_t *frame, rpc_clnt_prog_t *prog,
}
/* Send the msg */
- ret = rpc_clnt_submit(&global_quotad_rpc, prog, procnum, cbkfn, &iov, count,
+ ret = rpc_clnt_submit(global_quotad_rpc, prog, procnum, cbkfn, &iov, count,
NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
ret = 0;
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 35985ab44c6..9b6b0c7fa50 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -19,6 +19,12 @@
#define INDENT_MAIN_HEAD "%-25s %s "
+#define RETURNING "Returning %d"
+#define XML_ERROR "Error outputting to xml"
+#define XDR_DECODE_FAIL "Failed to decode xdr response"
+#define DICT_SERIALIZE_FAIL "Failed to serialize data to dictionary"
+#define DICT_UNSERIALIZE_FAIL "Failed to unserialize the dictionary"
+
/* Do not show estimates if greater than this number */
#define REBAL_ESTIMATE_SEC_UPPER_LIMIT (60 * 24 * 3600)
#define REBAL_ESTIMATE_START_TIME 600
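
The new RETURNING, XML_ERROR, XDR_DECODE_FAIL, DICT_SERIALIZE_FAIL and DICT_UNSERIALIZE_FAIL macros appear to exist so that the many gf_log() call sites later in the file share a single spelling of each message instead of dozens of slightly different string literals. The same idea in a trivial standalone form (log_msg() is a stand-in for gf_log(), which also takes a domain and a level):

#include <stdarg.h>
#include <stdio.h>

#define RETURNING "Returning %d"
#define XDR_DECODE_FAIL "Failed to decode xdr response"

/* Stand-in logger: one place that appends the newline. */
static void
log_msg(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fputc('\n', stderr);
}

static int
decode_reply(int ok)
{
    int ret = ok ? 0 : -1;

    if (ret < 0)
        log_msg(XDR_DECODE_FAIL); /* one spelling, reused at every call site */

    log_msg(RETURNING, ret);
    return ret;
}

int
main(void)
{
    decode_reply(0);
    decode_reply(1);
    return 0;
}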
@@ -29,32 +35,21 @@
#include <sys/uio.h>
#include <stdlib.h>
#include <sys/mount.h>
-#include "cli1-xdr.h"
-#include "xdr-generic.h"
-#include "protocol-common.h"
-#include "cli-mem-types.h"
#include <glusterfs/compat.h>
-#include <glusterfs/upcall-utils.h>
-
+#include "cli-mem-types.h"
#include <glusterfs/syscall.h>
#include "glusterfs3.h"
#include "portmap-xdr.h"
#include <glusterfs/byte-order.h>
-#include "cli-quotad-client.h"
#include <glusterfs/run.h>
-#include <glusterfs/quota-common-utils.h>
#include <glusterfs/events.h>
enum gf_task_types { GF_TASK_TYPE_REBALANCE, GF_TASK_TYPE_REMOVE_BRICK };
-extern struct rpc_clnt *global_quotad_rpc;
-extern rpc_clnt_prog_t cli_quotad_clnt;
-extern rpc_clnt_prog_t *cli_rpc_prog;
-extern int cli_op_ret;
-extern int connected;
+rpc_clnt_prog_t cli_quotad_clnt;
-int32_t
+static int32_t
gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data);
char *cli_vol_status_str[] = {
@@ -74,27 +69,27 @@ char *cli_vol_task_status_str[] = {"not started",
"fix-layout failed",
"unknown"};
-int32_t
+static int32_t
gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data);
-int32_t
+static int32_t
gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data);
-int
+static int
cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
xdrproc_t xdrproc, dict_t *dict, int procnum, xlator_t *this,
rpc_clnt_prog_t *prog, struct iobref *iobref);
-int
+static int
add_cli_cmd_timeout_to_dict(dict_t *dict);
-rpc_clnt_prog_t cli_handshake_prog = {
+static rpc_clnt_prog_t cli_handshake_prog = {
.progname = "cli handshake",
.prognum = GLUSTER_HNDSK_PROGRAM,
.progver = GLUSTER_HNDSK_VERSION,
};
-rpc_clnt_prog_t cli_pmap_prog = {
+static rpc_clnt_prog_t cli_pmap_prog = {
.progname = "cli portmap",
.prognum = GLUSTER_PMAP_PROGRAM,
.progver = GLUSTER_PMAP_VERSION,
@@ -133,7 +128,7 @@ gf_free_xdr_fsm_log_rsp(gf1_cli_fsm_log_rsp rsp)
}
}
-int
+static int
gf_cli_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -141,9 +136,7 @@ gf_cli_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
0,
};
int ret = -1;
- char msg[1024] = {
- 0,
- };
+ char msg[1024] = "success";
GF_ASSERT(myframe);
@@ -154,7 +147,7 @@ gf_cli_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
// rsp.op_ret = -1;
// rsp.op_errno = EINVAL;
goto out;
@@ -162,22 +155,23 @@ gf_cli_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_log("cli", GF_LOG_INFO, "Received resp to probe");
- if (rsp.op_errstr && (strlen(rsp.op_errstr) > 0)) {
+ if (rsp.op_errstr && rsp.op_errstr[0] != '\0') {
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
- if (rsp.op_ret)
+ if (rsp.op_ret) {
gf_log("cli", GF_LOG_ERROR, "%s", msg);
+ }
}
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str(NULL, (rsp.op_ret) ? NULL : msg, rsp.op_ret,
rsp.op_errno, (rsp.op_ret) ? msg : NULL);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
if (!rsp.op_ret)
- cli_out("peer probe: success. %s", msg);
+ cli_out("peer probe: %s", msg);
else
cli_err("peer probe: failed: %s", msg);
@@ -189,7 +183,7 @@ out:
return ret;
}
-int
+static int
gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -197,9 +191,7 @@ gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count,
0,
};
int ret = -1;
- char msg[1024] = {
- 0,
- };
+ char msg[1024] = "success";
GF_ASSERT(myframe);
@@ -210,7 +202,7 @@ gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
// rsp.op_ret = -1;
// rsp.op_errno = EINVAL;
goto out;
@@ -219,19 +211,17 @@ gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_log("cli", GF_LOG_INFO, "Received resp to deprobe");
if (rsp.op_ret) {
- if (strlen(rsp.op_errstr) > 0) {
+ if (rsp.op_errstr[0] != '\0') {
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
gf_log("cli", GF_LOG_ERROR, "%s", rsp.op_errstr);
}
- } else {
- snprintf(msg, sizeof(msg), "success");
}
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str(NULL, (rsp.op_ret) ? NULL : msg, rsp.op_ret,
rsp.op_errno, (rsp.op_ret) ? msg : NULL);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -248,11 +238,11 @@ out:
return ret;
}
-int
-gf_cli_output_peer_hostnames(dict_t *dict, int count, char *prefix)
+static int
+gf_cli_output_peer_hostnames(dict_t *dict, int count, const char *prefix)
{
int ret = -1;
- char key[256] = {
+ char key[512] = {
0,
};
int i = 0;
@@ -263,8 +253,8 @@ gf_cli_output_peer_hostnames(dict_t *dict, int count, char *prefix)
* as friend.hostname
*/
for (i = 1; i < count; i++) {
- snprintf(key, sizeof(key), "%s.hostname%d", prefix, i);
- ret = dict_get_str(dict, key, &hostname);
+ ret = snprintf(key, sizeof(key), "%s.hostname%d", prefix, i);
+ ret = dict_get_strn(dict, key, ret, &hostname);
if (ret)
break;
cli_out("%s", hostname);
@@ -274,7 +264,7 @@ gf_cli_output_peer_hostnames(dict_t *dict, int count, char *prefix)
return ret;
}
-int
+static int
gf_cli_output_peer_status(dict_t *dict, int count)
{
int ret = -1;
@@ -284,26 +274,27 @@ gf_cli_output_peer_status(dict_t *dict, int count)
char key[256] = {
0,
};
+ int keylen;
char *state = NULL;
int32_t connected = 0;
- char *connected_str = NULL;
+ const char *connected_str = NULL;
int hostname_count = 0;
cli_out("Number of Peers: %d", count);
i = 1;
while (i <= count) {
- snprintf(key, 256, "friend%d.uuid", i);
- ret = dict_get_str(dict, key, &uuid_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.uuid", i);
+ ret = dict_get_strn(dict, key, keylen, &uuid_buf);
if (ret)
goto out;
- snprintf(key, 256, "friend%d.hostname", i);
- ret = dict_get_str(dict, key, &hostname_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.hostname", i);
+ ret = dict_get_strn(dict, key, keylen, &hostname_buf);
if (ret)
goto out;
- snprintf(key, 256, "friend%d.connected", i);
- ret = dict_get_int32(dict, key, &connected);
+ keylen = snprintf(key, sizeof(key), "friend%d.connected", i);
+ ret = dict_get_int32n(dict, key, keylen, &connected);
if (ret)
goto out;
if (connected)
@@ -311,16 +302,16 @@ gf_cli_output_peer_status(dict_t *dict, int count)
else
connected_str = "Disconnected";
- snprintf(key, 256, "friend%d.state", i);
- ret = dict_get_str(dict, key, &state);
+ keylen = snprintf(key, sizeof(key), "friend%d.state", i);
+ ret = dict_get_strn(dict, key, keylen, &state);
if (ret)
goto out;
cli_out("\nHostname: %s\nUuid: %s\nState: %s (%s)", hostname_buf,
uuid_buf, state, connected_str);
- snprintf(key, sizeof(key), "friend%d.hostname_count", i);
- ret = dict_get_int32(dict, key, &hostname_count);
+ keylen = snprintf(key, sizeof(key), "friend%d.hostname_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &hostname_count);
/* Print other addresses only if there are more than 1.
*/
if ((ret == 0) && (hostname_count > 1)) {
@@ -340,7 +331,7 @@ out:
return ret;
}
-int
+static int
gf_cli_output_pool_list(dict_t *dict, int count)
{
int ret = -1;
@@ -348,18 +339,19 @@ gf_cli_output_pool_list(dict_t *dict, int count)
char *hostname_buf = NULL;
int32_t hostname_len = 8; /*min len 8 chars*/
int32_t i = 1;
- char key[256] = {
+ char key[64] = {
0,
};
+ int keylen;
int32_t connected = 0;
- char *connected_str = NULL;
+ const char *connected_str = NULL;
if (count <= 0)
goto out;
while (i <= count) {
- snprintf(key, 256, "friend%d.hostname", i);
- ret = dict_get_str(dict, key, &hostname_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.hostname", i);
+ ret = dict_get_strn(dict, key, keylen, &hostname_buf);
if (ret)
goto out;
@@ -374,18 +366,18 @@ gf_cli_output_pool_list(dict_t *dict, int count)
i = 1;
while (i <= count) {
- snprintf(key, 256, "friend%d.uuid", i);
- ret = dict_get_str(dict, key, &uuid_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.uuid", i);
+ ret = dict_get_strn(dict, key, keylen, &uuid_buf);
if (ret)
goto out;
- snprintf(key, 256, "friend%d.hostname", i);
- ret = dict_get_str(dict, key, &hostname_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.hostname", i);
+ ret = dict_get_strn(dict, key, keylen, &hostname_buf);
if (ret)
goto out;
- snprintf(key, 256, "friend%d.connected", i);
- ret = dict_get_int32(dict, key, &connected);
+ keylen = snprintf(key, sizeof(key), "friend%d.connected", i);
+ ret = dict_get_int32n(dict, key, keylen, &connected);
if (ret)
goto out;
if (connected)
@@ -406,7 +398,7 @@ out:
/* function pointer for gf_cli_output_{pool_list,peer_status} */
typedef int (*cli_friend_output_fn)(dict_t *, int);
-int
+static int
gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -418,11 +410,15 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
char msg[1024] = {
0,
};
- char *cmd = NULL;
+ const char *cmd = NULL;
cli_friend_output_fn friend_output_fn;
call_frame_t *frame = NULL;
unsigned long flags = 0;
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
GF_ASSERT(myframe);
frame = myframe;
@@ -440,14 +436,9 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
/* 'free' the flags set by gf_cli_list_friends */
frame->local = NULL;
- if (-1 == req->rpc_status) {
- goto out;
- }
-
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
// rsp.op_ret = -1;
// rsp.op_errno = EINVAL;
goto out;
@@ -462,7 +453,7 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno,
msg);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
cli_err("%s", msg);
@@ -481,7 +472,7 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
&dict);
if (ret) {
- gf_log("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -489,11 +480,11 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno,
msg);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
- ret = dict_get_int32(dict, "count", &count);
+ ret = dict_get_int32_sizen(dict, "count", &count);
if (ret) {
goto out;
}
@@ -507,7 +498,7 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno,
NULL);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
} else {
ret = -1;
}
@@ -531,7 +522,7 @@ out:
return ret;
}
-int
+static int
gf_cli_get_state_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -551,7 +542,7 @@ gf_cli_get_state_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -574,11 +565,11 @@ gf_cli_get_state_cbk(struct rpc_req *req, struct iovec *iov, int count,
"Failed to get daemon state. Check glusterd"
" log file for more details");
} else {
- ret = dict_get_str(dict, "daemon", &daemon_name);
+ ret = dict_get_str_sizen(dict, "daemon", &daemon_name);
if (ret)
gf_log("cli", GF_LOG_ERROR, "Couldn't get daemon name");
- ret = dict_get_str(dict, "ofilepath", &ofilepath);
+ ret = dict_get_str_sizen(dict, "ofilepath", &ofilepath);
if (ret)
gf_log("cli", GF_LOG_ERROR, "Couldn't get filepath");
@@ -598,7 +589,7 @@ out:
return ret;
}
-void
+static void
cli_out_options(char *substr, char *optstr, char *valstr)
{
char *ptr1 = NULL;
@@ -649,22 +640,24 @@ static int
print_brick_details(dict_t *dict, int volcount, int start_index, int end_index,
int replica_count)
{
- char key[1024] = {
+ char key[64] = {
0,
};
+ int keylen;
int index = start_index;
int isArbiter = 0;
int ret = -1;
char *brick = NULL;
while (index <= end_index) {
- snprintf(key, 1024, "volume%d.brick%d", volcount, index);
- ret = dict_get_str(dict, key, &brick);
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d", volcount,
+ index);
+ ret = dict_get_strn(dict, key, keylen, &brick);
if (ret)
goto out;
- snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter", volcount,
- index);
- if (dict_get(dict, key))
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter",
+ volcount, index);
+ if (dict_getn(dict, key, keylen))
isArbiter = 1;
else
isArbiter = 0;
@@ -679,7 +672,8 @@ print_brick_details(dict_t *dict, int volcount, int start_index, int end_index,
out:
return ret;
}
-void
+
+static void
gf_cli_print_number_of_bricks(int type, int brick_count, int dist_count,
int stripe_count, int replica_count,
int disperse_count, int redundancy_count,
@@ -705,7 +699,7 @@ gf_cli_print_number_of_bricks(int type, int brick_count, int dist_count,
}
}
-int
+static int
gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -731,7 +725,8 @@ gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
char *ta_brick = NULL;
dict_t *dict = NULL;
cli_local_t *local = NULL;
- char key[1024] = {0};
+ char key[64] = {0};
+ int keylen;
char err_str[2048] = {0};
gf_cli_rsp rsp = {0};
char *caps __attribute__((unused)) = NULL;
@@ -751,8 +746,7 @@ gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -776,11 +770,11 @@ gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
- ret = dict_get_int32(dict, "count", &count);
+ ret = dict_get_int32_sizen(dict, "count", &count);
if (ret)
goto out;
@@ -817,7 +811,7 @@ xml_output:
ret = cli_xml_output_vol_info_begin(local, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
}
@@ -825,7 +819,7 @@ xml_output:
if (dict) {
ret = cli_xml_output_vol_info(local, dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
}
@@ -833,80 +827,80 @@ xml_output:
if (local->get_vol.flags == GF_CLI_GET_VOLUME) {
ret = cli_xml_output_vol_info_end(local);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
goto out;
}
while (i < count) {
cli_out(" ");
- snprintf(key, 256, "volume%d.name", i);
- ret = dict_get_str(dict, key, &volname);
+ keylen = snprintf(key, sizeof(key), "volume%d.name", i);
+ ret = dict_get_strn(dict, key, keylen, &volname);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.type", i);
- ret = dict_get_int32(dict, key, &type);
+ keylen = snprintf(key, sizeof(key), "volume%d.type", i);
+ ret = dict_get_int32n(dict, key, keylen, &type);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.status", i);
- ret = dict_get_int32(dict, key, &status);
+ keylen = snprintf(key, sizeof(key), "volume%d.status", i);
+ ret = dict_get_int32n(dict, key, keylen, &status);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.brick_count", i);
- ret = dict_get_int32(dict, key, &brick_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.brick_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &brick_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.dist_count", i);
- ret = dict_get_int32(dict, key, &dist_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.dist_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &dist_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.stripe_count", i);
- ret = dict_get_int32(dict, key, &stripe_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.stripe_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &stripe_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.replica_count", i);
- ret = dict_get_int32(dict, key, &replica_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.replica_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &replica_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.disperse_count", i);
- ret = dict_get_int32(dict, key, &disperse_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.disperse_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &disperse_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.redundancy_count", i);
- ret = dict_get_int32(dict, key, &redundancy_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.redundancy_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &redundancy_count);
if (ret)
goto out;
- snprintf(key, sizeof(key), "volume%d.arbiter_count", i);
- ret = dict_get_int32(dict, key, &arbiter_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.arbiter_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &arbiter_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.transport", i);
- ret = dict_get_int32(dict, key, &transport);
+ keylen = snprintf(key, sizeof(key), "volume%d.transport", i);
+ ret = dict_get_int32n(dict, key, keylen, &transport);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.volume_id", i);
- ret = dict_get_str(dict, key, &volume_id_str);
+ keylen = snprintf(key, sizeof(key), "volume%d.volume_id", i);
+ ret = dict_get_strn(dict, key, keylen, &volume_id_str);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.snap_count", i);
- ret = dict_get_int32(dict, key, &snap_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.snap_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &snap_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.thin_arbiter_count", i);
- ret = dict_get_int32(dict, key, &thin_arbiter_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &thin_arbiter_count);
if (ret)
goto out;
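
The gf_cli_get_volume_cbk() hunk above shrinks the key buffer from 1024 to 64 bytes, always bounds snprintf() with sizeof(key), and hands the returned length to the dict_get_strn()/dict_get_int32n() getters. A small standalone sketch of building such keys safely, with an explicit truncation check (plain C, no GlusterFS headers):

#include <stdio.h>

int
main(void)
{
    /* Keys look like "volume%d.replica_count"; 64 bytes is ample, and
     * snprintf() bounded by sizeof(key) guards against overflow if the
     * format ever grows. */
    char key[64];
    int i;

    for (i = 0; i < 3; i++) {
        int keylen = snprintf(key, sizeof(key), "volume%d.replica_count", i);

        if (keylen < 0 || (size_t)keylen >= sizeof(key)) {
            fprintf(stderr, "key truncated\n");
            return 1;
        }
        /* keylen can now be passed to a length-aware lookup rather than
         * making the lookup call strlen(key) again. */
        printf("key=%s len=%d\n", key, keylen);
    }
    return 0;
}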
@@ -937,14 +931,14 @@ xml_output:
goto out;
if (thin_arbiter_count) {
- snprintf(key, 1024, "volume%d.thin_arbiter_brick", i);
+ snprintf(key, sizeof(key), "volume%d.thin_arbiter_brick", i);
ret = dict_get_str(dict, key, &ta_brick);
if (ret)
goto out;
cli_out("Thin-arbiter-path: %s", ta_brick);
}
- snprintf(key, 256, "volume%d.opt_count", i);
+ snprintf(key, sizeof(key), "volume%d.opt_count", i);
ret = dict_get_int32(dict, key, &opt_count);
if (ret)
goto out;
@@ -954,7 +948,7 @@ xml_output:
cli_out("Options Reconfigured:");
- snprintf(key, 256, "volume%d.option.", i);
+ snprintf(key, sizeof(key), "volume%d.option.", i);
ret = dict_foreach(dict, _gf_cli_output_volinfo_opts, key);
if (ret)
@@ -975,11 +969,11 @@ out:
gf_free_xdr_cli_rsp(rsp);
- gf_log("cli", GF_LOG_DEBUG, "Returning: %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli_create_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1006,24 +1000,19 @@ gf_cli_create_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_INFO, "Received resp to create volume");
- ret = dict_get_str(local->dict, "volname", &volname);
- if (ret)
- goto out;
-
if (global_state->mode & GLUSTER_MODE_XML) {
if (rsp.op_ret == 0) {
rsp_dict = dict_new();
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len,
&rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -1031,10 +1020,14 @@ gf_cli_create_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_create(rsp_dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret)
+ goto out;
+
if (rsp.op_ret && strcmp(rsp.op_errstr, ""))
cli_err("volume create: %s: failed: %s", volname, rsp.op_errstr);
else if (rsp.op_ret)
@@ -1056,7 +1049,7 @@ out:
return ret;
}
-int
+static int
gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1069,12 +1062,11 @@ gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
call_frame_t *frame = NULL;
dict_t *rsp_dict = NULL;
- GF_ASSERT(myframe);
-
if (-1 == req->rpc_status) {
goto out;
}
+ GF_ASSERT(myframe);
frame = myframe;
GF_ASSERT(frame->local);
@@ -1083,14 +1075,7 @@ gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
- goto out;
- }
-
- ret = dict_get_str(local->dict, "volname", &volname);
- if (ret) {
- gf_log(frame->this->name, GF_LOG_ERROR, "dict get failed");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1102,7 +1087,7 @@ gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len,
&rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -1110,7 +1095,13 @@ gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_generic_volume("volDelete", rsp_dict, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR, "dict get failed");
goto out;
}
@@ -1129,11 +1120,11 @@ out:
if (rsp_dict)
dict_unref(rsp_dict);
- gf_log("", GF_LOG_DEBUG, "Returning with %d", ret);
+ gf_log("", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1159,8 +1150,7 @@ gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1176,17 +1166,7 @@ gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to unserialize "
- "response for uuid get");
- goto out;
- }
-
- ret = dict_get_str(dict, "uuid", &uuid_str);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to get uuid "
- "from dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -1194,7 +1174,7 @@ gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_dict("uuidGenerate", dict, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -1205,6 +1185,11 @@ gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
cli_err("%s", rsp.op_errstr);
} else {
+ ret = dict_get_str_sizen(dict, "uuid", &uuid_str);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get uuid from dictionary");
+ goto out;
+ }
cli_out("UUID: %s", uuid_str);
}
ret = rsp.op_ret;
@@ -1217,11 +1202,11 @@ out:
if (dict)
dict_unref(dict);
- gf_log("", GF_LOG_DEBUG, "Returning with %d", ret);
+ gf_log("", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli3_1_uuid_reset_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1246,8 +1231,7 @@ gf_cli3_1_uuid_reset_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1259,7 +1243,7 @@ gf_cli3_1_uuid_reset_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_dict("uuidReset", NULL, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -1275,11 +1259,11 @@ out:
cli_local_wipe(local);
gf_free_xdr_cli_rsp(rsp);
- gf_log("", GF_LOG_DEBUG, "Returning with %d", ret);
+ gf_log("", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1306,14 +1290,7 @@ gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
- goto out;
- }
-
- ret = dict_get_str(local->dict, "volname", &volname);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR, "dict get failed");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1325,7 +1302,7 @@ gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len,
&rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -1333,7 +1310,13 @@ gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_generic_volume("volStart", rsp_dict, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "dict get failed");
goto out;
}
@@ -1355,7 +1338,7 @@ out:
return ret;
}
-int
+static int
gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1382,15 +1365,7 @@ gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
- goto out;
- }
-
- ret = dict_get_str(local->dict, "volname", &volname);
- if (ret) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Unable to get volname from dict");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1402,7 +1377,7 @@ gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len,
&rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -1410,7 +1385,14 @@ gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_generic_volume("volStop", rsp_dict, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "Unable to get volname from dict");
goto out;
}
@@ -1433,15 +1415,16 @@ out:
return ret;
}
-int
+static int
gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type)
{
int ret = -1;
int count = 0;
int i = 1;
- char key[256] = {
+ char key[64] = {
0,
};
+ int keylen;
gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED;
uint64_t files = 0;
uint64_t size = 0;
@@ -1461,19 +1444,26 @@ gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type)
uint64_t time_left = 0;
gf_boolean_t show_estimates = _gf_false;
- ret = dict_get_int32(dict, "count", &count);
+ ret = dict_get_int32_sizen(dict, "count", &count);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "count not set");
goto out;
}
- snprintf(key, sizeof(key), "status-1");
-
- ret = dict_get_int32(dict, key, (int32_t *)&status_rcd);
- if (ret) {
- gf_log("cli", GF_LOG_TRACE, "count %d %d", count, 1);
- gf_log("cli", GF_LOG_TRACE, "failed to get status");
- goto out;
+ for (i = 1; i <= count; i++) {
+ keylen = snprintf(key, sizeof(key), "status-%d", i);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&status_rcd);
+ /* If information from a node is missing, we should skip
+ * that node and try to fetch information from the other nodes.
+ * If information is not found for any of the nodes, we should
+ * error out.
+ */
+ if (!ret)
+ break;
+ if (ret && i == count) {
+ gf_log("cli", GF_LOG_TRACE, "failed to get status");
+ goto out;
+ }
}
/* Fix layout will be sent to all nodes for the volume
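
The loop added above no longer assumes that node 1 reported its rebalance status: it walks status-1 .. status-N, stops at the first key that is present, and errors out only when every node is missing. A small standalone sketch of that first-available-entry scan (lookup() stands in for dict_get_int32n()):

#include <stdio.h>

/* Stand-in lookup: returns 0 and fills *value when entry i is present,
 * -1 when that node did not report. */
static int
lookup(const int *present, const int *values, int i, int *value)
{
    if (!present[i])
        return -1;
    *value = values[i];
    return 0;
}

static int
first_available_status(const int *present, const int *values, int count,
                       int *status)
{
    int ret = -1;
    int i;

    if (count <= 0)
        return -1;

    for (i = 0; i < count; i++) {
        ret = lookup(present, values, i, status);
        /* Skip nodes with missing information; fail only if every node
         * is missing. */
        if (!ret)
            break;
        if (ret && i == count - 1)
            return -1;
    }
    return 0;
}

int
main(void)
{
    int present[] = { 0, 0, 1, 1 };
    int values[] = { 0, 0, 7, 9 };
    int status = -1;

    if (first_available_status(present, values, 4, &status) == 0)
        printf("first reported status: %d\n", status);
    return 0;
}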
@@ -1512,15 +1502,13 @@ gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type)
time_left = 0;
/* Check if status is NOT_STARTED, and continue early */
- snprintf(key, sizeof(key), "status-%d", i);
+ keylen = snprintf(key, sizeof(key), "status-%d", i);
- ret = dict_get_int32(dict, key, (int32_t *)&status_rcd);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&status_rcd);
if (ret == -ENOENT) {
gf_log("cli", GF_LOG_TRACE, "count %d %d", count, i);
gf_log("cli", GF_LOG_TRACE, "failed to get status");
- gf_log("cli", GF_LOG_ERROR,
- "node down and has failed"
- " to set dict");
+ gf_log("cli", GF_LOG_ERROR, "node down and has failed to set dict");
continue;
/* skip this node if value not available*/
} else if (ret) {
@@ -1536,8 +1524,8 @@ gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type)
if (GF_DEFRAG_STATUS_STARTED == status_rcd)
show_estimates = _gf_true;
- snprintf(key, 256, "node-name-%d", i);
- ret = dict_get_str(dict, key, &node_name);
+ keylen = snprintf(key, sizeof(key), "node-name-%d", i);
+ ret = dict_get_strn(dict, key, keylen, &node_name);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get node-name");
@@ -1661,7 +1649,7 @@ out:
return ret;
}
-int
+static int
gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1693,18 +1681,18 @@ gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_str(local->dict, "volname", &volname);
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "Failed to get volname");
goto out;
}
- ret = dict_get_int32(local->dict, "rebalance-command", (int32_t *)&cmd);
+ ret = dict_get_int32_sizen(local->dict, "rebalance-command",
+ (int32_t *)&cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get command");
goto out;
@@ -1716,16 +1704,14 @@ gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret < 0) {
- gf_log("glusterd", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
+ gf_log("glusterd", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
if (!((cmd == GF_DEFRAG_CMD_STOP) || (cmd == GF_DEFRAG_CMD_STATUS)) &&
!(global_state->mode & GLUSTER_MODE_XML)) {
- ret = dict_get_str(dict, GF_REBALANCE_TID_KEY, &task_id_str);
+ ret = dict_get_str_sizen(dict, GF_REBALANCE_TID_KEY, &task_id_str);
if (ret) {
gf_log("cli", GF_LOG_WARNING, "failed to get %s from dict",
GF_REBALANCE_TID_KEY);
@@ -1783,8 +1769,7 @@ gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
else
snprintf(msg, sizeof(msg),
- "Failed to get the status of "
- "rebalance process");
+ "Failed to get the status of rebalance process");
goto done;
} else {
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
@@ -1823,7 +1808,7 @@ out:
return ret;
}
-int
+static int
gf_cli_rename_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1844,7 +1829,7 @@ gf_cli_rename_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -1856,7 +1841,7 @@ gf_cli_rename_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volRename", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -1873,7 +1858,7 @@ out:
return ret;
}
-int
+static int
gf_cli_reset_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1894,7 +1879,7 @@ gf_cli_reset_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -1910,7 +1895,7 @@ gf_cli_reset_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volReset", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -1927,7 +1912,63 @@ out:
return ret;
}
-char *
+static int
+gf_cli_ganesha_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ int ret = -1;
+ dict_t *dict = NULL;
+
+ GF_ASSERT(myframe);
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+ XDR_DECODE_FAIL);
+ goto out;
+ }
+
+ gf_log("cli", GF_LOG_DEBUG, "Received resp to ganesha");
+
+ dict = dict_new();
+
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+ if (ret)
+ goto out;
+
+ if (rsp.op_ret) {
+ if (strcmp(rsp.op_errstr, ""))
+ cli_err("nfs-ganesha: failed: %s", rsp.op_errstr);
+ else
+ cli_err("nfs-ganesha: failed");
+ } else {
+ cli_out("nfs-ganesha: success");
+ }
+
+ ret = rsp.op_ret;
+
+out:
+ if (dict)
+ dict_unref(dict);
+ cli_cmd_broadcast_response(ret);
+ return ret;
+}
+
+static char *
is_server_debug_xlator(void *myframe)
{
call_frame_t *frame = NULL;
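
gf_cli_ganesha_cbk(), added in the hunk above, follows the single-exit shape used by the neighbouring callbacks: every failure jumps to out:, where the dict is unreffed if it was created and cli_cmd_broadcast_response(ret) is always invoked, so the thread waiting on the command result is never left hanging on an error path. A generic sketch of that single-exit cleanup idiom in plain C (report_done() and parse_reply() are placeholders, not GlusterFS functions):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder for the "always notify the waiter" step. */
static void
report_done(int ret)
{
    printf("done, ret=%d\n", ret);
}

/* Placeholder parser: fails on empty input, otherwise returns a copy. */
static char *
parse_reply(const char *raw)
{
    size_t n;
    char *copy;

    if (!raw || !*raw)
        return NULL;
    n = strlen(raw) + 1;
    copy = malloc(n);
    if (copy)
        memcpy(copy, raw, n);
    return copy;
}

static int
handle_reply(const char *raw)
{
    char *parsed = NULL;
    int ret = -1;

    parsed = parse_reply(raw);
    if (!parsed)
        goto out;

    printf("reply: %s\n", parsed);
    ret = 0;

out:
    /* Cleanup and notification happen exactly once, on every path. */
    free(parsed);
    report_done(ret);
    return ret;
}

int
main(void)
{
    handle_reply("nfs-ganesha: success");
    handle_reply("");
    return 0;
}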
@@ -1972,7 +2013,7 @@ is_server_debug_xlator(void *myframe)
return debug_xlator;
}
-int
+static int
gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1999,7 +2040,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -2014,8 +2055,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "failed to unserialize volume set respone dict");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -2026,7 +2066,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
*/
debug_xlator = is_server_debug_xlator(myframe);
- if (dict_get_str(dict, "help-str", &help_str) && !msg[0])
+ if (dict_get_str_sizen(dict, "help-str", &help_str) && !msg[0])
snprintf(msg, sizeof(msg), "Set volume %s",
(rsp.op_ret) ? "unsuccessful" : "successful");
if (rsp.op_ret == 0 && debug_xlator) {
@@ -2041,7 +2081,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volSet", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2093,7 +2133,7 @@ gf_cli_add_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -2109,7 +2149,7 @@ gf_cli_add_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volAddBrick", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2125,7 +2165,7 @@ out:
return ret;
}
-int
+static int
gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
@@ -2141,7 +2181,7 @@ gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
gf1_op_commands cmd = GF_OP_CMD_NONE;
cli_local_t *local = NULL;
call_frame_t *frame = NULL;
- char *cmd_str = "unknown";
+ const char *cmd_str;
GF_ASSERT(myframe);
@@ -2157,12 +2197,11 @@ gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_int32(local->dict, "command", &command);
+ ret = dict_get_int32_sizen(local->dict, "command", &command);
if (ret)
goto out;
@@ -2176,20 +2215,17 @@ gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
cmd_str = "status";
break;
default:
+ cmd_str = "unknown";
break;
}
ret = rsp.op_ret;
if (rsp.op_ret == -1) {
if (strcmp(rsp.op_errstr, ""))
- snprintf(msg, sizeof(msg),
- "volume remove-brick %s: "
- "failed: %s",
+ snprintf(msg, sizeof(msg), "volume remove-brick %s: failed: %s",
cmd_str, rsp.op_errstr);
else
- snprintf(msg, sizeof(msg),
- "volume remove-brick %s: "
- "failed",
+ snprintf(msg, sizeof(msg), "volume remove-brick %s: failed",
cmd_str);
if (global_state->mode & GLUSTER_MODE_XML)
@@ -2205,10 +2241,7 @@ gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret < 0) {
- strncpy(msg,
- "failed to unserialize req-buffer to "
- "dictionary",
- sizeof(msg));
+ strncpy(msg, DICT_UNSERIALIZE_FAIL, sizeof(msg));
if (global_state->mode & GLUSTER_MODE_XML) {
rsp.op_ret = -1;
@@ -2237,8 +2270,7 @@ xml_output:
ret = gf_cli_print_rebalance_status(dict, GF_TASK_TYPE_REMOVE_BRICK);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to print remove-brick "
- "rebalance status");
+ "Failed to print remove-brick rebalance status");
goto out;
}
@@ -2260,7 +2292,7 @@ out:
return ret;
}
-int
+static int
gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -2292,12 +2324,11 @@ gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_int32(local->dict, "command", (int32_t *)&cmd);
+ ret = dict_get_int32_sizen(local->dict, "command", (int32_t *)&cmd);
if (ret) {
gf_log("", GF_LOG_ERROR, "failed to get command");
goto out;
@@ -2312,7 +2343,7 @@ gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed to unserialize rsp_dict");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -2322,7 +2353,8 @@ gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
case GF_OP_CMD_START:
cmd_str = "start";
- ret = dict_get_str(rsp_dict, GF_REMOVE_BRICK_TID_KEY, &task_id_str);
+ ret = dict_get_str_sizen(rsp_dict, GF_REMOVE_BRICK_TID_KEY,
+ &task_id_str);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
"remove-brick-id is not present in dict");
@@ -2352,7 +2384,7 @@ gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
rsp.op_errno, msg,
"volRemoveBrick");
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2383,7 +2415,7 @@ out:
return ret;
}
-int
+static int
gf_cli_reset_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -2393,7 +2425,7 @@ gf_cli_reset_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = -1;
cli_local_t *local = NULL;
call_frame_t *frame = NULL;
- char *rb_operation_str = NULL;
+ const char *rb_operation_str = NULL;
dict_t *rsp_dict = NULL;
char msg[1024] = {
0,
@@ -2414,80 +2446,53 @@ gf_cli_reset_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_str(local->dict, "operation", &reset_op);
+ ret = dict_get_str_sizen(local->dict, "operation", &reset_op);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "dict_get on operation failed");
goto out;
}
+ if (strcmp(reset_op, "GF_RESET_OP_START") &&
+ strcmp(reset_op, "GF_RESET_OP_COMMIT") &&
+ strcmp(reset_op, "GF_RESET_OP_COMMIT_FORCE")) {
+ ret = -1;
+ goto out;
+ }
+
if (rsp.dict.dict_len) {
/* Unserialize the dictionary */
rsp_dict = dict_new();
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "failed to "
- "unserialize rsp buffer to dictionary");
+ gf_log(frame->this->name, GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
- if (strcmp(reset_op, "GF_RESET_OP_START") &&
- strcmp(reset_op, "GF_RESET_OP_COMMIT") &&
- strcmp(reset_op, "GF_RESET_OP_COMMIT_FORCE")) {
- rb_operation_str = gf_strdup("Unknown operation");
- ret = -1;
- goto out;
- }
-
if (rsp.op_ret && (strcmp(rsp.op_errstr, ""))) {
- rb_operation_str = gf_strdup(rsp.op_errstr);
+ rb_operation_str = rsp.op_errstr;
} else {
if (!strcmp(reset_op, "GF_RESET_OP_START")) {
if (rsp.op_ret)
- rb_operation_str = gf_strdup(
- "reset-brick "
- "start "
- "operation "
- "failed");
+ rb_operation_str = "reset-brick start operation failed";
else
- rb_operation_str = gf_strdup(
- "reset-brick "
- "start "
- "operation "
- "successful");
+ rb_operation_str = "reset-brick start operation successful";
} else if (!strcmp(reset_op, "GF_RESET_OP_COMMIT")) {
if (rsp.op_ret)
- rb_operation_str = gf_strdup(
- "reset-brick "
- "commit "
- "operation "
- "failed");
+ rb_operation_str = "reset-brick commit operation failed";
else
- rb_operation_str = gf_strdup(
- "reset-brick "
- "commit "
- "operation "
- "successful");
+ rb_operation_str = "reset-brick commit operation successful";
} else if (!strcmp(reset_op, "GF_RESET_OP_COMMIT_FORCE")) {
if (rsp.op_ret)
- rb_operation_str = gf_strdup(
- "reset-brick "
- "commit "
- "force operation "
- "failed");
+ rb_operation_str = "reset-brick commit force operation failed";
else
- rb_operation_str = gf_strdup(
- "reset-brick "
- "commit "
- "force operation "
- "successful");
+ rb_operation_str =
+ "reset-brick commit force operation successful";
}
}
@@ -2499,7 +2504,7 @@ gf_cli_reset_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_replace_brick(rsp_dict, rsp.op_ret,
rsp.op_errno, msg);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2516,9 +2521,6 @@ out:
if (local)
cli_local_wipe(local);
- if (rb_operation_str)
- GF_FREE(rb_operation_str);
-
cli_cmd_broadcast_response(ret);
gf_free_xdr_cli_rsp(rsp);
if (rsp_dict)
@@ -2526,7 +2528,7 @@ out:
return ret;
}
-int
+static int
gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -2536,7 +2538,7 @@ gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = -1;
cli_local_t *local = NULL;
call_frame_t *frame = NULL;
- char *rb_operation_str = NULL;
+ const char *rb_operation_str = NULL;
dict_t *rsp_dict = NULL;
char msg[1024] = {
0,
@@ -2557,12 +2559,11 @@ gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_str(local->dict, "operation", &replace_op);
+ ret = dict_get_str_sizen(local->dict, "operation", &replace_op);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "dict_get on operation failed");
goto out;
@@ -2574,29 +2575,23 @@ gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "failed to "
- "unserialize rsp buffer to dictionary");
+ gf_log(frame->this->name, GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
if (!strcmp(replace_op, "GF_REPLACE_OP_COMMIT_FORCE")) {
if (rsp.op_ret || ret)
- rb_operation_str = gf_strdup(
- "replace-brick commit "
- "force operation failed");
+ rb_operation_str = "replace-brick commit force operation failed";
else
- rb_operation_str = gf_strdup(
- "replace-brick commit "
- "force operation "
- "successful");
+ rb_operation_str =
+ "replace-brick commit force operation successful";
} else {
gf_log(frame->this->name, GF_LOG_DEBUG, "Unknown operation");
}
if (rsp.op_ret && (strcmp(rsp.op_errstr, ""))) {
- rb_operation_str = gf_strdup(rsp.op_errstr);
+ rb_operation_str = rsp.op_errstr;
}
gf_log("cli", GF_LOG_INFO, "Received resp to replace brick");
@@ -2607,7 +2602,7 @@ gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_replace_brick(rsp_dict, rsp.op_ret,
rsp.op_errno, msg);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2624,9 +2619,6 @@ out:
if (local)
cli_local_wipe(local);
- if (rb_operation_str)
- GF_FREE(rb_operation_str);
-
cli_cmd_broadcast_response(ret);
gf_free_xdr_cli_rsp(rsp);
if (rsp_dict)
@@ -2656,7 +2648,7 @@ gf_cli_log_rotate_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -2672,7 +2664,7 @@ gf_cli_log_rotate_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volLogRotate", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2710,7 +2702,7 @@ gf_cli_sync_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -2726,7 +2718,7 @@ gf_cli_sync_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volSync", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2755,29 +2747,23 @@ print_quota_list_usage_output(cli_local_t *local, char *path, int64_t avail,
char *hl_str = NULL;
char *sl_val = NULL;
- used_str = gf_uint64_2human_readable(used_space->size);
-
- if (limit_set) {
- hl_str = gf_uint64_2human_readable(limits->hl);
- avail_str = gf_uint64_2human_readable(avail);
-
- sl_val = gf_uint64_2human_readable(sl_num);
- }
-
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_quota_xml_output(local, path, limits->hl, sl_str, sl_num,
used_space->size, avail, sl ? "Yes" : "No",
hl ? "Yes" : "No", limit_set);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to "
- "output in xml format for quota "
- "list command");
+ "Failed to output in xml format for quota list command");
}
goto out;
}
+ used_str = gf_uint64_2human_readable(used_space->size);
+
if (limit_set) {
+ hl_str = gf_uint64_2human_readable(limits->hl);
+ sl_val = gf_uint64_2human_readable(sl_num);
+
if (!used_str) {
cli_out("%-40s %7s %7s(%s) %8" PRIu64 "%9" PRIu64
""
@@ -2785,6 +2771,7 @@ print_quota_list_usage_output(cli_local_t *local, char *path, int64_t avail,
path, hl_str, sl_str, sl_val, used_space->size, avail,
sl ? "Yes" : "No", hl ? "Yes" : "No");
} else {
+ avail_str = gf_uint64_2human_readable(avail);
cli_out("%-40s %7s %7s(%s) %8s %7s %15s %20s", path, hl_str, sl_str,
sl_val, used_str, avail_str, sl ? "Yes" : "No",
hl ? "Yes" : "No");
@@ -2820,9 +2807,7 @@ print_quota_list_object_output(cli_local_t *local, char *path, int64_t avail,
hl ? "Yes" : "No", limit_set);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to "
- "output in xml format for quota "
- "list command");
+ "Failed to output in xml format for quota list command");
}
goto out;
}
@@ -2868,16 +2853,17 @@ print_quota_list_output(cli_local_t *local, char *path, char *default_sl,
ret = gf_string2percent(default_sl, &sl_num);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "could not convert default soft limit"
- " to percent");
+ "could not convert default soft limit to percent");
goto out;
}
sl_num = (sl_num * limits->hl) / 100;
sl_final = default_sl;
} else {
sl_num = (limits->sl * limits->hl) / 100;
- snprintf(percent_str, sizeof(percent_str), "%" PRIu64 "%%",
- limits->sl);
+ ret = snprintf(percent_str, sizeof(percent_str), "%" PRIu64 "%%",
+ limits->sl);
+ if (ret < 0)
+ goto out;
sl_final = percent_str;
}
if (type == GF_QUOTA_OPTION_TYPE_LIST)
@@ -2937,9 +2923,8 @@ print_quota_list_from_mountdir(cli_local_t *local, char *mountdir,
ret = sys_lgetxattr(mountdir, key, (void *)&limits, sizeof(limits));
if (ret < 0) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to get the xattr %s "
- "on %s. Reason : %s",
- key, mountdir, strerror(errno));
+ "Failed to get the xattr %s on %s. Reason : %s", key, mountdir,
+ strerror(errno));
switch (errno) {
#if defined(ENODATA)
@@ -2998,9 +2983,7 @@ enoattr:
}
if (ret < 0) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to get quota size "
- "on path %s: %s",
+ gf_log("cli", GF_LOG_ERROR, "Failed to get quota size on path %s: %s",
mountdir, strerror(errno));
print_quota_list_empty(path, type);
goto out;
@@ -3016,7 +2999,7 @@ out:
return ret;
}
-int
+static int
gluster_remove_auxiliary_mount(char *volname)
{
int ret = -1;
@@ -3031,25 +3014,24 @@ gluster_remove_auxiliary_mount(char *volname)
GLUSTERD_GET_QUOTA_LIST_MOUNT_PATH(mountdir, volname, "/");
ret = gf_umount_lazy(this->name, mountdir, 1);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "umount on %s failed, "
- "reason : %s",
+ gf_log("cli", GF_LOG_ERROR, "umount on %s failed, reason : %s",
mountdir, strerror(errno));
}
return ret;
}
-int
+static int
gf_cli_print_limit_list_from_dict(cli_local_t *local, char *volname,
dict_t *dict, char *default_sl, int count,
int op_ret, int op_errno, char *op_errstr)
{
int ret = -1;
int i = 0;
- char key[1024] = {
+ char key[32] = {
0,
};
+ int keylen;
char mountdir[PATH_MAX] = {
0,
};
@@ -3059,7 +3041,7 @@ gf_cli_print_limit_list_from_dict(cli_local_t *local, char *volname,
if (!dict || count <= 0)
goto out;
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get quota type");
goto out;
@@ -3069,7 +3051,7 @@ gf_cli_print_limit_list_from_dict(cli_local_t *local, char *volname,
ret = cli_xml_output_vol_quota_limit_list_begin(local, op_ret, op_errno,
op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting xml begin");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
} else {
@@ -3077,13 +3059,11 @@ gf_cli_print_limit_list_from_dict(cli_local_t *local, char *volname,
}
while (count--) {
- snprintf(key, sizeof(key), "path%d", i++);
+ keylen = snprintf(key, sizeof(key), "path%d", i++);
- ret = dict_get_str(dict, key, &path);
+ ret = dict_get_strn(dict, key, keylen, &path);
if (ret < 0) {
- gf_log("cli", GF_LOG_DEBUG,
- "Path not present in limit"
- " list");
+ gf_log("cli", GF_LOG_DEBUG, "Path not present in limit list");
continue;
}
@@ -3099,7 +3079,7 @@ out:
return ret;
}
-int
+static int
print_quota_list_from_quotad(call_frame_t *frame, dict_t *rsp_dict)
{
char *path = NULL;
@@ -3122,25 +3102,22 @@ print_quota_list_from_quotad(call_frame_t *frame, dict_t *rsp_dict)
local = frame->local;
gd_rsp_dict = local->dict;
- ret = dict_get_int32(rsp_dict, "type", &type);
+ ret = dict_get_int32_sizen(rsp_dict, "type", &type);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get type");
goto out;
}
- ret = dict_get_str(rsp_dict, GET_ANCESTRY_PATH_KEY, &path);
+ ret = dict_get_str_sizen(rsp_dict, GET_ANCESTRY_PATH_KEY, &path);
if (ret) {
- gf_log("cli", GF_LOG_WARNING,
- "path key is not present "
- "in dict");
+ gf_log("cli", GF_LOG_WARNING, "path key is not present in dict");
goto out;
}
- ret = dict_get_str(gd_rsp_dict, "default-soft-limit", &default_sl);
+ ret = dict_get_str_sizen(gd_rsp_dict, "default-soft-limit", &default_sl);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR,
- "failed to "
- "get default soft limit");
+ "failed to get default soft limit");
goto out;
}
@@ -3179,19 +3156,18 @@ print_quota_list_from_quotad(call_frame_t *frame, dict_t *rsp_dict)
LOCK(&local->lock);
{
- ret = dict_get_int32(gd_rsp_dict, "quota-list-success-count",
- &success_count);
+ ret = dict_get_int32_sizen(gd_rsp_dict, "quota-list-success-count",
+ &success_count);
if (ret)
success_count = 0;
- ret = dict_set_int32(gd_rsp_dict, "quota-list-success-count",
- success_count + 1);
+ ret = dict_set_int32_sizen(gd_rsp_dict, "quota-list-success-count",
+ success_count + 1);
}
UNLOCK(&local->lock);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to set "
- "quota-list-success-count in dict");
+ "Failed to set quota-list-success-count in dict");
goto out;
}
@@ -3201,9 +3177,7 @@ print_quota_list_from_quotad(call_frame_t *frame, dict_t *rsp_dict)
} else {
ret = cli_xml_output_vol_quota_limit_list_begin(local, 0, 0, NULL);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Error in "
- "printing xml output");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
}
@@ -3215,7 +3189,7 @@ out:
return ret;
}
-void *
+static void *
cli_cmd_broadcast_response_detached(void *opaque)
{
int32_t ret = 0;
@@ -3226,7 +3200,7 @@ cli_cmd_broadcast_response_detached(void *opaque)
return NULL;
}
-int32_t
+static int32_t
cli_quota_compare_path(struct list_head *list1, struct list_head *list2)
{
struct list_node *node1 = NULL;
@@ -3243,18 +3217,18 @@ cli_quota_compare_path(struct list_head *list1, struct list_head *list2)
dict1 = node1->ptr;
dict2 = node2->ptr;
- ret = dict_get_str(dict1, GET_ANCESTRY_PATH_KEY, &path1);
+ ret = dict_get_str_sizen(dict1, GET_ANCESTRY_PATH_KEY, &path1);
if (ret < 0)
return 0;
- ret = dict_get_str(dict2, GET_ANCESTRY_PATH_KEY, &path2);
+ ret = dict_get_str_sizen(dict2, GET_ANCESTRY_PATH_KEY, &path2);
if (ret < 0)
return 0;
return strcmp(path1, path2);
}
-int
+static int
cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -3284,19 +3258,18 @@ cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
LOCK(&local->lock);
{
- ret = dict_get_int32(local->dict, "quota-list-count", &list_count);
+ ret = dict_get_int32_sizen(local->dict, "quota-list-count",
+ &list_count);
if (ret)
list_count = 0;
list_count++;
- ret = dict_set_int32(local->dict, "quota-list-count", list_count);
+ ret = dict_set_int32_sizen(local->dict, "quota-list-count", list_count);
}
UNLOCK(&local->lock);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to set "
- "quota-list-count in dict");
+ gf_log("cli", GF_LOG_ERROR, "Failed to set quota-list-count in dict");
goto out;
}
@@ -3311,8 +3284,7 @@ cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -3331,9 +3303,7 @@ cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret < 0) {
- gf_log("cli", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -3347,7 +3317,7 @@ cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- ret = dict_get_int32(local->dict, "max_count", &max_count);
+ ret = dict_get_int32_sizen(local->dict, "max_count", &max_count);
if (ret < 0) {
gf_log("cli", GF_LOG_ERROR, "failed to get max_count");
goto out;
@@ -3383,9 +3353,7 @@ out:
ret = pthread_create(&th_id, NULL, cli_cmd_broadcast_response_detached,
(void *)-1);
if (ret)
- gf_log("cli", GF_LOG_ERROR,
- "pthread_create failed: "
- "%s",
+ gf_log("cli", GF_LOG_ERROR, "pthread_create failed: %s",
strerror(errno));
} else {
cli_cmd_broadcast_response(ret);
@@ -3395,7 +3363,7 @@ out:
return ret;
}
-int
+static int
cli_quotad_getlimit(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -3415,7 +3383,7 @@ cli_quotad_getlimit(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
&req.dict.dict_len);
if (ret < 0) {
- gf_log(this->name, GF_LOG_ERROR, "failed to serialize the data");
+ gf_log(this->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL);
goto out;
}
@@ -3426,28 +3394,29 @@ cli_quotad_getlimit(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-void
+static void
gf_cli_quota_list(cli_local_t *local, char *volname, dict_t *dict,
char *default_sl, int count, int op_ret, int op_errno,
char *op_errstr)
{
- GF_VALIDATE_OR_GOTO("cli", volname, out);
-
- if (!connected)
+ if (!cli_cmd_connected())
goto out;
- if (count > 0)
+ if (count > 0) {
+ GF_VALIDATE_OR_GOTO("cli", volname, out);
+
gf_cli_print_limit_list_from_dict(local, volname, dict, default_sl,
count, op_ret, op_errno, op_errstr);
+ }
out:
return;
}
-int
+static int
gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -3478,8 +3447,7 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -3491,9 +3459,7 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
if (strcmp(rsp.op_errstr, "")) {
cli_err("quota command failed : %s", rsp.op_errstr);
if (rsp.op_ret == -ENOENT)
- cli_err(
- "please enter the path relative to "
- "the volume");
+ cli_err("please enter the path relative to the volume");
} else {
cli_err("quota command : failed");
}
@@ -3507,24 +3473,17 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret < 0) {
- gf_log("cli", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
gf_log("cli", GF_LOG_DEBUG, "Received resp to quota command");
- ret = dict_get_str(dict, "volname", &volname);
- if (ret)
- gf_log(frame->this->name, GF_LOG_ERROR, "failed to get volname");
-
- ret = dict_get_str(dict, "default-soft-limit", &default_sl);
+ ret = dict_get_str_sizen(dict, "default-soft-limit", &default_sl);
if (ret)
gf_log(frame->this->name, GF_LOG_TRACE,
- "failed to get "
- "default soft limit");
+ "failed to get default soft limit");
// default-soft-limit is part of rsp_dict only iff we sent
// GLUSTER_CLI_QUOTA with type being GF_QUOTA_OPTION_TYPE_LIST
@@ -3534,8 +3493,8 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = -1;
goto out;
}
- ret = dict_set_dynstr(local->dict, "default-soft-limit",
- default_sl_dup);
+ ret = dict_set_dynstr_sizen(local->dict, "default-soft-limit",
+ default_sl_dup);
if (ret) {
gf_log(frame->this->name, GF_LOG_TRACE,
"failed to set default soft limit");
@@ -3543,11 +3502,15 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
+ if (ret)
+ gf_log(frame->this->name, GF_LOG_ERROR, "failed to get volname");
+
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret)
gf_log(frame->this->name, GF_LOG_TRACE, "failed to get type");
- ret = dict_get_int32(dict, "count", &entry_count);
+ ret = dict_get_int32_sizen(dict, "count", &entry_count);
if (ret)
gf_log(frame->this->name, GF_LOG_TRACE, "failed to get count");
@@ -3560,9 +3523,7 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_quota_limit_list_end(local);
if (ret < 0) {
ret = -1;
- gf_log("cli", GF_LOG_ERROR,
- "Error in printing"
- " xml output");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
goto out;
}
@@ -3574,7 +3535,7 @@ xml_output:
ret = cli_xml_output_str("volQuota", NULL, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -3599,7 +3560,7 @@ out:
return ret;
}
-int
+static int
gf_cli_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -3618,7 +3579,7 @@ gf_cli_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -3650,7 +3611,7 @@ out:
return ret;
}
-int
+static int
gf_cli_pmap_b2p_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -3669,7 +3630,7 @@ gf_cli_pmap_b2p_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_port_by_brick_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -3692,7 +3653,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_probe(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {
@@ -3711,9 +3672,9 @@ gf_cli_probe(call_frame_t *frame, xlator_t *this, void *data)
dict = data;
- ret = dict_get_int32(dict, "port", &port);
+ ret = dict_get_int32_sizen(dict, "port", &port);
if (ret) {
- ret = dict_set_int32(dict, "port", CLI_GLUSTERD_PORT);
+ ret = dict_set_int32_sizen(dict, "port", CLI_GLUSTERD_PORT);
if (ret)
goto out;
}
@@ -3724,12 +3685,12 @@ gf_cli_probe(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_deprobe(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {
@@ -3748,16 +3709,16 @@ gf_cli_deprobe(call_frame_t *frame, xlator_t *this, void *data)
}
dict = data;
- ret = dict_get_int32(dict, "port", &port);
+ ret = dict_get_int32_sizen(dict, "port", &port);
if (ret) {
- ret = dict_set_int32(dict, "port", CLI_GLUSTERD_PORT);
+ ret = dict_set_int32_sizen(dict, "port", CLI_GLUSTERD_PORT);
if (ret)
goto out;
}
- ret = dict_get_int32(dict, "flags", &flags);
+ ret = dict_get_int32_sizen(dict, "flags", &flags);
if (ret) {
- ret = dict_set_int32(dict, "flags", 0);
+ ret = dict_set_int32_sizen(dict, "flags", 0);
if (ret)
goto out;
}
@@ -3768,12 +3729,12 @@ gf_cli_deprobe(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_list_friends(call_frame_t *frame, xlator_t *this, void *data)
{
gf1_cli_peer_list_req req = {
@@ -3806,11 +3767,11 @@ out:
*/
frame->local = NULL;
}
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_get_state(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {
@@ -3821,24 +3782,18 @@ gf_cli_get_state(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_get_state_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_GET_STATE, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_get_next_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -3856,7 +3811,7 @@ gf_cli_get_next_volume(call_frame_t *frame, xlator_t *this, void *data)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_info_begin(local, 0, 0, "");
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
}
@@ -3884,15 +3839,15 @@ end_xml:
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_info_end(local);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -3903,7 +3858,7 @@ gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data)
dict_t *dict = NULL;
int32_t flags = 0;
- if (!frame || !this || !data) {
+ if (!this || !data) {
ret = -1;
goto out;
}
@@ -3918,13 +3873,13 @@ gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data)
}
if (ctx->volname) {
- ret = dict_set_str(dict, "volname", ctx->volname);
+ ret = dict_set_str_sizen(dict, "volname", ctx->volname);
if (ret)
goto out;
}
flags = ctx->flags;
- ret = dict_set_int32(dict, "flags", flags);
+ ret = dict_set_int32_sizen(dict, "flags", flags);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "failed to set flags");
goto out;
@@ -3933,7 +3888,7 @@ gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
&req.dict.dict_len);
if (ret) {
- gf_log(frame->this->name, GF_LOG_ERROR, "failed to serialize dict");
+ gf_log(frame->this->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL);
goto out;
}
@@ -3947,11 +3902,11 @@ out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli3_1_uuid_get(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -3960,22 +3915,16 @@ gf_cli3_1_uuid_get(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli3_1_uuid_get_cbk,
(xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_UUID_GET,
this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli3_1_uuid_reset(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -3984,22 +3933,16 @@ gf_cli3_1_uuid_reset(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli3_1_uuid_reset_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_UUID_RESET, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_create_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4008,26 +3951,19 @@ gf_cli_create_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_create_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_CREATE_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_delete_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4036,25 +3972,18 @@ gf_cli_delete_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_delete_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_DELETE_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_start_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4063,25 +3992,19 @@ gf_cli_start_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_start_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_START_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_stop_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4090,25 +4013,18 @@ gf_cli_stop_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = data;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_stop_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_STOP_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_defrag_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4117,25 +4033,18 @@ gf_cli_defrag_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_defrag_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_DEFRAG_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_rename_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4154,7 +4063,7 @@ gf_cli_rename_volume(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
&req.dict.dict_len);
if (ret < 0) {
- gf_log(this->name, GF_LOG_ERROR, "failed to serialize the data");
+ gf_log(this->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL);
goto out;
}
@@ -4165,12 +4074,12 @@ gf_cli_rename_volume(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_reset_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4179,25 +4088,18 @@ gf_cli_reset_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_reset_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_RESET_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
-gf_cli_set_volume(call_frame_t *frame, xlator_t *this, void *data)
+static int32_t
+gf_cli_ganesha(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
0,
@@ -4205,19 +4107,31 @@ gf_cli_set_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
+ dict = data;
+
+ ret = cli_to_glusterd(&req, frame, gf_cli_ganesha_cbk,
+ (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_GANESHA,
+ this, cli_rpc_prog, NULL);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
+
+ return ret;
+}
+
+static int32_t
+gf_cli_set_volume(call_frame_t *frame, xlator_t *this, void *data)
+{
+ gf_cli_req req = {{
+ 0,
+ }};
+ int ret = 0;
+ dict_t *dict = NULL;
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_set_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_SET_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
@@ -4231,38 +4145,21 @@ gf_cli_add_brick(call_frame_t *frame, xlator_t *this, void *data)
}};
int ret = 0;
dict_t *dict = NULL;
- char *volname = NULL;
- int32_t count = 0;
-
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
dict = data;
- ret = dict_get_str(dict, "volname", &volname);
-
- if (ret)
- goto out;
-
- ret = dict_get_int32(dict, "count", &count);
- if (ret)
- goto out;
-
ret = cli_to_glusterd(&req, frame, gf_cli_add_brick_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_ADD_BRICK, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4276,21 +4173,16 @@ gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
int32_t command = 0;
- char *volname = NULL;
int32_t cmd = 0;
- if (!frame || !this || !data) {
+ if (!frame || !this) {
ret = -1;
goto out;
}
dict = data;
- ret = dict_get_str(dict, "volname", &volname);
- if (ret)
- goto out;
-
- ret = dict_get_int32(dict, "command", &command);
+ ret = dict_get_int32_sizen(dict, "command", &command);
if (ret)
goto out;
@@ -4305,7 +4197,7 @@ gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data)
else
cmd |= GF_DEFRAG_CMD_STOP;
- ret = dict_set_int32(dict, "rebalance-command", (int32_t)cmd);
+ ret = dict_set_int32_sizen(dict, "rebalance-command", (int32_t)cmd);
if (ret) {
gf_log(this->name, GF_LOG_ERROR, "Failed to set dict");
goto out;
@@ -4318,7 +4210,7 @@ gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data)
}
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
@@ -4327,7 +4219,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_reset_brick(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4336,8 +4228,6 @@ gf_cli_reset_brick(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
char *dst_brick = NULL;
- char *src_brick = NULL;
- char *volname = NULL;
char *op = NULL;
if (!frame || !this || !data) {
@@ -4347,49 +4237,34 @@ gf_cli_reset_brick(call_frame_t *frame, xlator_t *this, void *data)
dict = data;
- ret = dict_get_str(dict, "operation", &op);
+ ret = dict_get_str_sizen(dict, "operation", &op);
if (ret) {
gf_log(this->name, GF_LOG_DEBUG, "dict_get on operation failed");
goto out;
}
- ret = dict_get_str(dict, "volname", &volname);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on volname failed");
- goto out;
- }
-
- ret = dict_get_str(dict, "src-brick", &src_brick);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on src-brick failed");
- goto out;
- }
-
if (!strcmp(op, "GF_RESET_OP_COMMIT") ||
!strcmp(op, "GF_RESET_OP_COMMIT_FORCE")) {
- ret = dict_get_str(dict, "dst-brick", &dst_brick);
+ ret = dict_get_str_sizen(dict, "dst-brick", &dst_brick);
if (ret) {
gf_log(this->name, GF_LOG_DEBUG, "dict_get on dst-brick failed");
goto out;
}
}
- gf_log(this->name, GF_LOG_DEBUG, "Received command reset-brick %s on %s.",
- op, src_brick);
-
ret = cli_to_glusterd(&req, frame, gf_cli_reset_brick_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_RESET_BRICK, this, cli_rpc_prog, NULL);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_replace_brick(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4397,9 +4272,6 @@ gf_cli_replace_brick(call_frame_t *frame, xlator_t *this, void *data)
}};
int ret = 0;
dict_t *dict = NULL;
- char *src_brick = NULL;
- char *dst_brick = NULL;
- char *volname = NULL;
int32_t op = 0;
if (!frame || !this || !data) {
@@ -4409,47 +4281,25 @@ gf_cli_replace_brick(call_frame_t *frame, xlator_t *this, void *data)
dict = data;
- ret = dict_get_int32(dict, "operation", &op);
+ ret = dict_get_int32_sizen(dict, "operation", &op);
if (ret) {
gf_log(this->name, GF_LOG_DEBUG, "dict_get on operation failed");
goto out;
}
- ret = dict_get_str(dict, "volname", &volname);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on volname failed");
- goto out;
- }
-
- ret = dict_get_str(dict, "src-brick", &src_brick);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on src-brick failed");
- goto out;
- }
-
- ret = dict_get_str(dict, "dst-brick", &dst_brick);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on dst-brick failed");
- goto out;
- }
-
- gf_log(this->name, GF_LOG_DEBUG,
- "Received command replace-brick %s with "
- "%s with operation=%d",
- src_brick, dst_brick, op);
ret = cli_to_glusterd(&req, frame, gf_cli_replace_brick_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_REPLACE_BRICK, this, cli_rpc_prog, NULL);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_log_rotate(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4458,25 +4308,18 @@ gf_cli_log_rotate(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_log_rotate_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_LOG_ROTATE, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_sync_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -4485,25 +4328,18 @@ gf_cli_sync_volume(call_frame_t *frame, xlator_t *this, void *data)
}};
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_sync_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_SYNC_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_getspec(call_frame_t *frame, xlator_t *this, void *data)
{
gf_getspec_req req = {
@@ -4513,14 +4349,14 @@ gf_cli_getspec(call_frame_t *frame, xlator_t *this, void *data)
dict_t *dict = NULL;
dict_t *op_dict = NULL;
- if (!frame || !this || !data) {
+ if (!frame || !this) {
ret = -1;
goto out;
}
dict = data;
- ret = dict_get_str(dict, "volid", &req.key);
+ ret = dict_get_str_sizen(dict, "volid", &req.key);
if (ret)
goto out;
@@ -4532,26 +4368,24 @@ gf_cli_getspec(call_frame_t *frame, xlator_t *this, void *data)
// Set the supported min and max op-versions, so glusterd can make a
// decision
- ret = dict_set_int32(op_dict, "min-op-version", GD_OP_VERSION_MIN);
+ ret = dict_set_int32_sizen(op_dict, "min-op-version", GD_OP_VERSION_MIN);
if (ret) {
gf_log(THIS->name, GF_LOG_ERROR,
- "Failed to set min-op-version"
- " in request dict");
+ "Failed to set min-op-version in request dict");
goto out;
}
- ret = dict_set_int32(op_dict, "max-op-version", GD_OP_VERSION_MAX);
+ ret = dict_set_int32_sizen(op_dict, "max-op-version", GD_OP_VERSION_MAX);
if (ret) {
gf_log(THIS->name, GF_LOG_ERROR,
- "Failed to set max-op-version"
- " in request dict");
+ "Failed to set max-op-version in request dict");
goto out;
}
ret = dict_allocate_and_serialize(op_dict, &req.xdata.xdata_val,
&req.xdata.xdata_len);
if (ret < 0) {
- gf_log(THIS->name, GF_LOG_ERROR, "Failed to serialize dictionary");
+ gf_log(THIS->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL);
goto out;
}
@@ -4564,12 +4398,12 @@ out:
dict_unref(op_dict);
}
GF_FREE(req.xdata.xdata_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_quota(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4578,24 +4412,17 @@ gf_cli_quota(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_quota_cbk,
(xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_QUOTA,
this, cli_rpc_prog, NULL);
-
-out:
GF_FREE(req.dict.dict_val);
-
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_pmap_b2p(call_frame_t *frame, xlator_t *this, void *data)
{
pmap_port_by_brick_req req = {
@@ -4604,14 +4431,14 @@ gf_cli_pmap_b2p(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
+ if (!frame || !this) {
ret = -1;
goto out;
}
dict = data;
- ret = dict_get_str(dict, "brick", &req.brick);
+ ret = dict_get_str_sizen(dict, "brick", &req.brick);
if (ret)
goto out;
@@ -4620,7 +4447,7 @@ gf_cli_pmap_b2p(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_pmap_port_by_brick_req);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
@@ -4635,7 +4462,8 @@ gf_cli_fsm_log_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = -1;
dict_t *dict = NULL;
int tr_count = 0;
- char key[256] = {0};
+ char key[64] = {0};
+ int keylen;
int i = 0;
char *old_state = NULL;
char *new_state = NULL;
@@ -4651,7 +4479,7 @@ gf_cli_fsm_log_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -4673,33 +4501,33 @@ gf_cli_fsm_log_cbk(struct rpc_req *req, struct iovec *iov, int count,
&dict);
if (ret) {
- cli_err("bad response");
+ cli_err(DICT_UNSERIALIZE_FAIL);
goto out;
}
- ret = dict_get_int32(dict, "count", &tr_count);
- if (tr_count)
+ ret = dict_get_int32_sizen(dict, "count", &tr_count);
+ if (!ret && tr_count)
cli_out("number of transitions: %d", tr_count);
else
cli_err("No transitions");
for (i = 0; i < tr_count; i++) {
- snprintf(key, sizeof(key), "log%d-old-state", i);
- ret = dict_get_str(dict, key, &old_state);
+ keylen = snprintf(key, sizeof(key), "log%d-old-state", i);
+ ret = dict_get_strn(dict, key, keylen, &old_state);
if (ret)
goto out;
- snprintf(key, sizeof(key), "log%d-event", i);
- ret = dict_get_str(dict, key, &event);
+ keylen = snprintf(key, sizeof(key), "log%d-event", i);
+ ret = dict_get_strn(dict, key, keylen, &event);
if (ret)
goto out;
- snprintf(key, sizeof(key), "log%d-new-state", i);
- ret = dict_get_str(dict, key, &new_state);
+ keylen = snprintf(key, sizeof(key), "log%d-new-state", i);
+ ret = dict_get_strn(dict, key, keylen, &new_state);
if (ret)
goto out;
- snprintf(key, sizeof(key), "log%d-time", i);
- ret = dict_get_str(dict, key, &time);
+ keylen = snprintf(key, sizeof(key), "log%d-time", i);
+ ret = dict_get_strn(dict, key, keylen, &time);
if (ret)
goto out;
cli_out(
@@ -4722,7 +4550,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_fsm_log(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -4742,12 +4570,12 @@ gf_cli_fsm_log(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gf1_cli_fsm_log_req);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli_gsync_config_command(dict_t *dict)
{
runner_t runner = {
@@ -4762,7 +4590,7 @@ gf_cli_gsync_config_command(dict_t *dict)
int ret = -1;
char conf_path[PATH_MAX] = "";
- if (dict_get_str(dict, "subop", &subop) != 0)
+ if (dict_get_str_sizen(dict, "subop", &subop) != 0)
return -1;
if (strcmp(subop, "get") != 0 && strcmp(subop, "get-all") != 0) {
@@ -4770,16 +4598,16 @@ gf_cli_gsync_config_command(dict_t *dict)
return 0;
}
- if (dict_get_str(dict, "glusterd_workdir", &gwd) != 0 ||
- dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "glusterd_workdir", &gwd) != 0 ||
+ dict_get_str_sizen(dict, "slave", &slave) != 0)
return -1;
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = NULL;
- if (dict_get_str(dict, "op_name", &op_name) != 0)
+ if (dict_get_str_sizen(dict, "op_name", &op_name) != 0)
op_name = NULL;
- ret = dict_get_str(dict, "conf_path", &confpath);
+ ret = dict_get_str_sizen(dict, "conf_path", &confpath);
if (ret || !confpath) {
ret = snprintf(conf_path, sizeof(conf_path) - 1,
"%s/" GEOREP "/gsyncd_template.conf", gwd);
@@ -4801,7 +4629,7 @@ gf_cli_gsync_config_command(dict_t *dict)
return runner_run(&runner);
}
-int
+static int
gf_cli_print_status(char **title_values, gf_gsync_status_t **sts_vals,
int *spacing, int gsync_count, int number_of_fields,
int is_detail)
@@ -4830,7 +4658,7 @@ gf_cli_print_status(char **title_values, gf_gsync_status_t **sts_vals,
total_spacing += 4; /* For the spacing between the fields */
/* char pointers for each field */
- output_values = GF_CALLOC(number_of_fields, sizeof(char *),
+ output_values = GF_MALLOC(number_of_fields * sizeof(char *),
gf_common_mt_char);
if (!output_values) {
ret = -1;
@@ -4845,12 +4673,6 @@ gf_cli_print_status(char **title_values, gf_gsync_status_t **sts_vals,
}
}
- hyphens = GF_CALLOC(total_spacing + 1, sizeof(char), gf_common_mt_char);
- if (!hyphens) {
- ret = -1;
- goto out;
- }
-
cli_out(" ");
/* setting the title "NODE", "MASTER", etc. from title_values[]
@@ -4873,6 +4695,12 @@ gf_cli_print_status(char **title_values, gf_gsync_status_t **sts_vals,
output_values[10], output_values[11], output_values[12],
output_values[13], output_values[14], output_values[15]);
+ hyphens = GF_MALLOC((total_spacing + 1) * sizeof(char), gf_common_mt_char);
+ if (!hyphens) {
+ ret = -1;
+ goto out;
+ }
+
/* setting and printing the hyphens */
memset(hyphens, '-', total_spacing);
hyphens[total_spacing] = '\0';
@@ -4937,7 +4765,7 @@ gf_gsync_status_t_comparator(const void *p, const void *q)
return strcmp(slavekey1, slavekey2);
}
-int
+static int
gf_cli_read_status_data(dict_t *dict, gf_gsync_status_t **sts_vals,
int *spacing, int gsync_count, int number_of_fields)
{
@@ -4977,7 +4805,7 @@ out:
return ret;
}
-int
+static int
gf_cli_gsync_status_output(dict_t *dict, gf_boolean_t is_detail)
{
int gsync_count = 0;
@@ -4988,47 +4816,43 @@ gf_cli_gsync_status_output(dict_t *dict, gf_boolean_t is_detail)
char errmsg[1024] = "";
char *master = NULL;
char *slave = NULL;
- char *title_values[] = {"MASTER NODE",
- "MASTER VOL",
- "MASTER BRICK",
- "SLAVE USER",
- "SLAVE",
- "SLAVE NODE",
- "STATUS",
- "CRAWL STATUS",
- "LAST_SYNCED",
- "ENTRY",
- "DATA",
- "META",
- "FAILURES",
- "CHECKPOINT TIME",
- "CHECKPOINT COMPLETED",
- "CHECKPOINT COMPLETION TIME"};
+ static char *title_values[] = {"MASTER NODE",
+ "MASTER VOL",
+ "MASTER BRICK",
+ "SLAVE USER",
+ "SLAVE",
+ "SLAVE NODE",
+ "STATUS",
+ "CRAWL STATUS",
+ "LAST_SYNCED",
+ "ENTRY",
+ "DATA",
+ "META",
+ "FAILURES",
+ "CHECKPOINT TIME",
+ "CHECKPOINT COMPLETED",
+ "CHECKPOINT COMPLETION TIME"};
gf_gsync_status_t **sts_vals = NULL;
/* Checks if any session is active or not */
- ret = dict_get_int32(dict, "gsync-count", &gsync_count);
+ ret = dict_get_int32_sizen(dict, "gsync-count", &gsync_count);
if (ret) {
- ret = dict_get_str(dict, "master", &master);
+ ret = dict_get_str_sizen(dict, "master", &master);
- ret = dict_get_str(dict, "slave", &slave);
+ ret = dict_get_str_sizen(dict, "slave", &slave);
if (master) {
if (slave)
snprintf(errmsg, sizeof(errmsg),
- "No active "
- "geo-replication sessions between %s"
+ "No active geo-replication sessions between %s"
" and %s",
master, slave);
else
snprintf(errmsg, sizeof(errmsg),
- "No active "
- "geo-replication sessions for %s",
- master);
+ "No active geo-replication sessions for %s", master);
} else
snprintf(errmsg, sizeof(errmsg),
- "No active "
- "geo-replication sessions");
+ "No active geo-replication sessions");
gf_log("cli", GF_LOG_INFO, "%s", errmsg);
cli_out("%s", errmsg);
@@ -5048,14 +4872,6 @@ gf_cli_gsync_status_output(dict_t *dict, gf_boolean_t is_detail)
ret = -1;
goto out;
}
- for (i = 0; i < gsync_count; i++) {
- sts_vals[i] = GF_CALLOC(1, sizeof(gf_gsync_status_t),
- gf_common_mt_char);
- if (!sts_vals[i]) {
- ret = -1;
- goto out;
- }
- }
ret = gf_cli_read_status_data(dict, sts_vals, spacing, gsync_count,
num_of_fields);
@@ -5084,13 +4900,13 @@ write_contents_to_common_pem_file(dict_t *dict, int output_count)
char *workdir = NULL;
char common_pem_file[PATH_MAX] = "";
char *output = NULL;
- char output_name[PATH_MAX] = "";
+ char output_name[32] = "";
int bytes_written = 0;
int fd = -1;
int ret = -1;
int i = -1;
- ret = dict_get_str(dict, "glusterd_workdir", &workdir);
+ ret = dict_get_str_sizen(dict, "glusterd_workdir", &workdir);
if (ret || !workdir) {
gf_log("", GF_LOG_ERROR, "Unable to fetch workdir");
ret = -1;
@@ -5104,17 +4920,15 @@ write_contents_to_common_pem_file(dict_t *dict, int output_count)
fd = open(common_pem_file, O_WRONLY | O_CREAT, 0600);
if (fd == -1) {
- gf_log("", GF_LOG_ERROR,
- "Failed to open %s"
- " Error : %s",
+ gf_log("", GF_LOG_ERROR, "Failed to open %s Error : %s",
common_pem_file, strerror(errno));
ret = -1;
goto out;
}
for (i = 1; i <= output_count; i++) {
- snprintf(output_name, sizeof(output_name), "output_%d", i);
- ret = dict_get_str(dict, output_name, &output);
+ ret = snprintf(output_name, sizeof(output_name), "output_%d", i);
+ ret = dict_get_strn(dict, output_name, ret, &output);
if (ret) {
gf_log("", GF_LOG_ERROR, "Failed to get %s.", output_name);
cli_out("Unable to fetch output.");
@@ -5122,9 +4936,7 @@ write_contents_to_common_pem_file(dict_t *dict, int output_count)
if (output) {
bytes_written = sys_write(fd, output, strlen(output));
if (bytes_written != strlen(output)) {
- gf_log("", GF_LOG_ERROR,
- "Failed to write "
- "to %s",
+ gf_log("", GF_LOG_ERROR, "Failed to write to %s",
common_pem_file);
ret = -1;
goto out;
@@ -5146,11 +4958,11 @@ out:
if (fd >= 0)
sys_close(fd);
- gf_log("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5159,7 +4971,7 @@ gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
int i = -1;
char *output = NULL;
char *command = NULL;
- char output_name[PATH_MAX] = "";
+ char output_name[32] = "";
gf_cli_rsp rsp = {
0,
};
@@ -5174,7 +4986,7 @@ gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5196,14 +5008,14 @@ gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- ret = dict_get_int32(dict, "output_count", &output_count);
+ ret = dict_get_int32_sizen(dict, "output_count", &output_count);
if (ret) {
cli_out("Command executed successfully.");
ret = 0;
goto out;
}
- ret = dict_get_str(dict, "command", &command);
+ ret = dict_get_str_sizen(dict, "command", &command);
if (ret) {
gf_log("", GF_LOG_ERROR, "Unable to get command from dict");
goto out;
@@ -5216,8 +5028,8 @@ gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
for (i = 1; i <= output_count; i++) {
- snprintf(output_name, sizeof(output_name), "output_%d", i);
- ret = dict_get_str(dict, output_name, &output);
+ ret = snprintf(output_name, sizeof(output_name), "output_%d", i);
+ ret = dict_get_strn(dict, output_name, ret, &output);
if (ret) {
gf_log("", GF_LOG_ERROR, "Failed to get %s.", output_name);
cli_out("Unable to fetch output.");
@@ -5237,7 +5049,7 @@ out:
return ret;
}
-int
+static int
gf_cli_copy_file_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5256,7 +5068,7 @@ gf_cli_copy_file_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5288,7 +5100,7 @@ out:
return ret;
}
-int
+static int
gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5312,7 +5124,7 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5332,7 +5144,7 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_gsync(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -5343,11 +5155,11 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- ret = dict_get_str(dict, "gsync-status", &gsync_status);
+ ret = dict_get_str_sizen(dict, "gsync-status", &gsync_status);
if (!ret)
cli_out("%s", gsync_status);
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
"failed to get type");
@@ -5357,29 +5169,25 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
switch (type) {
case GF_GSYNC_OPTION_TYPE_START:
case GF_GSYNC_OPTION_TYPE_STOP:
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = "???";
- if (dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "slave", &slave) != 0)
slave = "???";
cli_out(
- "%s " GEOREP
- " session between %s & %s"
- " has been successful",
+ "%s " GEOREP " session between %s & %s has been successful",
type == GF_GSYNC_OPTION_TYPE_START ? "Starting" : "Stopping",
master, slave);
break;
case GF_GSYNC_OPTION_TYPE_PAUSE:
case GF_GSYNC_OPTION_TYPE_RESUME:
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = "???";
- if (dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "slave", &slave) != 0)
slave = "???";
- cli_out("%s " GEOREP
- " session between %s & %s"
- " has been successful",
+ cli_out("%s " GEOREP " session between %s & %s has been successful",
type == GF_GSYNC_OPTION_TYPE_PAUSE ? "Pausing" : "Resuming",
master, slave);
break;
@@ -5395,24 +5203,22 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
break;
case GF_GSYNC_OPTION_TYPE_DELETE:
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = "???";
- if (dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "slave", &slave) != 0)
slave = "???";
cli_out("Deleting " GEOREP
- " session between %s & %s"
- " has been successful",
+ " session between %s & %s has been successful",
master, slave);
break;
case GF_GSYNC_OPTION_TYPE_CREATE:
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = "???";
- if (dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "slave", &slave) != 0)
slave = "???";
cli_out("Creating " GEOREP
- " session between %s & %s"
- " has been successful",
+ " session between %s & %s has been successful",
master, slave);
break;
@@ -5428,7 +5234,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_sys_exec(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -5437,23 +5243,22 @@ gf_cli_sys_exec(call_frame_t *frame, xlator_t *this, void *data)
0,
}};
- if (!frame || !this || !data) {
- ret = -1;
- gf_log("cli", GF_LOG_ERROR, "Invalid data");
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_sys_exec_cbk,
(xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_SYS_EXEC,
this, cli_rpc_prog, NULL);
-out:
+ if (ret)
+ if (!frame || !this || !data) {
+ ret = -1;
+ gf_log("cli", GF_LOG_ERROR, "Invalid data");
+ }
+
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_copy_file(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -5462,23 +5267,22 @@ gf_cli_copy_file(call_frame_t *frame, xlator_t *this, void *data)
0,
}};
- if (!frame || !this || !data) {
- ret = -1;
- gf_log("cli", GF_LOG_ERROR, "Invalid data");
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_copy_file_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_COPY_FILE, this, cli_rpc_prog, NULL);
-out:
+ if (ret)
+ if (!frame || !this || !data) {
+ ret = -1;
+ gf_log("cli", GF_LOG_ERROR, "Invalid data");
+ }
+
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_gsync_set(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -5487,18 +5291,11 @@ gf_cli_gsync_set(call_frame_t *frame, xlator_t *this, void *data)
0,
}};
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_gsync_set_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_GSYNC_SET, this, cli_rpc_prog, NULL);
-
-out:
GF_FREE(req.dict.dict_val);
return ret;
@@ -5509,20 +5306,18 @@ cli_profile_info_percentage_cmp(void *a, void *b)
{
cli_profile_info_t *ia = NULL;
cli_profile_info_t *ib = NULL;
- int ret = 0;
ia = a;
ib = b;
if (ia->percentage_avg_latency < ib->percentage_avg_latency)
- ret = -1;
+ return -1;
else if (ia->percentage_avg_latency > ib->percentage_avg_latency)
- ret = 1;
- else
- ret = 0;
- return ret;
+ return 1;
+
+ return 0;
}
-void
+static void
cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
{
char key[256] = {0};
@@ -5546,7 +5341,8 @@ cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
double total_percentage_latency = 0;
for (i = 0; i < 32; i++) {
- snprintf(key, sizeof(key), "%d-%d-read-%d", count, interval, (1 << i));
+ snprintf(key, sizeof(key), "%d-%d-read-%" PRIu32, count, interval,
+ (1U << i));
ret = dict_get_uint64(dict, key, &rb_counts[i]);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key);
@@ -5554,7 +5350,8 @@ cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
}
for (i = 0; i < 32; i++) {
- snprintf(key, sizeof(key), "%d-%d-write-%d", count, interval, (1 << i));
+ snprintf(key, sizeof(key), "%d-%d-write-%" PRIu32, count, interval,
+ (1U << i));
ret = dict_get_uint64(dict, key, &wb_counts[i]);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key);
@@ -5640,7 +5437,8 @@ cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
if ((rb_counts[i] == 0) && (wb_counts[i] == 0))
continue;
per_line++;
- snprintf(output + index, sizeof(output) - index, "%19db+ ", (1 << i));
+ snprintf(output + index, sizeof(output) - index, "%19" PRIu32 "b+ ",
+ (1U << i));
if (rb_counts[i]) {
snprintf(read_blocks + index, sizeof(read_blocks) - index,
"%21" PRId64 " ", rb_counts[i]);
@@ -5717,7 +5515,7 @@ cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
cli_out(" ");
}
-int32_t
+static int32_t
gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5727,7 +5525,8 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = -1;
dict_t *dict = NULL;
gf1_cli_stats_op op = GF_CLI_STATS_NONE;
- char key[256] = {0};
+ char key[64] = {0};
+ int len;
int interval = 0;
int i = 1;
int32_t brick_count = 0;
@@ -5749,7 +5548,7 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5763,7 +5562,7 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -5771,15 +5570,15 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_profile(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_int32_sizen(dict, "op", (int32_t *)&op);
if (ret)
goto out;
- ret = dict_get_int32(dict, "op", (int32_t *)&op);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
@@ -5814,11 +5613,7 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- ret = dict_get_int32(dict, "info-op", (int32_t *)&info_op);
- if (ret)
- goto out;
-
- ret = dict_get_int32(dict, "count", &brick_count);
+ ret = dict_get_int32_sizen(dict, "count", &brick_count);
if (ret)
goto out;
@@ -5827,9 +5622,13 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
+ ret = dict_get_int32_sizen(dict, "info-op", (int32_t *)&info_op);
+ if (ret)
+ goto out;
+
while (i <= brick_count) {
- snprintf(key, sizeof(key), "%d-brick", i);
- ret = dict_get_str(dict, key, &brick);
+ len = snprintf(key, sizeof(key), "%d-brick", i);
+ ret = dict_get_strn(dict, key, len, &brick);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Couldn't get brick name");
goto out;
@@ -5838,28 +5637,28 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_get_str_boolean(dict, "nfs", _gf_false);
if (ret)
- snprintf(str, sizeof(str), "NFS Server : %s", brick);
+ len = snprintf(str, sizeof(str), "NFS Server : %s", brick);
else
- snprintf(str, sizeof(str), "Brick: %s", brick);
+ len = snprintf(str, sizeof(str), "Brick: %s", brick);
cli_out("%s", str);
- memset(str, '-', strlen(str));
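+ /* 'len' still holds the length returned by snprintf() above, so strlen() is unnecessary */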
+ memset(str, '-', len);
cli_out("%s", str);
if (GF_CLI_INFO_CLEAR == info_op) {
- snprintf(key, sizeof(key), "%d-stats-cleared", i);
- ret = dict_get_int32(dict, key, &stats_cleared);
+ len = snprintf(key, sizeof(key), "%d-stats-cleared", i);
+ ret = dict_get_int32n(dict, key, len, &stats_cleared);
if (ret)
goto out;
cli_out(stats_cleared ? "Cleared stats."
: "Failed to clear stats.");
} else {
- snprintf(key, sizeof(key), "%d-cumulative", i);
- ret = dict_get_int32(dict, key, &interval);
+ len = snprintf(key, sizeof(key), "%d-cumulative", i);
+ ret = dict_get_int32n(dict, key, len, &interval);
if (ret == 0)
cmd_profile_volume_brick_out(dict, i, interval);
- snprintf(key, sizeof(key), "%d-interval", i);
- ret = dict_get_int32(dict, key, &interval);
+ len = snprintf(key, sizeof(key), "%d-interval", i);
+ ret = dict_get_int32n(dict, key, len, &interval);
if (ret == 0)
cmd_profile_volume_brick_out(dict, i, interval);
}
@@ -5875,7 +5674,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_profile_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -5888,22 +5687,18 @@ gf_cli_profile_volume(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(this);
GF_ASSERT(data);
- if (!frame || !this || !data)
- goto out;
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_profile_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_PROFILE_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5914,6 +5709,7 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
dict_t *dict = NULL;
gf1_cli_stats_op op = GF_CLI_STATS_NONE;
char key[256] = {0};
+ int keylen;
int i = 0;
int32_t brick_count = 0;
char brick[1024];
@@ -5929,7 +5725,7 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
double time = 0;
int32_t time_sec = 0;
long int time_usec = 0;
- char timestr[256] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
char *openfd_str = NULL;
@@ -5947,7 +5743,7 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5969,11 +5765,11 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
- ret = dict_get_int32(dict, "op", (int32_t *)&op);
+ ret = dict_get_int32_sizen(dict, "op", (int32_t *)&op);
if (op != GF_CLI_STATS_TOP) {
ret = 0;
@@ -5984,16 +5780,16 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_top(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
goto out;
}
- ret = dict_get_int32(dict, "count", &brick_count);
+ ret = dict_get_int32_sizen(dict, "count", &brick_count);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%d-top-op", 1);
- ret = dict_get_int32(dict, key, (int32_t *)&top_op);
+ keylen = snprintf(key, sizeof(key), "%d-top-op", 1);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&top_op);
if (ret)
goto out;
@@ -6001,16 +5797,16 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
while (i < brick_count) {
i++;
- snprintf(brick, sizeof(brick), "%d-brick", i);
- ret = dict_get_str(dict, brick, &bricks);
+ keylen = snprintf(brick, sizeof(brick), "%d-brick", i);
+ ret = dict_get_strn(dict, brick, keylen, &bricks);
if (ret)
goto out;
nfs = dict_get_str_boolean(dict, "nfs", _gf_false);
if (clear_stats) {
- snprintf(key, sizeof(key), "%d-stats-cleared", i);
- ret = dict_get_int32(dict, key, &stats_cleared);
+ keylen = snprintf(key, sizeof(key), "%d-stats-cleared", i);
+ ret = dict_get_int32n(dict, key, keylen, &stats_cleared);
if (ret)
goto out;
cli_out(stats_cleared ? "Cleared stats for %s %s"
@@ -6024,8 +5820,8 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
else
cli_out("Brick: %s", bricks);
- snprintf(key, sizeof(key), "%d-members", i);
- ret = dict_get_int32(dict, key, &members);
+ keylen = snprintf(key, sizeof(key), "%d-members", i);
+ ret = dict_get_int32n(dict, key, keylen, &members);
switch (top_op) {
case GF_CLI_TOP_OPEN:
@@ -6037,13 +5833,12 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_get_uint64(dict, key, &max_nr_open);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%d-max-openfd-time", i);
- ret = dict_get_str(dict, key, &openfd_str);
+ keylen = snprintf(key, sizeof(key), "%d-max-openfd-time", i);
+ ret = dict_get_strn(dict, key, keylen, &openfd_str);
if (ret)
goto out;
- cli_out("Current open fds: %" PRIu64
- ", Max open"
- " fds: %" PRIu64 ", Max openfd time: %s",
+ cli_out("Current open fds: %" PRIu64 ", Max open fds: %" PRIu64
+ ", Max openfd time: %s",
nr_open, max_nr_open, openfd_str);
case GF_CLI_TOP_READ:
case GF_CLI_TOP_WRITE:
@@ -6081,8 +5876,8 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
for (j = 1; j <= members; j++) {
- snprintf(key, sizeof(key), "%d-filename-%d", i, j);
- ret = dict_get_str(dict, key, &filename);
+ keylen = snprintf(key, sizeof(key), "%d-filename-%d", i, j);
+ ret = dict_get_strn(dict, key, keylen, &filename);
if (ret)
break;
snprintf(key, sizeof(key), "%d-value-%d", i, j);
@@ -6091,12 +5886,12 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
if (top_op == GF_CLI_TOP_READ_PERF ||
top_op == GF_CLI_TOP_WRITE_PERF) {
- snprintf(key, sizeof(key), "%d-time-sec-%d", i, j);
- ret = dict_get_int32(dict, key, (int32_t *)&time_sec);
+ keylen = snprintf(key, sizeof(key), "%d-time-sec-%d", i, j);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&time_sec);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%d-time-usec-%d", i, j);
- ret = dict_get_int32(dict, key, (int32_t *)&time_usec);
+ keylen = snprintf(key, sizeof(key), "%d-time-usec-%d", i, j);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&time_usec);
if (ret)
goto out;
gf_time_fmt(timestr, sizeof timestr, time_sec, gf_timefmt_FT);
@@ -6130,7 +5925,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_top_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -6143,21 +5938,17 @@ gf_cli_top_volume(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(this);
GF_ASSERT(data);
- if (!frame || !this || !data)
- goto out;
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_top_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_PROFILE_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int
+static int
gf_cli_getwd_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -6175,7 +5966,7 @@ gf_cli_getwd_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_getwd_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -6200,7 +5991,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_getwd(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -6219,12 +6010,12 @@ gf_cli_getwd(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gf1_cli_getwd_req);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-void
+static void
cli_print_volume_status_mempool(dict_t *dict, char *prefix)
{
int ret = -1;
@@ -6237,16 +6028,18 @@ cli_print_volume_status_mempool(dict_t *dict, char *prefix)
int32_t maxalloc = 0;
uint64_t pool_misses = 0;
int32_t maxstdalloc = 0;
- char key[1024] = {
+ char key[128] = {
+ /* the prefix is short, typically 'brick%d' */
0,
};
+ int keylen;
int i = 0;
GF_ASSERT(dict);
GF_ASSERT(prefix);
- snprintf(key, sizeof(key), "%s.mempool-count", prefix);
- ret = dict_get_int32(dict, key, &mempool_count);
+ keylen = snprintf(key, sizeof(key), "%s.mempool-count", prefix);
+ ret = dict_get_int32n(dict, key, keylen, &mempool_count);
if (ret)
goto out;
@@ -6259,18 +6052,18 @@ cli_print_volume_status_mempool(dict_t *dict, char *prefix)
"------------");
for (i = 0; i < mempool_count; i++) {
- snprintf(key, sizeof(key), "%s.pool%d.name", prefix, i);
- ret = dict_get_str(dict, key, &name);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.name", prefix, i);
+ ret = dict_get_strn(dict, key, keylen, &name);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s.pool%d.hotcount", prefix, i);
- ret = dict_get_int32(dict, key, &hotcount);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.hotcount", prefix, i);
+ ret = dict_get_int32n(dict, key, keylen, &hotcount);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s.pool%d.coldcount", prefix, i);
- ret = dict_get_int32(dict, key, &coldcount);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.coldcount", prefix, i);
+ ret = dict_get_int32n(dict, key, keylen, &coldcount);
if (ret)
goto out;
@@ -6284,13 +6077,14 @@ cli_print_volume_status_mempool(dict_t *dict, char *prefix)
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s.pool%d.max_alloc", prefix, i);
- ret = dict_get_int32(dict, key, &maxalloc);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.max_alloc", prefix, i);
+ ret = dict_get_int32n(dict, key, keylen, &maxalloc);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s.pool%d.max-stdalloc", prefix, i);
- ret = dict_get_int32(dict, key, &maxstdalloc);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.max-stdalloc", prefix,
+ i);
+ ret = dict_get_int32n(dict, key, keylen, &maxstdalloc);
if (ret)
goto out;
@@ -6309,7 +6103,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_mem(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -6317,7 +6111,7 @@ cli_print_volume_status_mem(dict_t *dict, gf_boolean_t notbrick)
char *hostname = NULL;
char *path = NULL;
int online = -1;
- char key[1024] = {
+ char key[64] = {
0,
};
int brick_index_max = -1;
@@ -6328,15 +6122,15 @@ cli_print_volume_status_mem(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Memory status for volume : %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -6441,14 +6235,14 @@ out:
return;
}
-void
+static void
cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
int client_count = 0;
int current_count = 0;
- char key[1024] = {
+ char key[64] = {
0,
};
int i = 0;
@@ -6463,12 +6257,12 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Client connections for volume %s", volname);
- ret = dict_get_int32(dict, "client-count", &client_count);
+ ret = dict_get_int32_sizen(dict, "client-count", &client_count);
if (ret)
goto out;
@@ -6482,7 +6276,7 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
if (!strncmp(name, "fuse", 4)) {
if (!is_fuse_done) {
is_fuse_done = _gf_true;
- ret = dict_get_int32(dict, "fuse-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "fuse-count", &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6492,7 +6286,7 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strncmp(name, "gfapi", 5)) {
if (!is_gfapi_done) {
is_gfapi_done = _gf_true;
- ret = dict_get_int32(dict, "gfapi-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "gfapi-count", &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6502,7 +6296,8 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strcmp(name, "rebalance")) {
if (!is_rebalance_done) {
is_rebalance_done = _gf_true;
- ret = dict_get_int32(dict, "rebalance-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "rebalance-count",
+ &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6512,7 +6307,8 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strcmp(name, "glustershd")) {
if (!is_glustershd_done) {
is_glustershd_done = _gf_true;
- ret = dict_get_int32(dict, "glustershd-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "glustershd-count",
+ &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6522,7 +6318,8 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strcmp(name, "quotad")) {
if (!is_quotad_done) {
is_quotad_done = _gf_true;
- ret = dict_get_int32(dict, "quotad-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "quotad-count",
+ &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6532,7 +6329,7 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strcmp(name, "snapd")) {
if (!is_snapd_done) {
is_snapd_done = _gf_true;
- ret = dict_get_int32(dict, "snapd-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "snapd-count", &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6551,7 +6348,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_clients(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -6567,7 +6364,7 @@ cli_print_volume_status_clients(dict_t *dict, gf_boolean_t notbrick)
uint64_t bytesread = 0;
uint64_t byteswrite = 0;
uint32_t opversion = 0;
- char key[1024] = {
+ char key[128] = {
0,
};
int i = 0;
@@ -6575,15 +6372,15 @@ cli_print_volume_status_clients(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Client connections for volume %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -6656,7 +6453,8 @@ out:
return;
}
-void
+#ifdef DEBUG /* this function is only used in debug builds */
+static void
cli_print_volume_status_inode_entry(dict_t *dict, char *prefix)
{
int ret = -1;
@@ -6725,8 +6523,9 @@ cli_print_volume_status_inode_entry(dict_t *dict, char *prefix)
out:
return;
}
+#endif
-void
+static void
cli_print_volume_status_itables(dict_t *dict, char *prefix)
{
int ret = -1;
@@ -6812,7 +6611,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_inode(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -6824,7 +6623,7 @@ cli_print_volume_status_inode(dict_t *dict, gf_boolean_t notbrick)
char *path = NULL;
int online = -1;
int conn_count = 0;
- char key[1024] = {
+ char key[64] = {
0,
};
int i = 0;
@@ -6832,15 +6631,15 @@ cli_print_volume_status_inode(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Inode tables for volume %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -6897,7 +6696,7 @@ void
cli_print_volume_status_fdtable(dict_t *dict, char *prefix)
{
int ret = -1;
- char key[1024] = {
+ char key[256] = {
0,
};
int refcount = 0;
@@ -6965,7 +6764,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_fd(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -6977,7 +6776,7 @@ cli_print_volume_status_fd(dict_t *dict, gf_boolean_t notbrick)
char *path = NULL;
int online = -1;
int conn_count = 0;
- char key[1024] = {
+ char key[64] = {
0,
};
int i = 0;
@@ -6985,15 +6784,15 @@ cli_print_volume_status_fd(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("FD tables for volume %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -7046,7 +6845,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_call_frame(dict_t *dict, char *prefix)
{
int ret = -1;
@@ -7110,11 +6909,11 @@ cli_print_volume_status_call_frame(dict_t *dict, char *prefix)
cli_out(" Unwind To = %s", unwind_to);
}
-void
+static void
cli_print_volume_status_call_stack(dict_t *dict, char *prefix)
{
int ret = -1;
- char key[1024] = {
+ char key[256] = {
0,
};
int uid = 0;
@@ -7125,7 +6924,7 @@ cli_print_volume_status_call_stack(dict_t *dict, char *prefix)
int count = 0;
int i = 0;
- if (!dict || !prefix)
+ if (!prefix)
return;
snprintf(key, sizeof(key), "%s.uid", prefix);
@@ -7177,7 +6976,7 @@ cli_print_volume_status_call_stack(dict_t *dict, char *prefix)
cli_out(" ");
}
-void
+static void
cli_print_volume_status_callpool(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -7189,7 +6988,7 @@ cli_print_volume_status_callpool(dict_t *dict, gf_boolean_t notbrick)
char *path = NULL;
int online = -1;
int call_count = 0;
- char key[1024] = {
+ char key[64] = {
0,
};
int i = 0;
@@ -7197,15 +6996,15 @@ cli_print_volume_status_callpool(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Pending calls for volume %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -7282,16 +7081,16 @@ cli_print_volume_status_tasks(dict_t *dict)
};
char *brick = NULL;
- ret = dict_get_str(dict, "volname", &volname);
- if (ret)
- goto out;
-
- ret = dict_get_int32(dict, "tasks", &task_count);
+ ret = dict_get_int32_sizen(dict, "tasks", &task_count);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get tasks count");
return;
}
+ ret = dict_get_str_sizen(dict, "volname", &volname);
+ if (ret)
+ goto out;
+
cli_out("Task Status of Volume %s", volname);
cli_print_line(CLI_BRICK_STATUS_LINE_LEN);
@@ -7360,7 +7159,7 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
int pid = -1;
uint32_t cmd = 0;
gf_boolean_t notbrick = _gf_false;
- char key[1024] = {
+ char key[64] = {
0,
};
char *hostname = NULL;
@@ -7385,7 +7184,7 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -7407,8 +7206,7 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
else
snprintf(msg, sizeof(msg),
- "Unable to obtain volume "
- "status information.");
+ "Unable to obtain volume status information.");
if (global_state->mode & GLUSTER_MODE_XML) {
if (!local->all)
@@ -7455,32 +7253,25 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) ||
- (cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD) ||
- (cmd & GF_CLI_STATUS_BITD) || (cmd & GF_CLI_STATUS_SCRUB))
- notbrick = _gf_true;
-
if (global_state->mode & GLUSTER_MODE_XML) {
if (!local->all) {
ret = cli_xml_output_vol_status_begin(local, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto xml_end;
}
}
if (cmd & GF_CLI_STATUS_TASKS) {
ret = cli_xml_output_vol_status_tasks_detail(local, dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Error outputting "
- "to xml");
+ gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
goto xml_end;
}
} else {
ret = cli_xml_output_vol_status(local, dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto xml_end;
}
}
@@ -7489,18 +7280,17 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
if (!local->all) {
ret = cli_xml_output_vol_status_end(local);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
}
goto out;
}
- status.brick = GF_MALLOC(PATH_MAX + 256, gf_common_mt_strdup);
- if (!status.brick) {
- errno = ENOMEM;
- ret = -1;
- goto out;
- }
+ if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) ||
+ (cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD) ||
+ (cmd & GF_CLI_STATUS_BITD) || (cmd & GF_CLI_STATUS_SCRUB))
+ notbrick = _gf_true;
+
switch (cmd & GF_CLI_STATUS_MASK) {
case GF_CLI_STATUS_MEM:
cli_print_volume_status_mem(dict, notbrick);
@@ -7534,25 +7324,25 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
break;
}
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
index_max = brick_index_max + other_count;
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret)
goto out;
- ret = dict_get_int32(dict, "hot_brick_count", &hot_brick_count);
+ ret = dict_get_int32_sizen(dict, "hot_brick_count", &hot_brick_count);
if (ret)
goto out;
@@ -7563,6 +7353,14 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
"Gluster process", "TCP Port", "RDMA Port", "Online", "Pid");
cli_print_line(CLI_BRICK_STATUS_LINE_LEN);
}
+
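+ /* allocate the brick buffer only now, after the XML and early-exit paths that never use it */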
+ status.brick = GF_MALLOC(PATH_MAX + 256, gf_common_mt_strdup);
+ if (!status.brick) {
+ errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
+
for (i = 0; i <= index_max; i++) {
status.rdma_port = 0;
@@ -7579,7 +7377,7 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
/* Brick/not-brick is handled separately here as all
* types of nodes are contained in the default output
*/
- memset(status.brick, 0, PATH_MAX + 255);
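+ /* the buffer is used as a C string, so resetting the first byte is enough */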
+ status.brick[0] = '\0';
if (!strcmp(hostname, "NFS Server") ||
!strcmp(hostname, "Self-heal Daemon") ||
!strcmp(hostname, "Quota Daemon") ||
@@ -7651,7 +7449,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_status_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -7660,21 +7458,17 @@ gf_cli_status_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = -1;
dict_t *dict = NULL;
- if (!frame || !this || !data)
- goto out;
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_status_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_STATUS_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning: %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int
+static int
gf_cli_status_volume_all(call_frame_t *frame, xlator_t *this, void *data)
{
int i = 0;
@@ -7710,7 +7504,7 @@ gf_cli_status_volume_all(call_frame_t *frame, xlator_t *this, void *data)
if (ret)
goto out;
- ret = dict_get_int32((dict_t *)vol_dict, "vol_count", &vol_count);
+ ret = dict_get_int32_sizen((dict_t *)vol_dict, "vol_count", &vol_count);
if (ret) {
cli_err("Failed to get names of volumes");
goto out;
@@ -7724,7 +7518,7 @@ gf_cli_status_volume_all(call_frame_t *frame, xlator_t *this, void *data)
// TODO: Pass proper op_* values
ret = cli_xml_output_vol_status_begin(local, 0, 0, NULL);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto xml_end;
}
}
@@ -7740,12 +7534,12 @@ gf_cli_status_volume_all(call_frame_t *frame, xlator_t *this, void *data)
if (!dict)
goto out;
- snprintf(key, sizeof(key), "vol%d", i);
- ret = dict_get_str(vol_dict, key, &volname);
+ ret = snprintf(key, sizeof(key), "vol%d", i);
+ ret = dict_get_strn(vol_dict, key, ret, &volname);
if (ret)
goto out;
- ret = dict_set_str(dict, "volname", volname);
+ ret = dict_set_str_sizen(dict, "volname", volname);
if (ret)
goto out;
@@ -7802,7 +7596,7 @@ gf_cli_mount_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_mount_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -7826,7 +7620,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_mount(call_frame_t *frame, xlator_t *this, void *data)
{
gf1_cli_mount_req req = {
@@ -7857,7 +7651,7 @@ gf_cli_mount(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
@@ -7879,7 +7673,7 @@ gf_cli_umount_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_umount_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -7897,7 +7691,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_umount(call_frame_t *frame, xlator_t *this, void *data)
{
gf1_cli_umount_req req = {
@@ -7911,9 +7705,9 @@ gf_cli_umount(call_frame_t *frame, xlator_t *this, void *data)
dict = data;
- ret = dict_get_str(dict, "path", &req.path);
+ ret = dict_get_str_sizen(dict, "path", &req.path);
if (ret == 0)
- ret = dict_get_int32(dict, "lazy", &req.lazy);
+ ret = dict_get_int32_sizen(dict, "lazy", &req.lazy);
if (ret) {
ret = -1;
@@ -7925,11 +7719,11 @@ gf_cli_umount(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gf1_cli_umount_req);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-void
+static void
cmd_heal_volume_statistics_out(dict_t *dict, int brick)
{
uint64_t num_entries = 0;
@@ -8010,18 +7804,18 @@ out:
return;
}
-void
+static void
cmd_heal_volume_brick_out(dict_t *dict, int brick)
{
uint64_t num_entries = 0;
int ret = 0;
- char key[256] = {0};
+ char key[64] = {0};
char *hostname = NULL;
char *path = NULL;
char *status = NULL;
uint64_t i = 0;
uint32_t time = 0;
- char timestr[32] = {0};
+ char timestr[GF_TIMESTR_SIZE] = {0};
char *shd_status = NULL;
snprintf(key, sizeof key, "%d-hostname", brick);
@@ -8036,7 +7830,7 @@ cmd_heal_volume_brick_out(dict_t *dict, int brick)
snprintf(key, sizeof key, "%d-status", brick);
ret = dict_get_str(dict, key, &status);
- if (status && strlen(status))
+ if (status && status[0] != '\0')
cli_out("Status: %s", status);
snprintf(key, sizeof key, "%d-shd-status", brick);
@@ -8072,12 +7866,12 @@ out:
return;
}
-void
+static void
cmd_heal_volume_statistics_heal_count_out(dict_t *dict, int brick)
{
uint64_t num_entries = 0;
int ret = 0;
- char key[256] = {0};
+ char key[64] = {0};
char *hostname = NULL;
char *path = NULL;
char *status = NULL;
@@ -8114,18 +7908,16 @@ out:
return;
}
-int
+static int
gf_is_cli_heal_get_command(gf_xl_afr_op_t heal_op)
{
/* If the command is a get command the value is 1, otherwise 0;
for invalid commands it is -1 */
- int get_cmds[GF_SHD_OP_HEAL_DISABLE + 1] = {
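+ /* static: the lookup table is initialised once instead of on every call */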
+ static int get_cmds[GF_SHD_OP_HEAL_DISABLE + 1] = {
[GF_SHD_OP_INVALID] = -1,
[GF_SHD_OP_HEAL_INDEX] = 0,
[GF_SHD_OP_HEAL_FULL] = 0,
[GF_SHD_OP_INDEX_SUMMARY] = 1,
- [GF_SHD_OP_HEALED_FILES] = 1,
- [GF_SHD_OP_HEAL_FAILED_FILES] = 1,
[GF_SHD_OP_SPLIT_BRAIN_FILES] = 1,
[GF_SHD_OP_STATISTICS] = 1,
[GF_SHD_OP_STATISTICS_HEAL_COUNT] = 1,
@@ -8154,9 +7946,9 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
int brick_count = 0;
int i = 0;
gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
- char *operation = NULL;
- char *substr = NULL;
- char *heal_op_str = NULL;
+ const char *operation = NULL;
+ const char *substr = NULL;
+ const char *heal_op_str = NULL;
GF_ASSERT(myframe);
@@ -8172,25 +7964,23 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_int32(local->dict, "heal-op", (int32_t *)&heal_op);
+ ret = dict_get_int32_sizen(local->dict, "heal-op", (int32_t *)&heal_op);
// TODO: Proper XML output
//#if (HAVE_LIB_XML)
// if (global_state->mode & GLUSTER_MODE_XML) {
// ret = cli_xml_output_dict ("volHeal", dict, rsp.op_ret,
// rsp.op_errno, rsp.op_errstr);
// if (ret)
- // gf_log ("cli", GF_LOG_ERROR,
- // "Error outputting to xml");
+ // gf_log ("cli", GF_LOG_ERROR, XML_ERROR);
// goto out;
// }
//#endif
- ret = dict_get_str(local->dict, "volname", &volname);
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "failed to get volname");
goto out;
@@ -8204,26 +7994,16 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
case GF_SHD_OP_HEAL_INDEX:
operation = "Launching heal operation ";
heal_op_str = "to perform index self heal";
- substr =
- "\nUse heal info commands to check"
- " status.";
+ substr = "\nUse heal info commands to check status.";
break;
case GF_SHD_OP_HEAL_FULL:
operation = "Launching heal operation ";
heal_op_str = "to perform full self heal";
- substr =
- "\nUse heal info commands to check"
- " status.";
+ substr = "\nUse heal info commands to check status.";
break;
case GF_SHD_OP_INDEX_SUMMARY:
heal_op_str = "list of entries to be healed";
break;
- case GF_SHD_OP_HEALED_FILES:
- heal_op_str = "list of healed entries";
- break;
- case GF_SHD_OP_HEAL_FAILED_FILES:
- heal_op_str = "list of heal failed entries";
- break;
case GF_SHD_OP_SPLIT_BRAIN_FILES:
heal_op_str = "list of split brain entries";
break;
@@ -8236,12 +8016,14 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
heal_op_str = "count of entries to be healed per replica";
break;
- /* The below 4 cases are never hit; they're coded only to make
- * compiler warnings go away.*/
case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:
case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:
case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:
case GF_SHD_OP_HEAL_SUMMARY:
+ case GF_SHD_OP_HEALED_FILES:
+ case GF_SHD_OP_HEAL_FAILED_FILES:
+ /* These cases are never hit; they're coded just to silence the
+ * compiler warnings. */
break;
case GF_SHD_OP_INVALID:
@@ -8291,11 +8073,11 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
- ret = dict_get_int32(dict, "count", &brick_count);
+ ret = dict_get_int32_sizen(dict, "count", &brick_count);
if (ret)
goto out;
@@ -8316,8 +8098,6 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
cmd_heal_volume_statistics_heal_count_out(dict, i);
break;
case GF_SHD_OP_INDEX_SUMMARY:
- case GF_SHD_OP_HEALED_FILES:
- case GF_SHD_OP_HEAL_FAILED_FILES:
case GF_SHD_OP_SPLIT_BRAIN_FILES:
for (i = 0; i < brick_count; i++)
cmd_heal_volume_brick_out(dict, i);
@@ -8336,7 +8116,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_heal_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -8345,26 +8125,20 @@ gf_cli_heal_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_heal_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_HEAL_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_statedump_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -8372,9 +8146,7 @@ gf_cli_statedump_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
0,
};
int ret = -1;
- char msg[1024] = {
- 0,
- };
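+ /* default to the success message; it is overwritten with op_errstr on failure */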
+ char msg[1024] = "Volume statedump successful";
GF_ASSERT(myframe);
@@ -8384,20 +8156,18 @@ gf_cli_statedump_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_DEBUG, "Received response to statedump");
if (rsp.op_ret)
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
- else
- snprintf(msg, sizeof(msg), "Volume statedump successful");
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str("volStatedump", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -8413,7 +8183,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_statedump_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -8422,23 +8192,18 @@ gf_cli_statedump_volume(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(
&req, frame, gf_cli_statedump_volume_cbk, (xdrproc_t)xdr_gf_cli_req,
options, GLUSTER_CLI_STATEDUMP_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -8463,7 +8228,7 @@ gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -8475,7 +8240,7 @@ gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -8483,14 +8248,14 @@ gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_list(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
if (rsp.op_ret)
cli_err("%s", rsp.op_errstr);
else {
- ret = dict_get_int32(dict, "count", &vol_count);
+ ret = dict_get_int32_sizen(dict, "count", &vol_count);
if (ret)
goto out;
@@ -8499,8 +8264,8 @@ gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
for (i = 0; i < vol_count; i++) {
- snprintf(key, sizeof(key), "volume%d", i);
- ret = dict_get_str(dict, key, &volname);
+ ret = snprintf(key, sizeof(key), "volume%d", i);
+ ret = dict_get_strn(dict, key, ret, &volname);
if (ret)
goto out;
cli_out("%s", volname);
@@ -8518,7 +8283,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_list_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -8526,19 +8291,14 @@ gf_cli_list_volume(call_frame_t *frame, xlator_t *this, void *data)
0,
}};
- if (!frame || !this)
- goto out;
-
ret = cli_cmd_submit(NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_LIST_VOLUME, NULL, this,
gf_cli_list_volume_cbk, (xdrproc_t)xdr_gf_cli_req);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_clearlocks_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -8547,7 +8307,6 @@ gf_cli_clearlocks_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
};
int ret = -1;
char *lk_summary = NULL;
- char *volname = NULL;
dict_t *dict = NULL;
GF_ASSERT(myframe);
@@ -8558,7 +8317,7 @@ gf_cli_clearlocks_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_DEBUG, "Received response to clear-locks");
@@ -8584,24 +8343,14 @@ gf_cli_clearlocks_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Unable to serialize response dictionary");
- goto out;
- }
-
- ret = dict_get_str(dict, "volname", &volname);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Unable to get volname "
- "from dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
ret = dict_get_str(dict, "lk-summary", &lk_summary);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Unable to get lock "
- "summary from dictionary");
+ "Unable to get lock summary from dictionary");
goto out;
}
cli_out("Volume clear-locks successful");
@@ -8618,7 +8367,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_clearlocks_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -8627,22 +8376,18 @@ gf_cli_clearlocks_volume(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(
&req, frame, gf_cli_clearlocks_volume_cbk, (xdrproc_t)xdr_gf_cli_req,
options, GLUSTER_CLI_CLRLOCKS_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
{
int32_t ret = -1;
@@ -8656,7 +8401,7 @@ cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
local = frame->local;
- ret = dict_get_int32(dict, "sub-cmd", &delete_cmd);
+ ret = dict_get_int32_sizen(dict, "sub-cmd", &delete_cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get sub-cmd");
goto end;
@@ -8668,8 +8413,7 @@ cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
rsp->op_errno, rsp->op_errstr);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to create "
- "xml output for delete");
+ "Failed to create xml output for delete");
goto end;
}
}
@@ -8705,8 +8449,7 @@ cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
ret = cli_xml_snapshot_delete(local, dict, rsp);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to create "
- "xml output for snapshot delete command");
+ "Failed to create xml output for snapshot delete command");
goto out;
}
/* Error out in case of the op already failed */
@@ -8715,7 +8458,7 @@ cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
goto out;
}
} else {
- ret = dict_get_str(dict, "snapname", &snap_name);
+ ret = dict_get_str_sizen(dict, "snapname", &snap_name);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get snapname");
goto out;
@@ -8734,7 +8477,7 @@ end:
return ret;
}
-int
+static int
cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
{
char buf[PATH_MAX] = "";
@@ -8760,26 +8503,19 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
goto out;
}
- ret = dict_get_int32(dict, "config-command", &config_command);
+ ret = dict_get_int32_sizen(dict, "config-command", &config_command);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch config type");
goto out;
}
- ret = dict_get_str(dict, "volname", &volname);
- /* Ignore the error, as volname is optional */
-
- if (!volname) {
- volname = "System";
- }
-
ret = dict_get_uint64(dict, "snap-max-hard-limit", &hard_limit);
/* Ignore the error, as the key specified is optional */
ret = dict_get_uint64(dict, "snap-max-soft-limit", &soft_limit);
- ret = dict_get_str(dict, "auto-delete", &auto_delete);
+ ret = dict_get_str_sizen(dict, "auto-delete", &auto_delete);
- ret = dict_get_str(dict, "snap-activate-on-create", &snap_activate);
+ ret = dict_get_str_sizen(dict, "snap-activate-on-create", &snap_activate);
if (!hard_limit && !soft_limit &&
config_command != GF_SNAP_CONFIG_DISPLAY && !auto_delete &&
@@ -8789,13 +8525,19 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
goto out;
}
+ ret = dict_get_str_sizen(dict, "volname", &volname);
+ /* Ignore the error, as volname is optional */
+
+ if (!volname) {
+ volname = "System";
+ }
+
switch (config_command) {
case GF_SNAP_CONFIG_TYPE_SET:
if (hard_limit && soft_limit) {
cli_out(
"snapshot config: snap-max-hard-limit "
- "& snap-max-soft-limit for system set "
- "successfully");
+ "& snap-max-soft-limit for system set successfully");
} else if (hard_limit) {
cli_out(
"snapshot config: snap-max-hard-limit "
@@ -8807,13 +8549,9 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
"for %s set successfully",
volname);
} else if (auto_delete) {
- cli_out(
- "snapshot config: auto-delete "
- "successfully set");
+ cli_out("snapshot config: auto-delete successfully set");
} else if (snap_activate) {
- cli_out(
- "snapshot config: activate-on-create "
- "successfully set");
+ cli_out("snapshot config: activate-on-create successfully set");
}
break;
@@ -8822,9 +8560,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
ret = dict_get_uint64(dict, "snap-max-hard-limit", &value);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- "snap_max_hard_limit for %s",
- volname);
+ "Could not fetch snap_max_hard_limit for %s", volname);
ret = -1;
goto out;
}
@@ -8833,9 +8569,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
ret = dict_get_uint64(dict, "snap-max-soft-limit", &soft_limit);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- "snap-max-soft-limit for %s",
- volname);
+ "Could not fetch snap-max-soft-limit for %s", volname);
ret = -1;
goto out;
}
@@ -8855,13 +8589,11 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
}
for (i = 0; i < voldisplaycount; i++) {
- snprintf(buf, sizeof(buf), "volume%" PRIu64 "-volname", i);
- ret = dict_get_str(dict, buf, &volname);
+ ret = snprintf(buf, sizeof(buf), "volume%" PRIu64 "-volname",
+ i);
+ ret = dict_get_strn(dict, buf, ret, &volname);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- " %s",
- buf);
+ gf_log("cli", GF_LOG_ERROR, "Could not fetch %s", buf);
ret = -1;
goto out;
}
@@ -8871,10 +8603,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
"volume%" PRIu64 "-snap-max-hard-limit", i);
ret = dict_get_uint64(dict, buf, &value);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- " %s",
- buf);
+ gf_log("cli", GF_LOG_ERROR, "Could not fetch %s", buf);
ret = -1;
goto out;
}
@@ -8886,8 +8615,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
if (ret) {
gf_log("cli", GF_LOG_ERROR,
"Could not fetch"
- " effective snap_max_hard_limit for "
- "%s",
+ " effective snap_max_hard_limit for %s",
volname);
ret = -1;
goto out;
@@ -8898,10 +8626,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
"volume%" PRIu64 "-snap-max-soft-limit", i);
ret = dict_get_uint64(dict, buf, &value);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- " %s",
- buf);
+ gf_log("cli", GF_LOG_ERROR, "Could not fetch %s", buf);
ret = -1;
goto out;
}
@@ -8926,7 +8651,7 @@ out:
* arg - 0, dict : Response Dictionary.
* arg - 1, prefix str : snaplist.snap{0..}.vol{0..}.*
*/
-int
+static int
cli_get_each_volinfo_in_snap(dict_t *dict, char *keyprefix,
gf_boolean_t snap_driven)
{
@@ -9013,7 +8738,7 @@ out:
* arg - 0, dict : Response dictionary.
* arg - 1, prefix_str : snaplist.snap{0..}.*
*/
-int
+static int
cli_get_volinfo_in_snap(dict_t *dict, char *keyprefix)
{
char key[PATH_MAX] = "";
@@ -9038,8 +8763,7 @@ cli_get_volinfo_in_snap(dict_t *dict, char *keyprefix)
ret = cli_get_each_volinfo_in_snap(dict, key, _gf_true);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not list "
- "details of volume in a snap");
+ "Could not list details of volume in a snap");
goto out;
}
cli_out(" ");
@@ -9049,7 +8773,7 @@ out:
return ret;
}
-int
+static int
cli_get_each_snap_info(dict_t *dict, char *prefix_str, gf_boolean_t snap_driven)
{
char key_buffer[PATH_MAX] = "";
@@ -9116,9 +8840,7 @@ cli_get_each_snap_info(dict_t *dict, char *prefix_str, gf_boolean_t snap_driven)
cli_out("%-12s", "Snap Volumes:\n");
ret = cli_get_volinfo_in_snap(dict, prefix_str);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Unable to list details "
- "of the snaps");
+ gf_log("cli", GF_LOG_ERROR, "Unable to list details of the snaps");
goto out;
}
}
@@ -9129,17 +8851,17 @@ out:
/* This is a generic function to print snap related information.
* arg - 0, dict : Response Dictionary
*/
-int
+static int
cli_call_snapshot_info(dict_t *dict, gf_boolean_t bool_snap_driven)
{
int snap_count = 0;
- char key[PATH_MAX] = "";
+ char key[32] = "";
int ret = -1;
int i = 0;
GF_ASSERT(dict);
- ret = dict_get_int32(dict, "snapcount", &snap_count);
+ ret = dict_get_int32_sizen(dict, "snapcount", &snap_count);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Unable to get snapcount");
goto out;
@@ -9164,33 +8886,33 @@ out:
return ret;
}
-int
+static int
cli_get_snaps_in_volume(dict_t *dict)
{
int ret = -1;
int i = 0;
int count = 0;
int avail = 0;
- char key[PATH_MAX] = "";
+ char key[32] = "";
char *get_buffer = NULL;
GF_ASSERT(dict);
- ret = dict_get_str(dict, "origin-volname", &get_buffer);
+ ret = dict_get_str_sizen(dict, "origin-volname", &get_buffer);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch origin-volname");
goto out;
}
cli_out(INDENT_MAIN_HEAD "%s", "Volume Name", ":", get_buffer);
- ret = dict_get_int32(dict, "snapcount", &avail);
+ ret = dict_get_int32_sizen(dict, "snapcount", &avail);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch snapcount");
goto out;
}
cli_out(INDENT_MAIN_HEAD "%d", "Snaps Taken", ":", avail);
- ret = dict_get_int32(dict, "snaps-available", &count);
+ ret = dict_get_int32_sizen(dict, "snaps-available", &count);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch snaps-available");
goto out;
@@ -9212,8 +8934,7 @@ cli_get_snaps_in_volume(dict_t *dict)
ret = cli_get_each_volinfo_in_snap(dict, key, _gf_false);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not get volume "
- "related information");
+ "Could not get volume related information");
goto out;
}
@@ -9223,18 +8944,18 @@ out:
return ret;
}
-int
+static int
cli_snapshot_list(dict_t *dict)
{
int snapcount = 0;
- char key[PATH_MAX] = "";
+ char key[32] = "";
int ret = -1;
int i = 0;
char *get_buffer = NULL;
GF_ASSERT(dict);
- ret = dict_get_int32(dict, "snapcount", &snapcount);
+ ret = dict_get_int32_sizen(dict, "snapcount", &snapcount);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch snap count");
goto out;
@@ -9250,7 +8971,7 @@ cli_snapshot_list(dict_t *dict)
goto out;
}
- ret = dict_get_str(dict, key, &get_buffer);
+ ret = dict_get_strn(dict, key, ret, &get_buffer);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get %s ", key);
goto out;
@@ -9262,7 +8983,7 @@ out:
return ret;
}
-int
+static int
cli_get_snap_volume_status(dict_t *dict, char *key_prefix)
{
int ret = -1;
@@ -9363,11 +9084,11 @@ out:
return ret;
}
-int
+static int
cli_get_single_snap_status(dict_t *dict, char *keyprefix)
{
int ret = -1;
- char key[PATH_MAX] = "";
+ char key[64] = ""; /* keyprefix is "status.snap0" */
int i = 0;
int volcount = 0;
char *get_buffer = NULL;
@@ -9426,7 +9147,7 @@ out:
return ret;
}
-int32_t
+static int32_t
cli_populate_req_dict_for_delete(dict_t *snap_dict, dict_t *dict, size_t index)
{
int32_t ret = -1;
@@ -9436,11 +9157,10 @@ cli_populate_req_dict_for_delete(dict_t *snap_dict, dict_t *dict, size_t index)
GF_ASSERT(snap_dict);
GF_ASSERT(dict);
- ret = dict_set_int32(snap_dict, "sub-cmd", GF_SNAP_DELETE_TYPE_ITER);
+ ret = dict_set_int32_sizen(snap_dict, "sub-cmd", GF_SNAP_DELETE_TYPE_ITER);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not save command "
- "type in snap dictionary");
+ "Could not save command type in snap dictionary");
goto out;
}
@@ -9461,7 +9181,7 @@ cli_populate_req_dict_for_delete(dict_t *snap_dict, dict_t *dict, size_t index)
goto out;
}
- ret = dict_set_int32(snap_dict, "type", GF_SNAP_OPTION_TYPE_DELETE);
+ ret = dict_set_int32_sizen(snap_dict, "type", GF_SNAP_OPTION_TYPE_DELETE);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to save command type");
goto out;
@@ -9476,7 +9196,7 @@ out:
return ret;
}
-int
+static int
cli_populate_req_dict_for_status(dict_t *snap_dict, dict_t *dict, int index)
{
int ret = -1;
@@ -9488,9 +9208,7 @@ cli_populate_req_dict_for_status(dict_t *snap_dict, dict_t *dict, int index)
ret = dict_set_uint32(snap_dict, "sub-cmd", GF_SNAP_STATUS_TYPE_ITER);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not save command "
- "type in snap dict");
+ gf_log("cli", GF_LOG_ERROR, "Could not save command type in snap dict");
goto out;
}
@@ -9499,21 +9217,19 @@ cli_populate_req_dict_for_status(dict_t *snap_dict, dict_t *dict, int index)
goto out;
}
- ret = dict_get_str(dict, key, &buffer);
+ ret = dict_get_strn(dict, key, ret, &buffer);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get snapname");
goto out;
}
- ret = dict_set_str(snap_dict, "snapname", buffer);
+ ret = dict_set_str_sizen(snap_dict, "snapname", buffer);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not save snapname "
- "in snap dict");
+ gf_log("cli", GF_LOG_ERROR, "Could not save snapname in snap dict");
goto out;
}
- ret = dict_set_int32(snap_dict, "type", GF_SNAP_OPTION_TYPE_STATUS);
+ ret = dict_set_int32_sizen(snap_dict, "type", GF_SNAP_OPTION_TYPE_STATUS);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not save command type");
goto out;
@@ -9525,7 +9241,7 @@ cli_populate_req_dict_for_status(dict_t *snap_dict, dict_t *dict, int index)
goto out;
}
- ret = dict_set_int32(snap_dict, "hold_vol_locks", _gf_false);
+ ret = dict_set_int32_sizen(snap_dict, "hold_vol_locks", _gf_false);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Setting volume lock flag failed");
goto out;
@@ -9535,10 +9251,9 @@ out:
return ret;
}
-int
+static int
cli_snapshot_status(dict_t *dict, gf_cli_rsp *rsp, call_frame_t *frame)
{
- char key[PATH_MAX] = "";
int ret = -1;
int status_cmd = -1;
cli_local_t *local = NULL;
@@ -9559,8 +9274,7 @@ cli_snapshot_status(dict_t *dict, gf_cli_rsp *rsp, call_frame_t *frame)
rsp->op_errstr);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to set "
- "op_errstr in local dictionary");
+ "Failed to set op_errstr in local dictionary");
goto out;
}
}
@@ -9568,7 +9282,7 @@ cli_snapshot_status(dict_t *dict, gf_cli_rsp *rsp, call_frame_t *frame)
goto out;
}
- ret = dict_get_int32(dict, "sub-cmd", &status_cmd);
+ ret = dict_get_int32_sizen(dict, "sub-cmd", &status_cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch status type");
goto out;
@@ -9580,35 +9294,24 @@ cli_snapshot_status(dict_t *dict, gf_cli_rsp *rsp, call_frame_t *frame)
goto out;
}
- ret = snprintf(key, sizeof(key), "status.snap0");
- if (ret < 0) {
- goto out;
- }
-
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_snapshot_status_single_snap(local, dict, key);
+ ret = cli_xml_snapshot_status_single_snap(local, dict, "status.snap0");
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to create "
- "xml output for snapshot status");
- goto out;
+ "Failed to create xml output for snapshot status");
}
} else {
- ret = cli_get_single_snap_status(dict, key);
+ ret = cli_get_single_snap_status(dict, "status.snap0");
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- "status of snap");
- goto out;
+ gf_log("cli", GF_LOG_ERROR, "Could not fetch status of snap");
}
}
- ret = 0;
out:
return ret;
}
-int
+static int
gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
char *snap_name, char *volname, char *snap_uuid,
char *clone_name)
@@ -9658,8 +9361,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_CREATED,
- "snapshot_name=%s;"
- "volume_name=%s;snapshot_uuid=%s",
+ "snapshot_name=%s;volume_name=%s;snapshot_uuid=%s",
snap_name, volname, snap_uuid);
ret = 0;
@@ -9686,9 +9388,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_ACTIVATED,
- "snapshot_name=%s;"
- "snapshot_uuid=%s",
- snap_name, snap_uuid);
+ "snapshot_name=%s;snapshot_uuid=%s", snap_name, snap_uuid);
ret = 0;
break;
@@ -9714,9 +9414,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_DEACTIVATED,
- "snapshot_name=%s;"
- "snapshot_uuid=%s",
- snap_name, snap_uuid);
+ "snapshot_name=%s;snapshot_uuid=%s", snap_name, snap_uuid);
ret = 0;
break;
@@ -9749,15 +9447,14 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_RESTORED,
- "snapshot_name=%s;"
- "snapshot_uuid=%s;volume_name=%s",
+ "snapshot_name=%s;snapshot_uuid=%s;volume_name=%s",
snap_name, snap_uuid, volname);
ret = 0;
break;
case GF_SNAP_OPTION_TYPE_DELETE:
- ret = dict_get_int32(dict, "sub-cmd", &delete_cmd);
+ ret = dict_get_int32_sizen(dict, "sub-cmd", &delete_cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get sub-cmd");
goto out;
@@ -9793,9 +9490,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_DELETED,
- "snapshot_name=%s;"
- "snapshot_uuid=%s",
- snap_name, snap_uuid);
+ "snapshot_name=%s;snapshot_uuid=%s", snap_name, snap_uuid);
ret = 0;
break;
@@ -9813,9 +9508,8 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
if (rsp->op_ret != 0) {
gf_event(EVENT_SNAPSHOT_CLONE_FAILED,
- "snapshot_name=%s;clone_name=%s;"
- "error=%s",
- snap_name, clone_name,
+ "snapshot_name=%s;clone_name=%s;error=%s", snap_name,
+ clone_name,
rsp->op_errstr ? rsp->op_errstr
: "Please check log file for details");
ret = 0;
@@ -9829,9 +9523,8 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_CLONED,
- "snapshot_name=%s;"
- "clone_name=%s;clone_uuid=%s",
- snap_name, clone_name, snap_uuid);
+ "snapshot_name=%s;clone_name=%s;clone_uuid=%s", snap_name,
+ clone_name, snap_uuid);
ret = 0;
break;
@@ -9845,7 +9538,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
break;
}
- ret = dict_get_int32(dict, "config-command", &config_command);
+ ret = dict_get_int32_sizen(dict, "config-command", &config_command);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch config type");
goto out;
@@ -9859,8 +9552,9 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
/* These are optional parameters therefore ignore the error */
ret = dict_get_uint64(dict, "snap-max-hard-limit", &hard_limit);
ret = dict_get_uint64(dict, "snap-max-soft-limit", &soft_limit);
- ret = dict_get_str(dict, "auto-delete", &auto_delete);
- ret = dict_get_str(dict, "snap-activate-on-create", &snap_activate);
+ ret = dict_get_str_sizen(dict, "auto-delete", &auto_delete);
+ ret = dict_get_str_sizen(dict, "snap-activate-on-create",
+ &snap_activate);
if (!hard_limit && !soft_limit && !auto_delete && !snap_activate) {
ret = -1;
@@ -9872,9 +9566,6 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
goto out;
}
- volname = NULL;
- ret = dict_get_str(dict, "volname", &volname);
-
if (hard_limit || soft_limit) {
snprintf(option, sizeof(option), "%s=%" PRIu64,
hard_limit ? "hard_limit" : "soft_limit",
@@ -9885,6 +9576,9 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
auto_delete ? auto_delete : snap_activate);
}
+ volname = NULL;
+ ret = dict_get_str_sizen(dict, "volname", &volname);
+
snprintf(msg, sizeof(msg), "config_type=%s;%s",
volname ? "volume_config" : "system_config", option);
@@ -9908,7 +9602,7 @@ out:
* Fetch necessary data from dict at one place instead of *
* repeating the same code again and again. *
*/
-int
+static int
gf_cli_snapshot_get_data_from_dict(dict_t *dict, char **snap_name,
char **volname, char **snap_uuid,
int8_t *soft_limit_flag, char **clone_name)
@@ -9918,21 +9612,21 @@ gf_cli_snapshot_get_data_from_dict(dict_t *dict, char **snap_name,
GF_VALIDATE_OR_GOTO("cli", dict, out);
if (snap_name) {
- ret = dict_get_str(dict, "snapname", snap_name);
+ ret = dict_get_str_sizen(dict, "snapname", snap_name);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get snapname from dict");
}
}
if (volname) {
- ret = dict_get_str(dict, "volname1", volname);
+ ret = dict_get_str_sizen(dict, "volname1", volname);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get volname1 from dict");
}
}
if (snap_uuid) {
- ret = dict_get_str(dict, "snapuuid", snap_uuid);
+ ret = dict_get_str_sizen(dict, "snapuuid", snap_uuid);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get snapuuid from dict");
}
@@ -9947,7 +9641,7 @@ gf_cli_snapshot_get_data_from_dict(dict_t *dict, char **snap_name,
}
if (clone_name) {
- ret = dict_get_str(dict, "clonename", clone_name);
+ ret = dict_get_str_sizen(dict, "clonename", clone_name);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get clonename from dict");
}
@@ -9958,7 +9652,7 @@ out:
return ret;
}
-int
+static int
gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -9986,8 +9680,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -10003,7 +9696,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
if (ret)
goto out;
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "failed to get type");
goto out;
@@ -10032,7 +9725,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_snapshot(type, dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
goto out;
@@ -10060,10 +9753,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "snapshot create: success: Snap %s created "
- "successfully",
- snap_name);
+ cli_out("snapshot create: success: Snap %s created successfully",
+ snap_name);
if (soft_limit_flag == 1) {
cli_out(
@@ -10094,10 +9785,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "snapshot clone: success: Clone %s created "
- "successfully",
- clone_name);
+ cli_out("snapshot clone: success: Clone %s created successfully",
+ clone_name);
ret = 0;
break;
@@ -10116,10 +9805,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "Snapshot restore: %s: Snap restored "
- "successfully",
- snap_name);
+ cli_out("Snapshot restore: %s: Snap restored successfully",
+ snap_name);
ret = 0;
break;
@@ -10137,10 +9824,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "Snapshot activate: %s: Snap activated "
- "successfully",
- snap_name);
+ cli_out("Snapshot activate: %s: Snap activated successfully",
+ snap_name);
ret = 0;
break;
@@ -10159,10 +9844,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "Snapshot deactivate: %s: Snap deactivated "
- "successfully",
- snap_name);
+ cli_out("Snapshot deactivate: %s: Snap deactivated successfully",
+ snap_name);
ret = 0;
break;
@@ -10195,8 +9878,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_snapshot_config_display(dict, &rsp);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to display "
- "snapshot config output.");
+ "Failed to display snapshot config output.");
goto out;
}
break;
@@ -10212,9 +9894,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_snapshot_list(dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to display "
- "snapshot list");
+ gf_log("cli", GF_LOG_ERROR, "Failed to display snapshot list");
goto out;
}
break;
@@ -10231,8 +9911,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_snapshot_status(dict, &rsp, frame);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to display "
- "snapshot status output.");
+ "Failed to display snapshot status output.");
goto out;
}
break;
@@ -10276,11 +9955,9 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
local = frame->local;
- ret = dict_get_int32(local->dict, "sub-cmd", &cmd);
+ ret = dict_get_int32_sizen(local->dict, "sub-cmd", &cmd);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to get "
- "sub-cmd");
+ gf_log("cli", GF_LOG_ERROR, "Failed to get sub-cmd");
goto out;
}
@@ -10290,11 +9967,9 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
goto out;
}
- ret = dict_get_int32(local->dict, "snapcount", &snapcount);
+ ret = dict_get_int32_sizen(local->dict, "snapcount", &snapcount);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not get "
- "snapcount");
+ gf_log("cli", GF_LOG_ERROR, "Could not get snapcount");
goto out;
}
@@ -10304,8 +9979,7 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
local->writer, (xmlChar *)"snapCount", "%d", snapcount);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to write "
- "xml element \"snapCount\"");
+ "Failed to write xml element \"snapCount\"");
goto out;
}
#endif /* HAVE_LIB_XML */
@@ -10316,22 +9990,19 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
if (cmd == GF_SNAP_DELETE_TYPE_ALL) {
snprintf(question, sizeof(question),
- "System contains %d "
- "snapshot(s).\nDo you still "
+ "System contains %d snapshot(s).\nDo you still "
"want to continue and delete them? ",
snapcount);
} else {
- ret = dict_get_str(local->dict, "volname", &volname);
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to fetch "
- "volname from local dictionary");
+ "Failed to fetch volname from local dictionary");
goto out;
}
snprintf(question, sizeof(question),
- "Volume (%s) contains "
- "%d snapshot(s).\nDo you still want to "
+ "Volume (%s) contains %d snapshot(s).\nDo you still want to "
"continue and delete them? ",
volname, snapcount);
}
@@ -10340,8 +10011,7 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
if (GF_ANSWER_NO == answer) {
ret = 0;
gf_log("cli", GF_LOG_DEBUG,
- "User cancelled "
- "snapshot delete operation for snap delete");
+ "User cancelled snapshot delete operation for snap delete");
goto out;
}
@@ -10355,8 +10025,7 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
ret = cli_populate_req_dict_for_delete(snap_dict, local->dict, i);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not "
- "populate snap request dictionary");
+ "Could not populate snap request dictionary");
goto out;
}
@@ -10368,8 +10037,7 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
* snapshots is failed
*/
gf_log("cli", GF_LOG_ERROR,
- "cli_to_glusterd "
- "for snapshot delete failed");
+ "cli_to_glusterd for snapshot delete failed");
goto out;
}
dict_unref(snap_dict);
@@ -10384,7 +10052,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -10404,7 +10072,7 @@ gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data)
local = frame->local;
- ret = dict_get_int32(local->dict, "sub-cmd", &cmd);
+ ret = dict_get_int32_sizen(local->dict, "sub-cmd", &cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get sub-cmd");
goto out;
@@ -10420,7 +10088,7 @@ gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data)
goto out;
}
- ret = dict_get_int32(local->dict, "status.snapcount", &snapcount);
+ ret = dict_get_int32_sizen(local->dict, "status.snapcount", &snapcount);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get snapcount");
goto out;
@@ -10440,8 +10108,7 @@ gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data)
ret = cli_populate_req_dict_for_status(snap_dict, local->dict, i);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not "
- "populate snap request dictionary");
+ "Could not populate snap request dictionary");
goto out;
}
@@ -10478,7 +10145,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -10491,24 +10158,18 @@ gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data)
char *err_str = NULL;
int type = -1;
- if (!frame || !this || !data)
- goto out;
-
- if (!frame->local)
+ if (!frame || !frame->local || !this || !data)
goto out;
local = frame->local;
options = data;
- ret = dict_get_int32(local->dict, "type", &type);
-
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_snapshot_begin_composite_op(local);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to begin "
- "snapshot xml composite op");
+ "Failed to begin snapshot xml composite op");
goto out;
}
}
@@ -10517,18 +10178,17 @@ gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gf_cli_req, options, GLUSTER_CLI_SNAP,
this, cli_rpc_prog, NULL);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "cli_to_glusterd for "
- "snapshot failed");
+ gf_log("cli", GF_LOG_ERROR, "cli_to_glusterd for snapshot failed");
goto xmlend;
}
+ ret = dict_get_int32_sizen(local->dict, "type", &type);
+
if (GF_SNAP_OPTION_TYPE_STATUS == type) {
ret = gf_cli_snapshot_for_status(frame, this, data);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "cli to glusterd "
- "for snapshot status command failed");
+ "cli to glusterd for snapshot status command failed");
}
goto xmlend;
@@ -10538,8 +10198,7 @@ gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data)
ret = gf_cli_snapshot_for_delete(frame, this, data);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "cli to glusterd "
- "for snapshot delete command failed");
+ "cli to glusterd for snapshot delete command failed");
}
goto xmlend;
@@ -10552,25 +10211,23 @@ xmlend:
ret = cli_xml_snapshot_end_composite_op(local);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to end "
- "snapshot xml composite op");
+ "Failed to end snapshot xml composite op");
goto out;
}
}
out:
if (ret && local && GF_SNAP_OPTION_TYPE_STATUS == type) {
- tmp_ret = dict_get_str(local->dict, "op_err_str", &err_str);
+ tmp_ret = dict_get_str_sizen(local->dict, "op_err_str", &err_str);
if (tmp_ret || !err_str) {
cli_err("Snapshot Status : failed: %s",
- "Please "
- "check log file for details");
+ "Please check log file for details");
} else {
cli_err("Snapshot Status : failed: %s", err_str);
- dict_del(local->dict, "op_err_str");
+ dict_del_sizen(local->dict, "op_err_str");
}
}
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
@@ -10581,7 +10238,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_barrier_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -10598,7 +10255,7 @@ gf_cli_barrier_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_DEBUG, "Received response to barrier");
@@ -10620,7 +10277,7 @@ out:
return ret;
}
-int
+static int
gf_cli_barrier_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -10629,22 +10286,18 @@ gf_cli_barrier_volume(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(&req, frame, gf_cli_barrier_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, options,
GLUSTER_CLI_BARRIER_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -10671,27 +10324,23 @@ gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_DEBUG, "Received response to get volume option");
if (rsp.op_ret) {
if (strcmp(rsp.op_errstr, ""))
- snprintf(msg, sizeof(msg),
- "volume get option: "
- "failed: %s",
+ snprintf(msg, sizeof(msg), "volume get option: failed: %s",
rsp.op_errstr);
else
- snprintf(msg, sizeof(msg),
- "volume get option: "
- "failed");
+ snprintf(msg, sizeof(msg), "volume get option: failed");
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str("volGetopts", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
} else {
cli_err("%s", msg);
@@ -10708,7 +10357,7 @@ gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -10716,32 +10365,26 @@ gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_getopts(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "xml output generation "
- "failed");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
ret = 0;
}
goto out;
}
- ret = dict_get_str(dict, "warning", &value);
+ ret = dict_get_str_sizen(dict, "warning", &value);
if (!ret) {
cli_out("%s", value);
}
- ret = dict_get_int32(dict, "count", &count);
+ ret = dict_get_int32_sizen(dict, "count", &count);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to retrieve count "
- "from the dictionary");
+ "Failed to retrieve count from the dictionary");
goto out;
}
if (count <= 0) {
- gf_log("cli", GF_LOG_ERROR,
- "Value of count :%d is "
- "invalid",
- count);
+ gf_log("cli", GF_LOG_ERROR, "Value of count :%d is invalid", count);
ret = -1;
goto out;
}
@@ -10749,23 +10392,18 @@ gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
cli_out("%-40s%-40s", "Option", "Value");
cli_out("%-40s%-40s", "------", "-----");
for (i = 1; i <= count; i++) {
- snprintf(dict_key, sizeof dict_key, "key%d", i);
- ret = dict_get_str(dict, dict_key, &key);
+ ret = snprintf(dict_key, sizeof dict_key, "key%d", i);
+ ret = dict_get_strn(dict, dict_key, ret, &key);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to"
- " retrieve %s from the "
- "dictionary",
- dict_key);
+ "Failed to retrieve %s from the dictionary", dict_key);
goto out;
}
- snprintf(dict_key, sizeof dict_key, "value%d", i);
- ret = dict_get_str(dict, dict_key, &value);
+ ret = snprintf(dict_key, sizeof dict_key, "value%d", i);
+ ret = dict_get_strn(dict, dict_key, ret, &value);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to "
- "retrieve key value for %s from"
- "the dictionary",
+ "Failed to retrieve key value for %s from the dictionary",
dict_key);
goto out;
}
@@ -10787,7 +10425,7 @@ out_nolog:
return ret;
}
-int
+static int
gf_cli_get_vol_opt(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -10796,22 +10434,18 @@ gf_cli_get_vol_opt(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(&req, frame, gf_cli_get_vol_opt_cbk,
(xdrproc_t)xdr_gf_cli_req, options,
GLUSTER_CLI_GET_VOL_OPT, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int
+static int
add_cli_cmd_timeout_to_dict(dict_t *dict)
{
int ret = 0;
@@ -10819,15 +10453,13 @@ add_cli_cmd_timeout_to_dict(dict_t *dict)
if (cli_default_conn_timeout > 120) {
ret = dict_set_uint32(dict, "timeout", cli_default_conn_timeout);
if (ret) {
- gf_log("cli", GF_LOG_INFO,
- "Failed to save"
- "timeout to dict");
+ gf_log("cli", GF_LOG_INFO, "Failed to save timeout to dict");
}
}
return ret;
}
-int
+static int
cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
xdrproc_t xdrproc, dict_t *dict, int procnum, xlator_t *this,
rpc_clnt_prog_t *prog, struct iobref *iobref)
@@ -10839,12 +10471,7 @@ cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
const char **words = NULL;
cli_local_t *local = NULL;
- if (!this || !frame || !dict) {
- ret = -1;
- goto out;
- }
-
- if (!frame->local) {
+ if (!this || !frame || !frame->local || !dict) {
ret = -1;
goto out;
}
@@ -10861,12 +10488,12 @@ cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
while (words[i])
len += strlen(words[i++]) + 1;
- cmd = GF_CALLOC(1, len, gf_common_mt_char);
-
+ cmd = GF_MALLOC(len + 1, gf_common_mt_char);
if (!cmd) {
ret = -1;
goto out;
}
+ cmd[0] = '\0';
for (i = 0; words[i]; i++) {
strncat(cmd, words[i], len - 1);
@@ -10874,9 +10501,7 @@ cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
strncat(cmd, " ", len - 1);
}
- cmd[len - 1] = '\0';
-
- ret = dict_set_dynstr(dict, "cmd-str", cmd);
+ ret = dict_set_dynstr_sizen(dict, "cmd-str", cmd);
if (ret)
goto out;
@@ -10897,14 +10522,14 @@ out:
return ret;
}
-int
+static int
gf_cli_print_bitrot_scrub_status(dict_t *dict)
{
int i = 1;
int j = 0;
int ret = -1;
int count = 0;
- char key[256] = {
+ char key[64] = {
0,
};
char *volname = NULL;
@@ -10927,51 +10552,42 @@ gf_cli_print_bitrot_scrub_status(dict_t *dict)
int8_t scrub_running = 0;
char *scrub_state_op = NULL;
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_int32_sizen(dict, "count", &count);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR,
+ "failed to get count value from dictionary");
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get volume name");
- ret = dict_get_str(dict, "features.scrub", &state_scrub);
+ ret = dict_get_str_sizen(dict, "features.scrub", &state_scrub);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get scrub state value");
- ret = dict_get_str(dict, "features.scrub-throttle", &scrub_impact);
+ ret = dict_get_str_sizen(dict, "features.scrub-throttle", &scrub_impact);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get scrub impact "
- "value");
+ gf_log("cli", GF_LOG_TRACE, "failed to get scrub impact value");
- ret = dict_get_str(dict, "features.scrub-freq", &scrub_freq);
+ ret = dict_get_str_sizen(dict, "features.scrub-freq", &scrub_freq);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get scrub -freq value");
- ret = dict_get_str(dict, "bitrot_log_file", &bitrot_log_file);
+ ret = dict_get_str_sizen(dict, "bitrot_log_file", &bitrot_log_file);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get bitrot log file "
- "location");
+ gf_log("cli", GF_LOG_TRACE, "failed to get bitrot log file location");
- ret = dict_get_str(dict, "scrub_log_file", &scrub_log_file);
+ ret = dict_get_str_sizen(dict, "scrub_log_file", &scrub_log_file);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get scrubber log file "
- "location");
-
- ret = dict_get_int32(dict, "count", &count);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "count not get count value from"
- " dictionary");
- goto out;
- }
+ gf_log("cli", GF_LOG_TRACE, "failed to get scrubber log file location");
for (i = 1; i <= count; i++) {
- snprintf(key, 256, "scrub-running-%d", i);
+ snprintf(key, sizeof(key), "scrub-running-%d", i);
ret = dict_get_int8(dict, key, &scrub_running);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get scrubbed "
- "files");
+ gf_log("cli", GF_LOG_TRACE, "failed to get scrubbed files");
if (scrub_running)
break;
}
@@ -10998,56 +10614,41 @@ gf_cli_print_bitrot_scrub_status(dict_t *dict)
node_name = NULL;
last_scrub = NULL;
scrub_time = 0;
- days = 0;
- hours = 0;
- minutes = 0;
- seconds = 0;
error_count = 0;
scrub_files = 0;
unsigned_files = 0;
- snprintf(key, 256, "node-name-%d", i);
+ snprintf(key, sizeof(key), "node-name-%d", i);
ret = dict_get_str(dict, key, &node_name);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get node-name");
- snprintf(key, 256, "scrubbed-files-%d", i);
+ snprintf(key, sizeof(key), "scrubbed-files-%d", i);
ret = dict_get_uint64(dict, key, &scrub_files);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get scrubbed "
- "files");
+ gf_log("cli", GF_LOG_TRACE, "failed to get scrubbed files");
- snprintf(key, 256, "unsigned-files-%d", i);
+ snprintf(key, sizeof(key), "unsigned-files-%d", i);
ret = dict_get_uint64(dict, key, &unsigned_files);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get unsigned "
- "files");
+ gf_log("cli", GF_LOG_TRACE, "failed to get unsigned files");
- snprintf(key, 256, "scrub-duration-%d", i);
+ snprintf(key, sizeof(key), "scrub-duration-%d", i);
ret = dict_get_uint64(dict, key, &scrub_time);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get last scrub "
- "duration");
+ gf_log("cli", GF_LOG_TRACE, "failed to get last scrub duration");
- snprintf(key, 256, "last-scrub-time-%d", i);
+ snprintf(key, sizeof(key), "last-scrub-time-%d", i);
ret = dict_get_str(dict, key, &last_scrub);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get last scrub"
- " time");
- snprintf(key, 256, "error-count-%d", i);
+ gf_log("cli", GF_LOG_TRACE, "failed to get last scrub time");
+ snprintf(key, sizeof(key), "error-count-%d", i);
ret = dict_get_uint64(dict, key, &error_count);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get error "
- "count");
+ gf_log("cli", GF_LOG_TRACE, "failed to get error count");
cli_out("\n%s\n",
- "=========================================="
- "===============");
+ "=========================================================");
cli_out("%s: %s\n", "Node", node_name);
@@ -11076,7 +10677,7 @@ gf_cli_print_bitrot_scrub_status(dict_t *dict)
cli_out("%s:\n", "Corrupted object's [GFID]");
/* Printing list of bad file's (Corrupted object's)*/
for (j = 0; j < error_count; j++) {
- snprintf(key, 256, "quarantine-%d-%d", j, i);
+ snprintf(key, sizeof(key), "quarantine-%d-%d", j, i);
ret = dict_get_str(dict, key, &bad_file_str);
if (!ret) {
cli_out("%s\n", bad_file_str);
@@ -11085,15 +10686,14 @@ gf_cli_print_bitrot_scrub_status(dict_t *dict)
}
}
cli_out("%s\n",
- "=========================================="
- "===============");
+ "=========================================================");
out:
GF_FREE(scrub_state_op);
return 0;
}
-int
+static int
gf_cli_bitrot_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -11117,7 +10717,7 @@ gf_cli_bitrot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -11146,81 +10746,67 @@ gf_cli_bitrot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "failed to unserialize "
- "req-buffer to dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
gf_log("cli", GF_LOG_DEBUG, "Received resp to bit rot command");
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get command type");
goto out;
}
+ if ((type == GF_BITROT_CMD_SCRUB_STATUS) &&
+ !(global_state->mode & GLUSTER_MODE_XML)) {
+ ret = gf_cli_print_bitrot_scrub_status(dict);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to print bitrot scrub status");
+ }
+ goto out;
+ }
+
/* Ignoring the error, as using dict val for cli output only */
- ret = dict_get_str(dict, "scrub-value", &scrub_cmd);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
- gf_log("cli", GF_LOG_TRACE, "Failed to get scrub command");
+ gf_log("cli", GF_LOG_TRACE, "failed to get volume name");
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "scrub-value", &scrub_cmd);
if (ret)
- gf_log("cli", GF_LOG_TRACE, "failed to get volume name");
+ gf_log("cli", GF_LOG_TRACE, "Failed to get scrub command");
- ret = dict_get_str(dict, "cmd-str", &cmd_str);
+ ret = dict_get_str_sizen(dict, "cmd-str", &cmd_str);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get command string");
if (cmd_str)
cmd_op = strrchr(cmd_str, ' ') + 1;
- if ((type == GF_BITROT_CMD_SCRUB_STATUS) &&
- !(global_state->mode & GLUSTER_MODE_XML)) {
- ret = gf_cli_print_bitrot_scrub_status(dict);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to print bitrot "
- "scrub status");
- }
- goto out;
- }
-
switch (type) {
case GF_BITROT_OPTION_TYPE_ENABLE:
- cli_out(
- "volume bitrot: success bitrot enabled "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: success bitrot enabled for volume %s",
+ volname);
ret = 0;
goto out;
case GF_BITROT_OPTION_TYPE_DISABLE:
- cli_out(
- "volume bitrot: success bitrot disabled "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: success bitrot disabled for volume %s",
+ volname);
ret = 0;
goto out;
case GF_BITROT_CMD_SCRUB_ONDEMAND:
- cli_out(
- "volume bitrot: scrubber started ondemand "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: scrubber started ondemand for volume %s",
+ volname);
ret = 0;
goto out;
case GF_BITROT_OPTION_TYPE_SCRUB:
if (!strncmp("pause", scrub_cmd, sizeof("pause")))
- cli_out(
- "volume bitrot: scrubber paused "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: scrubber paused for volume %s",
+ volname);
if (!strncmp("resume", scrub_cmd, sizeof("resume")))
- cli_out(
- "volume bitrot: scrubber resumed "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: scrubber resumed for volume %s",
+ volname);
ret = 0;
goto out;
case GF_BITROT_OPTION_TYPE_SCRUB_FREQ:
@@ -11244,7 +10830,7 @@ xml_output:
ret = cli_xml_output_vol_profile(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -11262,7 +10848,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_bitrot(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -11271,30 +10857,25 @@ gf_cli_bitrot(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(&req, frame, gf_cli_bitrot_cbk,
(xdrproc_t)xdr_gf_cli_req, options,
GLUSTER_CLI_BITROT, this, cli_rpc_prog, NULL);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "cli_to_glusterd for "
- "bitrot failed");
+ gf_log("cli", GF_LOG_ERROR, "cli_to_glusterd for bitrot failed");
goto out;
}
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
+static struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_NULL] = {"NULL", NULL},
[GLUSTER_CLI_PROBE] = {"PROBE_QUERY", gf_cli_probe},
[GLUSTER_CLI_DEPROBE] = {"DEPROBE_QUERY", gf_cli_deprobe},
@@ -11342,6 +10923,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_BITROT] = {"BITROT", gf_cli_bitrot},
[GLUSTER_CLI_GET_STATE] = {"GET_STATE", gf_cli_get_state},
[GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", gf_cli_reset_brick},
+ [GLUSTER_CLI_GANESHA] = {"GANESHA", gf_cli_ganesha},
};
struct rpc_clnt_program cli_prog = {
@@ -11352,7 +10934,7 @@ struct rpc_clnt_program cli_prog = {
.proctable = gluster_cli_actors,
};
-struct rpc_clnt_procedure cli_quotad_procs[GF_AGGREGATOR_MAXVALUE] = {
+static struct rpc_clnt_procedure cli_quotad_procs[GF_AGGREGATOR_MAXVALUE] = {
[GF_AGGREGATOR_NULL] = {"NULL", NULL},
[GF_AGGREGATOR_LOOKUP] = {"LOOKUP", NULL},
[GF_AGGREGATOR_GETLIMIT] = {"GETLIMIT", cli_quotad_getlimit},
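/* Editor's note (illustrative sketch, not part of the patch): the hunks above
 * repeatedly replace dict_get_str()/dict_get_int32() with their _strn/_sizen
 * variants, reusing the snprintf() return value as the key length so the dict
 * code does not have to re-run strlen() on the key. A minimal sketch of that
 * pattern, assuming the glusterfs dict API as used above (the header path and
 * the helper names below are assumptions made for illustration only):
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <glusterfs/dict.h> /* assumed header location */

static int
fetch_volname(dict_t *dict, uint64_t i, char **volname)
{
    char buf[64];
    int keylen;

    /* snprintf() already returns the key length; dict_get_strn() takes it
     * as a parameter instead of computing strlen(buf) again. */
    keylen = snprintf(buf, sizeof(buf), "volume%" PRIu64 "-volname", i);
    return dict_get_strn(dict, buf, keylen, volname);
}

static int
fetch_snapcount(dict_t *dict, int32_t *snapcount)
{
    /* For string-literal keys the _sizen variants derive the key length at
     * compile time, which is what the hunks above switch to. */
    return dict_get_int32_sizen(dict, "snapcount", snapcount);
}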
diff --git a/cli/src/cli-xml-output.c b/cli/src/cli-xml-output.c
index 3accd9ce4bf..069de75801c 100644
--- a/cli/src/cli-xml-output.c
+++ b/cli/src/cli-xml-output.c
@@ -1661,15 +1661,15 @@ cli_xml_output_vol_top_rw_perf(xmlTextWriterPtr writer, dict_t *dict,
int ret = -1;
char *filename = NULL;
uint64_t throughput = 0;
- long int time_sec = 0;
- long int time_usec = 0;
- char timestr[256] = {
+ struct timeval tv = {
+ 0,
+ };
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
char key[1024] = {
0,
};
- int len;
/* <file> */
ret = xmlTextWriterStartElement(writer, (xmlChar *)"file");
@@ -1692,19 +1692,16 @@ cli_xml_output_vol_top_rw_perf(xmlTextWriterPtr writer, dict_t *dict,
XML_RET_CHECK_AND_GOTO(ret, out);
snprintf(key, sizeof(key), "%d-time-sec-%d", brick_index, member_index);
- ret = dict_get_int32(dict, key, (int32_t *)&time_sec);
+ ret = dict_get_int32(dict, key, (int32_t *)&tv.tv_sec);
if (ret)
goto out;
snprintf(key, sizeof(key), "%d-time-usec-%d", brick_index, member_index);
- ret = dict_get_int32(dict, key, (int32_t *)&time_usec);
+ ret = dict_get_int32(dict, key, (int32_t *)&tv.tv_usec);
if (ret)
goto out;
- gf_time_fmt(timestr, sizeof timestr, time_sec, gf_timefmt_FT);
- len = strlen(timestr);
- snprintf(timestr + len, sizeof(timestr) - len, ".%" GF_PRI_SUSECONDS,
- time_usec);
+ gf_time_fmt_tv(timestr, sizeof timestr, &tv, gf_timefmt_FT);
ret = xmlTextWriterWriteFormatElement(writer, (xmlChar *)"time", "%s",
timestr);
XML_RET_CHECK_AND_GOTO(ret, out);
@@ -1953,11 +1950,11 @@ cli_xml_output_vol_profile_stats(xmlTextWriterPtr writer, dict_t *dict,
XML_RET_CHECK_AND_GOTO(ret, out);
ret = xmlTextWriterWriteFormatElement(writer, (xmlChar *)"size",
- "%" PRIu32, (1 << i));
+ "%" PRIu32, (1U << i));
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%d-%d-read-%d", brick_index, interval,
- (1 << i));
+ snprintf(key, sizeof(key), "%d-%d-read-%" PRIu32, brick_index, interval,
+ (1U << i));
ret = dict_get_uint64(dict, key, &read_count);
if (ret)
read_count = 0;
@@ -1965,8 +1962,8 @@ cli_xml_output_vol_profile_stats(xmlTextWriterPtr writer, dict_t *dict,
"%" PRIu64, read_count);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%d-%d-write-%d", brick_index, interval,
- (1 << i));
+ snprintf(key, sizeof(key), "%d-%d-write-%" PRIu32, brick_index,
+ interval, (1U << i));
ret = dict_get_uint64(dict, key, &write_count);
if (ret)
write_count = 0;
@@ -2507,8 +2504,9 @@ cli_xml_output_vol_info(cli_local_t *local, dict_t *dict)
ret = dict_get_int32(dict, key, &dist_count);
if (ret)
goto out;
- ret = xmlTextWriterWriteFormatElement(
- local->writer, (xmlChar *)"distCount", "%d", dist_count);
+ ret = xmlTextWriterWriteFormatElement(local->writer,
+ (xmlChar *)"distCount", "%d",
+ (brick_count / dist_count));
XML_RET_CHECK_AND_GOTO(ret, out);
snprintf(key, sizeof(key), "volume%d.stripe_count", i);
@@ -2779,7 +2777,9 @@ cli_xml_output_peer_hostnames(xmlTextWriterPtr writer, dict_t *dict,
XML_RET_CHECK_AND_GOTO(ret, out);
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "%s.hostname%d", prefix, i);
+ ret = snprintf(key, sizeof(key), "%s.hostname%d", prefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &hostname);
if (ret)
goto out;
@@ -3577,20 +3577,21 @@ cli_xml_output_vol_gsync_status(dict_t *dict, xmlTextWriterPtr writer)
char *volume_next = NULL;
char *slave = NULL;
char *slave_next = NULL;
- char *title_values[] = {"master_node", "", "master_brick", "slave_user",
- "slave", "slave_node", "status", "crawl_status",
- /* last_synced */
- "", "entry", "data", "meta", "failures",
- /* checkpoint_time */
- "", "checkpoint_completed",
- /* checkpoint_completion_time */
- "", "master_node_uuid",
- /* last_synced_utc */
- "last_synced",
- /* checkpoint_time_utc */
- "checkpoint_time",
- /* checkpoint_completion_time_utc */
- "checkpoint_completion_time"};
+ static const char *title_values[] = {
+ "master_node", "", "master_brick", "slave_user", "slave", "slave_node",
+ "status", "crawl_status",
+ /* last_synced */
+ "", "entry", "data", "meta", "failures",
+ /* checkpoint_time */
+ "", "checkpoint_completed",
+ /* checkpoint_completion_time */
+ "", "master_node_uuid",
+ /* last_synced_utc */
+ "last_synced",
+ /* checkpoint_time_utc */
+ "checkpoint_time",
+ /* checkpoint_completion_time_utc */
+ "checkpoint_completion_time"};
GF_ASSERT(dict);
@@ -4201,7 +4202,9 @@ cli_xml_snapshot_info_snap_vol(xmlTextWriterPtr writer, xmlDocPtr doc,
ret = xmlTextWriterStartElement(writer, (xmlChar *)"snapVolume");
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.volname", keyprefix);
+ ret = snprintf(key, sizeof(key), "%s.volname", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4213,7 +4216,9 @@ cli_xml_snapshot_info_snap_vol(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.vol-status", keyprefix);
+ ret = snprintf(key, sizeof(key), "%s.vol-status", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4228,7 +4233,10 @@ cli_xml_snapshot_info_snap_vol(xmlTextWriterPtr writer, xmlDocPtr doc,
/* If the command is snap_driven then we need to show origin volume
* info. Else this is shown in the start of info display.*/
if (snap_driven) {
- snprintf(key, sizeof(key), "%s.", keyprefix);
+ ret = snprintf(key, sizeof(key), "%s.", keyprefix);
+ if (ret < 0)
+ goto out;
+
ret = cli_xml_snapshot_info_orig_vol(writer, doc, dict, key);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
@@ -4279,7 +4287,9 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
ret = xmlTextWriterStartElement(writer, (xmlChar *)"snapshot");
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.snapname", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snapname", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key_buffer, &buffer);
if (ret) {
@@ -4291,7 +4301,9 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.snap-id", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snap-id", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key_buffer, &buffer);
if (ret) {
@@ -4303,7 +4315,9 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.snap-desc", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snap-desc", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key_buffer, &buffer);
if (!ret) {
@@ -4315,7 +4329,9 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
}
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.snap-time", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snap-time", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key_buffer, &buffer);
if (ret) {
@@ -4327,7 +4343,10 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.vol-count", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.vol-count", keyprefix);
+ if (ret < 0)
+ goto out;
+
ret = dict_get_int32(dict, key_buffer, &volcount);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Fail to get snap vol count");
@@ -4341,7 +4360,10 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
ret = dict_get_int32(dict, key_buffer, &volcount);
/* Display info of each snapshot volume */
for (i = 1; i <= volcount; i++) {
- snprintf(key_buffer, sizeof(key_buffer), "%s.vol%d", keyprefix, i);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.vol%d", keyprefix,
+ i);
+ if (ret < 0)
+ goto out;
ret = cli_xml_snapshot_info_snap_vol(writer, doc, dict, key_buffer,
snap_driven);
@@ -4465,7 +4487,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
GF_ASSERT(dict);
GF_ASSERT(keyprefix);
- snprintf(key, sizeof(key), "%s.brickcount", keyprefix);
+ ret = snprintf(key, sizeof(key), "%s.brickcount", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_int32(dict, key, &brickcount);
if (ret) {
@@ -4483,7 +4507,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
ret = xmlTextWriterStartElement(writer, (xmlChar *)"brick");
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.path", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.path", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4502,7 +4528,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.vgname", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.vgname", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4515,7 +4543,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.status", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.status", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4528,7 +4558,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.pid", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.pid", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_int32(dict, key, &pid);
if (ret) {
@@ -4541,7 +4573,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.data", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.data", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4554,7 +4588,10 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.lvsize", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.lvsize", keyprefix, i);
+ if (ret < 0)
+ goto out;
+
ret = dict_get_str(dict, key, &buffer);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Unable to get LV Size");
diff --git a/cli/src/cli.c b/cli/src/cli.c
index fac32d3e9ca..a52b39c5fb8 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -33,12 +33,6 @@
#include <malloc.h>
#endif
-#ifdef HAVE_MALLOC_STATS
-#ifdef DEBUG
-#include <mcheck.h>
-#endif
-#endif
-
#include "cli.h"
#include "cli-quotad-client.h"
#include "cli-cmd.h"
@@ -61,7 +55,6 @@
#include "xdr-generic.h"
-extern int connected;
/* using argp for command line parsing */
const char *argp_program_version =
@@ -78,12 +71,16 @@ const char *argp_program_version =
const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
struct rpc_clnt *global_quotad_rpc;
+
struct rpc_clnt *global_rpc;
rpc_clnt_prog_t *cli_rpc_prog;
extern struct rpc_clnt_program cli_prog;
+int cli_default_conn_timeout = 120;
+int cli_ten_minutes_timeout = 600;
+
static int
glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
{
@@ -306,14 +303,14 @@ cli_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
switch (event) {
case RPC_CLNT_CONNECT: {
- cli_cmd_broadcast_connected();
+ cli_cmd_broadcast_connected(_gf_true);
gf_log(this->name, GF_LOG_TRACE, "got RPC_CLNT_CONNECT");
break;
}
case RPC_CLNT_DISCONNECT: {
+ cli_cmd_broadcast_connected(_gf_false);
gf_log(this->name, GF_LOG_TRACE, "got RPC_CLNT_DISCONNECT");
- connected = 0;
if (!global_state->prompt && global_state->await_connected) {
ret = 1;
cli_out(
@@ -854,10 +851,6 @@ main(int argc, char *argv[])
if (ret)
goto out;
- ret = cli_cmd_cond_init();
- if (ret)
- goto out;
-
ret = cli_input_init(&state);
if (ret)
goto out;
diff --git a/cli/src/cli.h b/cli/src/cli.h
index 7166991a7ff..c0d933e8f8a 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -17,6 +17,7 @@
#include <glusterfs/quota-common-utils.h>
#include "cli1-xdr.h"
+#include "gd-common-utils.h"
#if (HAVE_LIB_XML)
#include <libxml/encoding.h>
@@ -39,8 +40,8 @@ enum argp_option_keys {
ARGP_PORT_KEY = 'p',
};
-int cli_default_conn_timeout;
-int cli_ten_minutes_timeout;
+extern int cli_default_conn_timeout;
+extern int cli_ten_minutes_timeout;
typedef enum {
COLD_BRICK_COUNT,
@@ -188,6 +189,12 @@ typedef ssize_t (*cli_serialize_t)(struct iovec outmsg, void *args);
extern struct cli_state *global_state; /* use only in readline callback */
+extern struct rpc_clnt *global_quotad_rpc;
+
+extern struct rpc_clnt *global_rpc;
+
+extern rpc_clnt_prog_t *cli_rpc_prog;
+
typedef const char *(*cli_selector_t)(void *wcon);
char *
@@ -266,8 +273,8 @@ int32_t
cli_cmd_volume_reset_parse(const char **words, int wordcount, dict_t **opt);
int32_t
-cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **opt,
- char **errstr);
+cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **opt, char **errstr);
int32_t
cli_cmd_quota_parse(const char **words, int wordcount, dict_t **opt);
@@ -283,6 +290,10 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
int wordcount, dict_t **options, char **op_errstr);
int32_t
+cli_cmd_ganesha_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **options, char **op_errstr);
+
+int32_t
cli_cmd_get_state_parse(struct cli_state *state, const char **words,
int wordcount, dict_t **options, char **op_errstr);
@@ -324,11 +335,14 @@ cli_local_get();
void
cli_local_wipe(cli_local_t *local);
+gf_boolean_t
+cli_cmd_connected();
+
int32_t
-cli_cmd_await_connected();
+cli_cmd_await_connected(unsigned timeout);
int32_t
-cli_cmd_broadcast_connected();
+cli_cmd_broadcast_connected(gf_boolean_t status);
int
cli_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
@@ -483,9 +497,6 @@ cli_xml_output_snapshot(int cmd_type, dict_t *dict, int op_ret, int op_errno,
int
cli_xml_snapshot_status_single_snap(cli_local_t *local, dict_t *dict,
char *key);
-char *
-is_server_debug_xlator(void *myframe);
-
int32_t
cli_cmd_snapshot_parse(const char **words, int wordcount, dict_t **options,
struct cli_state *state);
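/* Editor's note (illustrative sketch, not part of the patch): the cli.h and
 * cli.c hunks above convert the timeout globals from tentative definitions
 * in the header into extern declarations plus a single definition in cli.c,
 * the usual fix for duplicate-symbol link errors when a header is included
 * from several translation units (for example under -fno-common). The
 * pattern, using the names and values shown in the hunks:
 */

/* cli.h -- declaration only, safe to include everywhere */
extern int cli_default_conn_timeout;
extern int cli_ten_minutes_timeout;

/* cli.c -- exactly one translation unit provides the definition */
int cli_default_conn_timeout = 120;
int cli_ten_minutes_timeout = 600;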
diff --git a/configure.ac b/configure.ac
index bec743bf084..e2d6fd66cec 100644
--- a/configure.ac
+++ b/configure.ac
@@ -21,15 +21,13 @@ AM_INIT_AUTOMAKE([tar-pax foreign])
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES(yes)])
-if make --help 2>&1 | grep -q no-print-directory; then
- AM_MAKEFLAGS="$AM_MAKEFLAGS --no-print-directory";
-fi
-
AC_CONFIG_HEADERS([config.h site.h])
AC_CONFIG_FILES([Makefile
libglusterfs/Makefile
libglusterfs/src/Makefile
+ libglusterd/Makefile
+ libglusterd/src/Makefile
geo-replication/src/peer_gsec_create
geo-replication/src/peer_mountbroker
geo-replication/src/peer_mountbroker.py
@@ -46,11 +44,8 @@ AC_CONFIG_FILES([Makefile
rpc/rpc-transport/Makefile
rpc/rpc-transport/socket/Makefile
rpc/rpc-transport/socket/src/Makefile
- rpc/rpc-transport/rdma/Makefile
- rpc/rpc-transport/rdma/src/Makefile
rpc/xdr/Makefile
rpc/xdr/src/Makefile
- rpc/xdr/gen/Makefile
xlators/Makefile
xlators/meta/Makefile
xlators/meta/src/Makefile
@@ -169,6 +164,8 @@ AC_CONFIG_FILES([Makefile
xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile
xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile
xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile
+ xlators/features/metadisp/Makefile
+ xlators/features/metadisp/src/Makefile
xlators/playground/Makefile
xlators/playground/template/Makefile
xlators/playground/template/src/Makefile
@@ -196,6 +193,10 @@ AC_CONFIG_FILES([Makefile
extras/init.d/glustereventsd-Debian
extras/init.d/glustereventsd-Redhat
extras/init.d/glustereventsd-FreeBSD
+ extras/ganesha/Makefile
+ extras/ganesha/config/Makefile
+ extras/ganesha/scripts/Makefile
+ extras/ganesha/ocf/Makefile
extras/systemd/Makefile
extras/systemd/glusterd.service
extras/systemd/glustereventsd.service
@@ -274,36 +275,78 @@ AC_ARG_ENABLE([debug],
[Enable debug build options.]))
if test "x$enable_debug" = "xyes"; then
BUILD_DEBUG=yes
- GF_CFLAGS="${GF_CFLAGS} -g -rdynamic -O0 -DDEBUG"
+ GF_CFLAGS="${GF_CFLAGS} -g -O0 -DDEBUG"
else
BUILD_DEBUG=no
fi
+SANITIZER=none
+
AC_ARG_ENABLE([asan],
AC_HELP_STRING([--enable-asan],
[Enable Address Sanitizer support]))
if test "x$enable_asan" = "xyes"; then
- BUILD_ASAN=yes
- AC_CHECK_LIB([asan], [__asan_report_error], ,
- [AC_MSG_ERROR([libasan.so not found, this is required for --enable-asan])])
- GF_CFLAGS="${GF_CFLAGS} -O1 -g -fsanitize=address -fno-omit-frame-pointer"
- dnl -lasan always need to be the first library, otherwise libxml complains
- GF_LDFLAGS="-lasan ${GF_LDFLAGS}"
-else
- BUILD_ASAN=no
+ SANITIZER=asan
+ AC_CHECK_LIB([asan], [__asan_init], ,
+ [AC_MSG_ERROR([--enable-asan requires libasan.so, exiting])])
+ GF_CFLAGS="${GF_CFLAGS} -O2 -g -fsanitize=address -fno-omit-frame-pointer"
+ GF_LDFLAGS="${GF_LDFLAGS} -lasan"
fi
-AC_ARG_ENABLE([atan],
+AC_ARG_ENABLE([tsan],
AC_HELP_STRING([--enable-tsan],
- [Enable ThreadSanitizer support]))
+ [Enable Thread Sanitizer support]))
if test "x$enable_tsan" = "xyes"; then
- BUILD_TSAN=yes
+ if test "x$SANITIZER" != "xnone"; then
+ AC_MSG_ERROR([only one sanitizer can be enabled at once])
+ fi
+ SANITIZER=tsan
AC_CHECK_LIB([tsan], [__tsan_init], ,
- [AC_MSG_ERROR([libtsan.so not found, this is required for --enable-tsan])])
- GF_CFLAGS="${GF_CFLAGS} -O2 -g -fsanitize=thread"
+ [AC_MSG_ERROR([--enable-tsan requires libtsan.so, exiting])])
+ if test "x$ac_cv_lib_tsan___tsan_init" = xyes; then
+ AC_MSG_CHECKING([whether tsan API can be used])
+ saved_CFLAGS=${CFLAGS}
+ CFLAGS="${CFLAGS} -fsanitize=thread"
+ AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM([
+ [#include <sanitizer/tsan_interface.h>]],
+ [[__tsan_create_fiber(0)]])],
+ [TSAN_API=yes], [TSAN_API=no])
+ AC_MSG_RESULT([$TSAN_API])
+ if test x$TSAN_API = "xyes"; then
+ AC_DEFINE(HAVE_TSAN_API, 1, [Define if tsan API can be used.])
+ fi
+ CFLAGS=${saved_CFLAGS}
+ fi
+ GF_CFLAGS="${GF_CFLAGS} -O2 -g -fsanitize=thread -fno-omit-frame-pointer"
GF_LDFLAGS="${GF_LDFLAGS} -ltsan"
-else
- BUILD_TSAN=no
+fi
+
+AC_ARG_ENABLE([ubsan],
+ AC_HELP_STRING([--enable-ubsan],
+ [Enable Undefined Behavior Sanitizer support]))
+if test "x$enable_ubsan" = "xyes"; then
+ if test "x$SANITIZER" != "xnone"; then
+ AC_MSG_ERROR([only one sanitizer can be enabled at once])
+ fi
+ SANITIZER=ubsan
+ AC_CHECK_LIB([ubsan], [__ubsan_default_options], ,
+ [AC_MSG_ERROR([--enable-ubsan requires libubsan.so, exiting])])
+ GF_CFLAGS="${GF_CFLAGS} -O2 -g -fsanitize=undefined -fno-omit-frame-pointer"
+ GF_LDFLAGS="${GF_LDFLAGS} -lubsan"
+fi
+
+# Initialize CFLAGS before usage
+BUILD_TCMALLOC=no
+AC_ARG_ENABLE([tcmalloc],
+ AC_HELP_STRING([--enable-tcmalloc],
+ [Enable linking with tcmalloc library.]))
+if test "x$enable_tcmalloc" = "xyes"; then
+ BUILD_TCMALLOC=yes
+ GF_CFLAGS="${GF_CFLAGS} -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free"
+ AC_CHECK_LIB([tcmalloc], [malloc], [],
+ [AC_MSG_ERROR([when --enable-tcmalloc is used, tcmalloc library needs to be present])])
+ GF_LDFLAGS="-ltcmalloc ${GF_LDFLAGS}"
fi
@@ -366,12 +409,27 @@ esac
# --enable-valgrind prevents calling dlclose(), this leaks memory
AC_ARG_ENABLE([valgrind],
- AC_HELP_STRING([--enable-valgrind],
- [Enable valgrind for resource leak debugging.]))
-if test "x$enable_valgrind" = "xyes"; then
- AC_DEFINE(RUN_WITH_VALGRIND, 1, [define if all processes should run under valgrind])
-fi
-
+ AC_HELP_STRING([--enable-valgrind@<:@=memcheck,drd@:>@],
+ [Enable valgrind for resource leak (memcheck, which is
+ the default) or thread synchronization (drd) debugging.]))
+case x$enable_valgrind in
+ xmemcheck|xyes)
+ AC_DEFINE(RUN_WITH_MEMCHECK, 1,
+ [Define if all processes should run under 'valgrind --tool=memcheck'.])
+ VALGRIND_TOOL=memcheck
+ ;;
+ xdrd)
+ AC_DEFINE(RUN_WITH_DRD, 1,
+ [Define if all processes should run under 'valgrind --tool=drd'.])
+ VALGRIND_TOOL=drd
+ ;;
+ x|xno)
+ VALGRIND_TOOL=no
+ ;;
+ *)
+ AC_MSG_ERROR([Please specify --enable-valgrind@<:@=memcheck,drd@:>@])
+ ;;
+esac
AC_ARG_WITH([previous-options],
[AS_HELP_STRING([--with-previous-options],
@@ -672,6 +730,14 @@ if test "x$enable_fuse_client" != "xno"; then
fi
AC_SUBST(FUSE_CLIENT_SUBDIR)
+
+AC_ARG_ENABLE([fuse-notifications],
+ AS_HELP_STRING([--disable-fuse-notifications], [Disable FUSE notifications]))
+
+AS_IF([test "x$enable_fuse_notifications" != "xno"], [
+ AC_DEFINE([HAVE_FUSE_NOTIFICATIONS], [1], [Use FUSE notifications])
+])
+
# end FUSE section
@@ -712,53 +778,6 @@ if test "x$enable_epoll" != "xno"; then
fi
# end EPOLL section
-
-# IBVERBS section
-AC_ARG_ENABLE([ibverbs],
- AC_HELP_STRING([--disable-ibverbs],
- [Do not build the ibverbs transport]))
-
-if test "x$enable_ibverbs" != "xno"; then
- AC_CHECK_LIB([ibverbs],
- [ibv_get_device_list],
- [HAVE_LIBIBVERBS="yes"],
- [HAVE_LIBIBVERBS="no"])
- AC_CHECK_LIB([rdmacm], [rdma_create_id], [HAVE_RDMACM="yes"], [HAVE_RDMACM="no"])
- if test "x$HAVE_RDMACM" = "xyes" ; then
- AC_CHECK_DECLS(
- [RDMA_OPTION_ID_REUSEADDR],
- [],
- [AC_ERROR([Need at least version 1.0.15 of librdmacm])],
- [[#include <rdma/rdma_cma.h>]])
- fi
-fi
-
-if test "x$enable_ibverbs" = "xyes"; then
- if test "x$HAVE_LIBIBVERBS" = "xno"; then
- echo "ibverbs-transport requested, but libibverbs is not present."
- exit 1
- fi
-
- if test "x$HAVE_RDMACM" = "xno"; then
- echo "ibverbs-transport requested, but librdmacm is not present."
- exit 1
- fi
-fi
-
-BUILD_RDMA=no
-BUILD_IBVERBS=no
-if test "x$enable_ibverbs" != "xno" -a "x$HAVE_LIBIBVERBS" = "xyes" -a "x$HAVE_RDMACM" = "xyes"; then
- IBVERBS_SUBDIR=ib-verbs
- BUILD_IBVERBS=yes
- RDMA_SUBDIR=rdma
- BUILD_RDMA=yes
-fi
-
-AC_SUBST(IBVERBS_SUBDIR)
-AC_SUBST(RDMA_SUBDIR)
-# end IBVERBS section
-
-
# SYNCDAEMON section
AC_ARG_ENABLE([georeplication],
AC_HELP_STRING([--disable-georeplication],
@@ -766,6 +785,9 @@ AC_ARG_ENABLE([georeplication],
BUILD_SYNCDAEMON=no
case $host_os in
+ freebsd*)
+#do nothing
+ ;;
linux*)
#do nothing
;;
@@ -805,6 +827,17 @@ fi
AC_SUBST(GEOREP_EXTRAS_SUBDIR)
AM_CONDITIONAL(USE_GEOREP, test "x$enable_georeplication" != "xno")
+# METADISP section
+AC_ARG_ENABLE([metadisp],
+ AC_HELP_STRING([--enable-metadisp],
+ [Enable the metadata dispersal xlator]))
+BUILD_METADISP=no
+if test "x${enable_metadisp}" = "xyes"; then
+ BUILD_METADISP=yes
+fi
+AM_CONDITIONAL([BUILD_METADISP], [test "x$BUILD_METADISP" = "xyes"])
+# end METADISP section
+
# Events section
AC_ARG_ENABLE([events],
AC_HELP_STRING([--disable-events],
@@ -943,6 +976,25 @@ if test "x${have_backtrace}" = "xyes"; then
fi
AC_SUBST(HAVE_BACKTRACE)
+dnl Old (before C11) compiler can compile (but not link) this:
+dnl
+dnl int main () {
+dnl _Static_assert(1, "True");
+dnl return 0;
+dnl }
+dnl
+dnl assuming that _Static_assert is an implicitly declared function. So
+dnl we're trying to link just to make sure that this is not the case.
+
+AC_MSG_CHECKING([whether $CC supports C11 _Static_assert])
+AC_TRY_LINK([], [_Static_assert(1, "True");],
+ [STATIC_ASSERT=yes], [STATIC_ASSERT=no])
+
+AC_MSG_RESULT([$STATIC_ASSERT])
+if test x$STATIC_ASSERT = "xyes"; then
+ AC_DEFINE(HAVE_STATIC_ASSERT, 1, [Define if C11 _Static_assert is supported.])
+fi
+
if test "x${have_backtrace}" != "xyes"; then
AC_TRY_COMPILE([#include <math.h>], [double x=0.0; x=ceil(0.0);],
[],
@@ -950,11 +1002,11 @@ AC_TRY_COMPILE([#include <math.h>], [double x=0.0; x=ceil(0.0);],
fi
dnl glusterfs prints memory usage to stderr by sending it SIGUSR1
-AC_CHECK_FUNC([malloc_stats], [have_malloc_stats=yes])
-if test "x${have_malloc_stats}" = "xyes"; then
- AC_DEFINE(HAVE_MALLOC_STATS, 1, [define if found malloc_stats])
+AC_CHECK_FUNC([mallinfo], [have_mallinfo=yes])
+if test "x${have_mallinfo}" = "xyes"; then
+ AC_DEFINE(HAVE_MALLINFO, 1, [define if found mallinfo])
fi
-AC_SUBST(HAVE_MALLOC_STATS)
+AC_SUBST(HAVE_MALLINFO)
dnl Linux, Solaris, Cygwin
AC_CHECK_MEMBERS([struct stat.st_atim.tv_nsec])
@@ -981,6 +1033,9 @@ case $host_os in
CFLAGS="${CFLAGS} -isystem /usr/local/include"
ARGP_LDADD=-largp
;;
+ *netbsd*)
+ ARGP_LDADD=-largp
+ ;;
esac
dnl argp-standalone does not provide a pkg-config file
AC_CHECK_HEADER([argp.h], AC_DEFINE(HAVE_ARGP, 1, [have argp]))
@@ -1050,6 +1105,19 @@ else
CFLAGS=${OLD_CFLAGS}
fi
+AC_CHECK_FUNC([syncfs], [have_syncfs=yes])
+if test "x${have_syncfs}" = "xyes"; then
+ AC_DEFINE(HAVE_SYNCFS, 1, [define if syncfs exists])
+else
+ OLD_CFLAGS=${CFLAGS}
+ CFLAGS="-D_GNU_SOURCE"
+ AC_CHECK_DECL([SYS_syncfs], , , [#include <sys/syscall.h>])
+ if test "x${ac_cv_have_decl_SYS_syncfs}" = "xyes"; then
+ AC_DEFINE(HAVE_SYNCFS_SYS, 1, [define if SYS_syncfs is available])
+ fi
+ CFLAGS=${OLD_CFLAGS}
+fi
+
BUILD_NANOSECOND_TIMESTAMPS=no
AC_CHECK_FUNC([utimensat], [have_utimensat=yes])
if test "x${have_utimensat}" = "xyes"; then
@@ -1188,7 +1256,6 @@ if test "x$exec_prefix" = xNONE; then
exec_prefix="$(eval echo $prefix)"
fi
GLUSTERFS_LIBEXECDIR="$(eval echo $libexecdir)/glusterfs"
-GLUSTERFSD_MISCDIR="$(eval echo $prefix)/var/lib/misc/glusterfsd"
prefix=$old_prefix
exec_prefix=$old_exec_prefix
@@ -1200,6 +1267,8 @@ fi
localstatedir="$(eval echo ${localstatedir})"
LOCALSTATEDIR=$localstatedir
+GLUSTERFSD_MISCDIR="$(eval echo ${localstatedir})/lib/misc/glusterfsd"
+
old_prefix=$prefix
if test "x$prefix" = xNONE; then
prefix=$ac_default_prefix
@@ -1241,10 +1310,6 @@ case $host_os in
;;
*freebsd*)
GF_HOST_OS="GF_BSD_HOST_OS"
- GF_CFLAGS="${GF_CFLAGS} -O0"
- GF_CFLAGS="${GF_CFLAGS} -DTHREAD_UNSAFE_BASENAME"
- GF_CFLAGS="${GF_CFLAGS} -DTHREAD_UNSAFE_DIRNAME"
- GF_CFLAGS="${GF_CFLAGS} -D_LIBGEN_H_"
GF_CFLAGS="${GF_CFLAGS} -DO_DSYNC=0"
GF_CFLAGS="${GF_CFLAGS} -Dxdr_quad_t=xdr_longlong_t"
GF_CFLAGS="${GF_CFLAGS} -Dxdr_u_quad_t=xdr_u_longlong_t"
@@ -1364,6 +1429,7 @@ AC_ARG_ENABLE([gnfs],
[Enable legacy gnfs server xlator.]))
if test "x${with_server}" = "xyes" -a "x$enable_gnfs" = "xyes"; then
BUILD_GNFS="yes"
+ GF_CFLAGS="$GF_CFLAGS -DBUILD_GNFS"
RPCBIND_SERVICE="rpcbind.service"
fi
AM_CONDITIONAL([BUILD_GNFS], [test x$BUILD_GNFS = xyes])
@@ -1524,9 +1590,9 @@ case $host_os in
;;
esac
dnl GF_XLATOR_DEFAULT_LDFLAGS is for most xlators that expose a common set of symbols
-GF_XLATOR_DEFAULT_LDFLAGS='-avoid-version -export-symbols $(top_srcdir)/xlators/xlator.sym $(UUID_LIBS) $(GF_NO_UNDEFINED)'
+GF_XLATOR_DEFAULT_LDFLAGS='-avoid-version -export-symbols $(top_srcdir)/xlators/xlator.sym $(UUID_LIBS) $(GF_NO_UNDEFINED) $(TIRPC_LIBS)'
dnl GF_XLATOR_LDFLAGS is for xlators that expose extra symbols, e.g. dht
-GF_XLATOR_LDFLAGS='-avoid-version $(UUID_LIBS) $(GF_NO_UNDEFINED)'
+GF_XLATOR_LDFLAGS='-avoid-version $(UUID_LIBS) $(GF_NO_UNDEFINED) $(TIRPC_LIBS)'
AC_SUBST(GF_HOST_OS)
AC_SUBST(GF_CFLAGS)
@@ -1541,6 +1607,13 @@ AC_SUBST(AM_LIBTOOLFLAGS)
AC_SUBST(GF_NO_UNDEFINED)
AC_SUBST(GF_XLATOR_DEFAULT_LDFLAGS)
AC_SUBST(GF_XLATOR_LDFLAGS)
+AC_SUBST(GF_XLATOR_MGNT_LIBADD)
+
+case $host_os in
+ *freebsd*)
+ GF_XLATOR_MGNT_LIBADD="-lutil -lprocstat"
+ ;;
+esac
CONTRIBDIR='$(top_srcdir)/contrib'
AC_SUBST(CONTRIBDIR)
@@ -1606,15 +1679,14 @@ echo
echo "GlusterFS configure summary"
echo "==========================="
echo "FUSE client : $BUILD_FUSE_CLIENT"
-echo "Infiniband verbs : $BUILD_IBVERBS"
echo "epoll IO multiplex : $BUILD_EPOLL"
echo "fusermount : $BUILD_FUSERMOUNT"
echo "readline : $BUILD_READLINE"
echo "georeplication : $BUILD_SYNCDAEMON"
echo "Linux-AIO : $BUILD_LIBAIO"
echo "Enable Debug : $BUILD_DEBUG"
-echo "Enable ASAN : $BUILD_ASAN"
-echo "Enable TSAN : $BUILD_TSAN"
+echo "Run with Valgrind : $VALGRIND_TOOL"
+echo "Sanitizer enabled : $SANITIZER"
echo "Use syslog : $USE_SYSLOG"
echo "XML output : $BUILD_XML_OUTPUT"
echo "Unit Tests : $BUILD_UNITTEST"
@@ -1632,16 +1704,14 @@ echo "IPV6 default : $with_ipv6_default"
echo "Use TIRPC : $with_libtirpc"
echo "With Python : ${PYTHON_VERSION}"
echo "Cloudsync : $BUILD_CLOUDSYNC"
+echo "Metadata dispersal : $BUILD_METADISP"
+echo "Link with TCMALLOC : $BUILD_TCMALLOC"
echo
-if test "x$BUILD_ASAN" = "xyes"; then
- echo "### Run below command before executing your tests if your system"
- echo "### has 'gcc --version' above 7.x (works on Fedora 27 and Above)"
- echo "export ASAN_OPTIONS=log_path=/var/log/glusterfs/asan-output.log"
- echo ""
- echo "### Above is required to get details of asan run, as glusterfs"
- echo "### processes are daemon processes. Further details and more"
- echo "### options can be found under 'Run-time flags' at"
- echo "### https://github.com/google/sanitizers/wiki/AddressSanitizerFlags"
- echo
+# dnl Note: ${X^^} capitalization assumes bash >= 4.x
+if test "x$SANITIZER" != "xnone"; then
+ echo "Note: since glusterfs processes are daemon processes, use"
+ echo "'export ${SANITIZER^^}_OPTIONS=log_path=/path/to/xxx.log' to collect"
+ echo "sanitizer output. Further details and more options can be"
+ echo "found at https://github.com/google/sanitizers."
fi
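The configure changes above also probe for syncfs() and, failing that, for the raw SYS_syncfs syscall number, defining HAVE_SYNCFS or HAVE_SYNCFS_SYS accordingly. A minimal sketch of how such a probe result is typically consumed; the `gf_syncfs_demo()` wrapper name is hypothetical and not part of this patch:

```c
/* Hypothetical fallback built on the HAVE_SYNCFS / HAVE_SYNCFS_SYS results
 * of the probe above.  Returns 0 on success, -1 (with errno set) on error. */
#define _GNU_SOURCE
#include <unistd.h>

#if !defined(HAVE_SYNCFS) && defined(HAVE_SYNCFS_SYS)
#include <sys/syscall.h>
#endif

static int
gf_syncfs_demo(int fd)
{
#if defined(HAVE_SYNCFS)
        return syncfs(fd);              /* libc wrapper is available */
#elif defined(HAVE_SYNCFS_SYS)
        return syscall(SYS_syncfs, fd); /* only the syscall number exists */
#else
        (void)fd;
        sync();                         /* last resort: flush everything */
        return 0;
#endif
}
```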
diff --git a/contrib/fuse-lib/mount.c b/contrib/fuse-lib/mount.c
index ffa0a4b6316..06ff191f542 100644
--- a/contrib/fuse-lib/mount.c
+++ b/contrib/fuse-lib/mount.c
@@ -52,12 +52,16 @@ gf_fuse_unmount (const char *mountpoint, int fd)
if (geteuid () == 0) {
fuse_mnt_umount ("fuse", mountpoint, mountpoint, 1);
return;
+ } else {
+ GFFUSE_LOGERR ("fuse: Effective-uid: %d", geteuid());
}
res = umount2 (mountpoint, 2);
if (res == 0)
return;
+ GFFUSE_LOGERR ("fuse: failed to unmount %s: %s",
+ mountpoint, strerror (errno));
pid = fork ();
if (pid == -1)
return;
@@ -67,6 +71,8 @@ gf_fuse_unmount (const char *mountpoint, int fd)
"--", mountpoint, NULL };
execvp (FUSERMOUNT_PROG, (char **)argv);
+ GFFUSE_LOGERR ("fuse: failed to execute fuserumount: %s",
+ strerror (errno));
_exit (1);
}
waitpid (pid, NULL, 0);
@@ -384,6 +390,7 @@ fuse_mount_sys (const char *mountpoint, char *fsname,
build_iovec (&iov, &iovlen, "from", "/dev/fuse", -1);
build_iovec (&iov, &iovlen, "volname", source, -1);
build_iovec (&iov, &iovlen, "fd", fdstr, -1);
+ build_iovec (&iov, &iovlen, "allow_other", NULL, -1);
ret = nmount (iov, iovlen, mountflags);
#else
ret = mount (source, mountpoint, fstype, mountflags,
diff --git a/contrib/sunrpc/xdr_sizeof.c b/contrib/sunrpc/xdr_sizeof.c
deleted file mode 100644
index ca1f7bf0a5e..00000000000
--- a/contrib/sunrpc/xdr_sizeof.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
- * Reserved. This file contains Original Code and/or Modifications of
- * Original Code as defined in and that are subject to the Apple Public
- * Source License Version 1.1 (the "License"). You may not use this file
- * except in compliance with the License. Please obtain a copy of the
- * License at http://www.apple.com/publicsource and read it before using
- * this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON- INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-/*
- * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
- * unrestricted use provided that this legend is included on all tape
- * media and as a part of the software program in whole or part. Users
- * may copy or modify Sun RPC without charge, but are not authorized
- * to license or distribute it to anyone else except as part of a product or
- * program developed by the user.
- *
- * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
- * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
- *
- * Sun RPC is provided with no support and without any obligation on the
- * part of Sun Microsystems, Inc. to assist in its use, correction,
- * modification or enhancement.
- *
- * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
- * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
- * OR ANY PART THEREOF.
- *
- * In no event will Sun Microsystems, Inc. be liable for any lost revenue
- * or profits or other special, indirect and consequential damages, even if
- * Sun has been advised of the possibility of such damages.
- *
- * Sun Microsystems, Inc.
- * 2550 Garcia Avenue
- * Mountain View, California 94043
- */
-
-/*
- * xdr_sizeof.c
- *
- * Copyright 1990 Sun Microsystems, Inc.
- *
- * General purpose routine to see how much space something will use
- * when serialized using XDR.
- */
-
-#ifdef GF_DARWIN_HOST_OS
-
-#include <rpc/types.h>
-#include <rpc/xdr.h>
-#include <sys/types.h>
-#include <sys/cdefs.h>
-
-#include <stdlib.h>
-
-/* ARGSUSED */
-#ifdef GF_DARWIN_HOST_OS
-static bool_t
-x_putlong (XDR *xdrs, const int *longp)
-{
- xdrs->x_handy += BYTES_PER_XDR_UNIT;
- return TRUE;
-}
-
-#else
-static bool_t
-x_putlong (XDR *xdrs, const long *longp)
-{
- xdrs->x_handy += BYTES_PER_XDR_UNIT;
- return TRUE;
-}
-#endif
-
-/* ARGSUSED */
-static bool_t
-x_putbytes (XDR *xdrs, const char *bp, u_int len)
-{
- xdrs->x_handy += len;
- return TRUE;
-}
-
-#ifdef GF_DARWIN_HOST_OS
-static u_int
-x_getpostn (XDR *xdrs)
-{
- return xdrs->x_handy;
-}
-#else
-static u_int
-x_getpostn (const XDR *xdrs)
-{
- return xdrs->x_handy;
-}
-#endif
-
-/* ARGSUSED */
-static bool_t
-x_setpostn (XDR *xdrs, u_int len)
-{
- /* This is not allowed */
- return FALSE;
-}
-
-static int32_t *
-x_inline (XDR *xdrs, u_int len)
-{
- if (len == 0)
- return NULL;
- if (xdrs->x_op != XDR_ENCODE)
- return NULL;
- if (len < (u_int) (long int) xdrs->x_base)
- {
- /* x_private was already allocated */
- xdrs->x_handy += len;
- return (int32_t *) xdrs->x_private;
- }
- else
- {
- /* Free the earlier space and allocate new area */
- free (xdrs->x_private);
- if ((xdrs->x_private = (caddr_t) malloc (len)) == NULL)
- {
- xdrs->x_base = 0;
- return NULL;
- }
- xdrs->x_base = (void *) (long) len;
- xdrs->x_handy += len;
- return (int32_t *) xdrs->x_private;
- }
-}
-
-static int
-harmless (void)
-{
- /* Always return FALSE/NULL, as the case may be */
- return 0;
-}
-
-static void
-x_destroy (XDR *xdrs)
-{
- xdrs->x_handy = 0;
- xdrs->x_base = 0;
- if (xdrs->x_private)
- {
- free (xdrs->x_private);
- xdrs->x_private = NULL;
- }
- return;
-}
-
-unsigned long
-xdr_sizeof (xdrproc_t func, void *data)
-{
- XDR x;
- struct xdr_ops ops;
- bool_t stat;
-
-#ifdef GF_DARWIN_HOST_OS
- typedef bool_t (*dummyfunc1) (XDR *, int *);
-#else
- typedef bool_t (*dummyfunc1) (XDR *, long *);
-#endif
- typedef bool_t (*dummyfunc2) (XDR *, caddr_t, u_int);
-
- ops.x_putlong = x_putlong;
- ops.x_putbytes = x_putbytes;
- ops.x_inline = x_inline;
- ops.x_getpostn = x_getpostn;
- ops.x_setpostn = x_setpostn;
- ops.x_destroy = x_destroy;
-
- /* the other harmless ones */
- ops.x_getlong = (dummyfunc1) harmless;
- ops.x_getbytes = (dummyfunc2) harmless;
-
- x.x_op = XDR_ENCODE;
- x.x_ops = &ops;
- x.x_handy = 0;
- x.x_private = (caddr_t) NULL;
- x.x_base = (caddr_t) 0;
-
- stat = func (&x, data, 0);
- if (x.x_private)
- free (x.x_private);
- return (stat == TRUE ? (unsigned) x.x_handy : 0);
-}
-#endif /* GF_DARWIN_HOST_OS */
diff --git a/doc/README.md b/doc/README.md
index 8a92bd990d0..6aa28642ef4 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -18,7 +18,7 @@ The Gluster features which are 'in progress' or implemented can be found at [git
## Upgrade Guide ##
-The gluster upgrade guide is maintained at [github](https://github.com/gluster/glusterdocs). The browsable upgrade guide can be found [here](http://docs.gluster.org/en/latest/Upgrade-Guide/README/)
+The gluster upgrade guide is maintained at [github](https://github.com/gluster/glusterdocs). The browsable upgrade guide can be found [here](http://docs.gluster.org/en/latest/Upgrade-Guide)
The doc patch has to be sent against the above mentioned repository.
diff --git a/doc/debugging/gfid-to-path.md b/doc/debugging/gfid-to-path.md
index 49e9aa09a3f..1917bf2cca1 100644
--- a/doc/debugging/gfid-to-path.md
+++ b/doc/debugging/gfid-to-path.md
@@ -64,9 +64,5 @@ trusted.glusterfs.pathinfo="(<DISTRIBUTE:test-dht> <POSIX(/mnt/brick-test/b):vm1
```
---
-### Get file path from GFID (Method 3):
-https://gist.github.com/semiosis/4392640
-
----
#### References and links:
[posix: placeholders for GFID to path conversion](http://review.gluster.org/5951)
diff --git a/doc/debugging/statedump.md b/doc/debugging/statedump.md
index 9d594320ddc..9dfdce15fad 100644
--- a/doc/debugging/statedump.md
+++ b/doc/debugging/statedump.md
@@ -328,7 +328,7 @@ cur-stdalloc=214
max-stdalloc=220
```
-Here, with cold count being 0 by default, `cur-stdalloc` indicated the number of `dict_t` objects that were allocated in heap using `mem_get()`, and yet to be freed using `mem_put()` (refer to this [page](https://github.com/gluster/glusterfs/blob/master/doc/data-structures/mem-pool.md) for more details on how mempool works). After the test case (name selfheal of 100 files), there was a rise in the cur-stdalloc value (from 14 to 214) for `dict_t`.
+Here, with cold count being 0 by default, `cur-stdalloc` indicated the number of `dict_t` objects that were allocated in heap using `mem_get()`, and yet to be freed using `mem_put()` (refer to this [page](../developer-guide/datastructure-mem-pool.md) for more details on how mempool works). After the test case (name selfheal of 100 files), there was a rise in the cur-stdalloc value (from 14 to 214) for `dict_t`.
After these leaks were fixed, glusterfs was again compiled with -DDEBUG flags, and the same steps were performed again and statedump was taken before and after executing the test case, of the mount. This was done to ascertain the validity of the fix. And the following are the results:
diff --git a/doc/developer-guide/README.md b/doc/developer-guide/README.md
index 1501eeb9207..aaf9c7476b0 100644
--- a/doc/developer-guide/README.md
+++ b/doc/developer-guide/README.md
@@ -51,11 +51,10 @@ Daemon Management Framework
Translators
-----------
-- [Block Device Tanslator](./bd-xlator.md)
- [Performance/write-Behind Translator](./write-behind.md)
- [Translator Development](./translator-development.md)
- [Storage/posix Translator](./posix.md)
-- [Compression translator](./network_compression.md)
+
Brick multiplex
---------------
@@ -74,7 +73,7 @@ Testing/Debugging
- [Using the Gluster Test
Framework](./Using-Gluster-Test-Framework.md) - Step by
step instructions for running the Gluster Test Framework
-- [Coredump Analysis](./coredump-analysis.md) - Steps to analize coredumps generated by regression machines.
+- [Coredump Analysis](../debugging/analyzing-regression-cores.md) - Steps to analyze coredumps generated by regression machines.
- [Identifying Resource Leaks](./identifying-resource-leaks.md)
Release Process
diff --git a/doc/developer-guide/commit-guidelines.md b/doc/developer-guide/commit-guidelines.md
index 9b191dac178..38bbe525cbd 100644
--- a/doc/developer-guide/commit-guidelines.md
+++ b/doc/developer-guide/commit-guidelines.md
@@ -118,7 +118,7 @@ The 'bug' line can reference a bug in a few ways. Gerrit creates a link to the b
**Updates: bz#1193929** -- use 'Updates: bz#NNNN' if the commit is only a partial fix and more work is needed.
**Updates: #175** -- use 'Updates: #NNNN' if the commit is only a partial fix and more work is needed for the feature completion.
-We encourage the use of Co-Authored-By: name <name@example.com> in commit messages to indicate people who worked on a particular patch. It's a convention for recognizing multiple authors, and our projects would encourage the stats tools to observe it when collecting statistics.
+We encourage the use of `Co-Authored-By: name <name@example.com>` in commit messages to indicate people who worked on a particular patch. It's a convention for recognizing multiple authors, and our projects would encourage the stats tools to observe it when collecting statistics.
### Summary of Git commit message structure
diff --git a/doc/developer-guide/fuse-interrupt.md b/doc/developer-guide/fuse-interrupt.md
index f92b5532eaf..ec991b81ec5 100644
--- a/doc/developer-guide/fuse-interrupt.md
+++ b/doc/developer-guide/fuse-interrupt.md
@@ -23,9 +23,10 @@ not exported to a header file).
```
enum fuse_interrupt_state {
- INTERRUPT_NONE,
+ /* ... */
INTERRUPT_SQUELCHED,
INTERRUPT_HANDLED,
+ /* ... */
};
typedef enum fuse_interrupt_state fuse_interrupt_state_t;
struct fuse_interrupt_record;
@@ -62,8 +63,58 @@ dummy implementation only for demonstration purposes.) Flush is chosen
because a `FLUSH` interrupt is easy to trigger (see
*tests/features/interrupt.t*). Interrupt handling for flush is switched on
by `--fuse-flush-handle-interrupt` (a hidden glusterfs command line flag).
-The flush interrupt handling code is guarded by the
-`flush_handle_interrupt` Boolean member of `fuse_private_t`.
+The implementation of flush interrupt is contained in the
+`fuse_flush_interrupt_handler()` function and in blocks guarded by the
+
+```
+if (priv->flush_handle_interrupt) { ...
+```
+
+conditional (where `priv` is a `*fuse_private_t`).
+
+### Overview
+
+"Regular" fuse fops and interrupt handlers interact via a list containing
+interrupt records.
+
+If a fop wishes to have its interrupts handled, it needs to set up an
+interrupt record and insert it into the list; also when it's to finish
+(ie. in its "cbk" stage) it needs to delete the record from the list.
+
+If no interrupt happens, that's basically all there is to it - a list insertion
+and deletion.
+
+However, if an interrupt comes for the fop, the interrupt FUSE request
+will carry the data identifying an ongoing fop (that is, its `unique`),
+and based on that, the interrupt record will be looked up in the list, and
+the specific interrupt handler (a member of the interrupt record) will be
+called.
+
+Usually the fop needs to share some data with the interrupt handler to
+enable it to perform its task (also shared via the interrupt record).
+The interrupt API offers two approaches to manage shared data:
+- _Async or reference-counting strategy_: from the point on when the interrupt
+ record is inserted to the list, it's owned jointly by the regular fop and
+ the prospective interrupt handler. Both of them need to check before they
+ return if the other is still holding a reference; if not, then they are
+ responsible for reclaiming the shared data.
+- _Sync or borrow strategy_: the interrupt handler is considered a borrower
+ of the shared data. The interrupt handler should not reclaim the shared
+ data. The fop will wait for the interrupt handler to finish (ie., the borrow
+ to be returned), then it has to reclaim the shared data.
+
+The user of the interrupt API needs to call the following functions to
+instrument this control flow:
+- `fuse_interrupt_record_insert()` in the fop to insert the interrupt record to
+ the list;
+- `fuse_interrupt_finish_fop()` in the fop (cbk) and
+- `fuse_interrupt_finish_interrupt()` in the interrupt handler
+
+to perform the needed synchronization at the end of their tenure. The data management
+strategies are implemented by the `fuse_interrupt_finish_*()` functions (which
+have an argument to specify which strategy to use); these routines take care
+of freeing the interrupt record itself, while the reclamation of the shared data
+is left to the API user.
### Usage
@@ -75,12 +126,15 @@ steps:
call (directly or as async callback) `fuse_interrupt_finish_interrupt()`.
The `intstat` argument to `fuse_interrupt_finish_interrupt` should be
either `INTERRUPT_SQUELCHED` or `INTERRUPT_HANDLED`.
- - `INTERRUPT_SQUELCHED` means that we choose not to handle the interrupt
+ - `INTERRUPT_SQUELCHED` means that the interrupt could not be delivered
and the fop is going on uninterrupted.
- `INTERRUPT_HANDLED` means that the interrupt was actually handled. In
this case the fop will be answered from interrupt context with errno
`EINTR` (that is, the fop should not send a response to the kernel).
+ (The enum `fuse_interrupt_state` includes further members, which are reserved
+ for internal use.)
+
We return to the `sync` and `datap` arguments later.
- In the `fuse_<FOP>` function create an interrupt record using
`fuse_interrupt_record_new()`, passing the incoming `fuse_in_header` and
@@ -92,10 +146,10 @@ steps:
`fuse_interrupt_record_insert()`.
- In `fuse_<FOP>_cbk` call `fuse_interrupt_finish_fop()`.
- `fuse_interrupt_finish_fop()` returns a Boolean according to whether the
- interrupt was handled. If it was, then the fuse request is already
+ interrupt was handled. If it was, then the FUSE request is already
answered and the stack gets destroyed in `fuse_interrupt_finish_fop` so
- `fuse_<FOP>_cbk` can just return (zero). Otherwise follow the standard
- cbk logic (answer the fuse request and destroy the stack -- these are
+ `fuse_<FOP>_cbk()` can just return (zero). Otherwise follow the standard
+ cbk logic (answer the FUSE request and destroy the stack -- these are
typically accomplished by `fuse_err_cbk()`).
- The last two argument of `fuse_interrupt_finish_fop()` and
`fuse_interrupt_finish_interrupt()` are `gf_boolean_t sync` and
@@ -124,7 +178,34 @@ steps:
then that pointer will be directed to the `data` member of the interrupt
record and it's up to the caller what it's doing with it.
- If `sync` is true, interrupt handler can use `datap = NULL`, and
- fop handler will have `datap` set.
+ fop handler will have `datap` point to a valid pointer.
- If `sync` is false, and handlers pass a pointer to a pointer for
`datap`, they should check if the pointed pointer is NULL before
attempting to deal with the data.
+
+### FUSE answer for the interrupted fop
+
+The kernel acknowledges a successful interruption for a given FUSE request
+if the filesystem daemon answers it with errno EINTR; upon that, the syscall
+which induced the request will be abruptly terminated with an interrupt, rather
+than returning a value.
+
+In glusterfs, this can be arranged in two ways.
+
+- If the interrupt handler wins the race for the interrupt record, ie.
+ `fuse_interrupt_finish_fop()` returns true to `fuse_<FOP>_cbk()`, then, as
+ said above, `fuse_<FOP>_cbk()` does not need to answer the FUSE request.
+ That's because then the interrupt handler will take care about answering
+ it (with errno EINTR).
+- If `fuse_interrupt_finish_fop()` returns false to `fuse_<FOP>_cbk()`, then
+ this return value does not inform the fop handler whether there was an interrupt
+ or not. This return value occurs both when the fop handler won the race for the
+ interrupt record against the interrupt handler, and when there was no interrupt
+ at all.
+
+ However, the internal logic of the fop handler might detect from other
+ circumstances that an interrupt was delivered. For example, the fop handler
+ might be sleeping, waiting for some data to arrive, so that a premature
+ wakeup (with no data present) occurs if the interrupt handler intervenes. In
+ such cases it's the responsibility of the fop handler to reply to the FUSE
+ request with errno EINTR.
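A self-contained sketch of the async (reference-counting) ownership rule described above: the interrupt record starts with two references, and whichever side drops the last one reclaims the shared data. All `demo_*` names are hypothetical; the real entry points are `fuse_interrupt_record_new()`, `fuse_interrupt_record_insert()` and the `fuse_interrupt_finish_*()` functions:

```c
/* Hypothetical illustration of the "async / reference-counting" strategy. */
#include <pthread.h>
#include <stdlib.h>

typedef struct demo_irec {
        pthread_mutex_t lock;
        int             refs;   /* fop + prospective interrupt handler */
        void           *data;   /* shared data both sides may need */
} demo_irec_t;

static demo_irec_t *
demo_irec_new(void *data)
{
        demo_irec_t *rec = calloc(1, sizeof(*rec));
        if (!rec)
                return NULL;
        pthread_mutex_init(&rec->lock, NULL);
        rec->refs = 2;          /* one ref for the fop, one for the handler */
        rec->data = data;
        return rec;
}

/* Called once by the fop (cbk) and once by the interrupt handler; the last
 * one out frees the shared data, which is exactly the async strategy's rule. */
static void
demo_irec_unref(demo_irec_t *rec)
{
        int last;

        pthread_mutex_lock(&rec->lock);
        last = (--rec->refs == 0);
        pthread_mutex_unlock(&rec->lock);

        if (last) {
                free(rec->data);        /* reclaim the shared data */
                pthread_mutex_destroy(&rec->lock);
                free(rec);
        }
}
```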
diff --git a/doc/developer-guide/identifying-resource-leaks.md b/doc/developer-guide/identifying-resource-leaks.md
index 851fc4424bc..950cae79b0a 100644
--- a/doc/developer-guide/identifying-resource-leaks.md
+++ b/doc/developer-guide/identifying-resource-leaks.md
@@ -174,3 +174,27 @@ In this case, the resource leak can be addressed by adding a single line to the
Running the same Valgrind command and comparing the output will show that the
memory leak in `xlators/meta/src/meta.c:init` is not reported anymore.
+
+### Running DRD, the Valgrind thread error detector
+
+When configuring GlusterFS with:
+
+```shell
+./configure --enable-valgrind
+```
+
+the default Valgrind tool (Memcheck) is enabled. But it's also possible to select
+one of Memcheck or DRD by using:
+
+```shell
+./configure --enable-valgrind=memcheck
+```
+
+or:
+
+```shell
+./configure --enable-valgrind=drd
+```
+
+respectively. When using DRD, it's recommended to consult
+https://valgrind.org/docs/manual/drd-manual.html before running.
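For context, the class of defect `valgrind --tool=drd` catches is an unsynchronized access to shared state from two threads. A deliberately racy toy program (not from the GlusterFS tree) that DRD will report:

```c
/* toy_race.c: two threads increment a counter without a lock, which DRD
 * reports as a conflicting access.  Build and run with:
 *   cc -g -pthread toy_race.c -o toy_race && valgrind --tool=drd ./toy_race */
#include <pthread.h>
#include <stdio.h>

static long counter; /* shared and deliberately unprotected */

static void *
worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++)
                counter++;      /* racy read-modify-write */
        return NULL;
}

int
main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        printf("counter = %ld (expected 200000)\n", counter);
        return 0;
}
```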
diff --git a/doc/developer-guide/options-to-contribute.md b/doc/developer-guide/options-to-contribute.md
index 5dd1895cb1c..3f0d84e7645 100644
--- a/doc/developer-guide/options-to-contribute.md
+++ b/doc/developer-guide/options-to-contribute.md
@@ -60,7 +60,7 @@ Reference: https://review.gluster.org/20925/
5. Now, pick a `.h` file, and see if a structure is very large, and see
-if re-aligning them as per [coding-standard](./conding-standard.md) gives any size benefit,
+if re-aligning them as per [coding-standard](./coding-standard.md) gives any size benefit,
if yes, go ahead and change it. Make sure you check all the structures
in the file for similar pattern.
diff --git a/doc/developer-guide/thread-naming.md b/doc/developer-guide/thread-naming.md
index 74efba28e45..513140d4437 100644
--- a/doc/developer-guide/thread-naming.md
+++ b/doc/developer-guide/thread-naming.md
@@ -29,10 +29,10 @@ gf_thread_create_detached (pthread_t *thread,
As max name length for a thread in POSIX is only 16 characters including the
'\0' character, you have to be a little creative with naming. Also, it is
important that all Gluster threads have common prefix. Considering these
-conditions, we have "gluster" as prefix for all the threads created by these
+conditions, we have "glfs_" as prefix for all the threads created by these
wrapper functions. It is responsibility of the owner of thread to provide the
suffix part of the name. It does not have to be a descriptive name, as it has
-only 8 letters to work with. However, it should be unique enough such that it
+only 10 letters to work with. However, it should be unique enough such that it
can be matched with a table which describes it.
If n number of threads are spwaned to perform same function, it is must that the
@@ -87,6 +87,7 @@ such that it can be matched with a table below without ambiguity.
- posixfsy - posix fsync
- posixhc - posix heal
- posixjan - posix janitor
+- posixrsv - posix reserve
- quiesce - quiesce dequeue
- rdmaAsyn - rdma async event handler
- rdmaehan - rdma completion handler
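For illustration, the 16-byte limit (15 visible characters plus the trailing '\0' on Linux) can be demonstrated with plain pthreads; this standalone sketch uses `pthread_setname_np()` directly instead of the `gf_thread_create_*()` wrappers:

```c
/* Standalone illustration of the thread-name length limit discussed above. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
worker(void *arg)
{
        (void)arg;
        sleep(1);
        return NULL;
}

int
main(void)
{
        pthread_t t;
        char name[16];

        pthread_create(&t, NULL, worker, NULL);

        /* the "glfs_" prefix leaves 10 characters for the descriptive suffix */
        pthread_setname_np(t, "glfs_posixjan");

        pthread_getname_np(t, name, sizeof(name));
        printf("thread named: %s\n", name);

        pthread_join(t, NULL);
        return 0;
}
```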
diff --git a/doc/developer-guide/translator-development.md b/doc/developer-guide/translator-development.md
index 8f67bb1cae9..f75935519f6 100644
--- a/doc/developer-guide/translator-development.md
+++ b/doc/developer-guide/translator-development.md
@@ -680,4 +680,4 @@ Original author's site:
Gluster community site:
- * [Translators](http://www.gluster.org/community/documentation/index.php/Translators)
+ * [Translators](https://docs.gluster.org/en/latest/Quick-Start-Guide/Architecture/#translators)
diff --git a/doc/developer-guide/xlator-classification.md b/doc/developer-guide/xlator-classification.md
index 36d924d0934..6073df9375f 100644
--- a/doc/developer-guide/xlator-classification.md
+++ b/doc/developer-guide/xlator-classification.md
@@ -23,7 +23,7 @@ This document is intended for the following community participants,
- Existing xlator maintainers
- Packaging and gluster management stack maintainers
-For a more user facing understanding it is recommended to read section [tbd](TBD)
+For a more user-facing understanding, it is recommended to read section (TBD)
in the gluster documentation.
## Categories
diff --git a/doc/gluster.8 b/doc/gluster.8
index 99a8d5e5048..ba595edca15 100644
--- a/doc/gluster.8
+++ b/doc/gluster.8
@@ -41,7 +41,7 @@ List all volumes in cluster
\fB\ volume status [all | <VOLNAME> [nfs|shd|<BRICK>|quotad]] [detail|clients|mem|inode|fd|callpool|tasks|client-list] \fR
Display status of all or specified volume(s)/brick
.TP
-\fB\ volume create <NEW-VOLNAME> [stripe <COUNT>] [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [redundancy <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> ... <TA-BRICK> \fR
+\fB\ volume create <NEW-VOLNAME> [stripe <COUNT>] [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> ... <TA-BRICK> \fR
Create a new volume of the specified type using the specified bricks and transport type (the default transport type is tcp).
To create a volume with both transports (tcp and rdma), give 'transport tcp,rdma' as an option.
.TP
@@ -113,6 +113,9 @@ Rotate the log file for corresponding volume/brick.
\fB\ volume profile <VOLNAME> {start|info [peek|incremental [peek]|cumulative|clear]|stop} [nfs] \fR
Profile operations on the volume. Once started, volume profile <volname> info provides cumulative statistics of the FOPs performed.
.TP
+\fB\ volume top <VOLNAME> {open|read|write|opendir|readdir|clear} [nfs|brick <brick>] [list-cnt <value>] | {read-perf|write-perf} [bs <size> count <count>] [brick <brick>] [list-cnt <value>] \fR
+Generates a profile of a volume representing the performance and bottlenecks/hotspots of each brick.
+.TP
\fB\ volume statedump <VOLNAME> [[nfs|quotad] [all|mem|iobuf|callpool|priv|fd|inode|history]... | [client <hostname:process-id>]] \fR
Dumps the in memory state of the specified process or the bricks of the volume.
.TP
@@ -215,6 +218,12 @@ Use "!<OPTION>" to reset option <OPTION> to default value.
\fB\ volume bitrot <VOLNAME> {enable|disable} \fR
Enable/disable bitrot for volume <VOLNAME>
.TP
+\fB\ volume bitrot <VOLNAME> signing-time <time-in-secs> \fR
+Waiting time for an object, after its last fd is closed, before the signing process starts.
+.TP
+\fB\ volume bitrot <VOLNAME> signer-threads <count> \fR
+Number of signing process threads. Usually set to the number of available cores.
+.TP
\fB\ volume bitrot <VOLNAME> scrub-throttle {lazy|normal|aggressive} \fR
Scrub-throttle value is a measure of how fast or slow the scrubber scrubs the filesystem for volume <VOLNAME>
.TP
diff --git a/doc/glusterfs.8 b/doc/glusterfs.8
index e36bd6fbcfe..3d359ea85e4 100644
--- a/doc/glusterfs.8
+++ b/doc/glusterfs.8
@@ -63,8 +63,8 @@ and \fB\-\-log\-file\fR to console.
\fB\-\-enable\-ino32=BOOL\fR
Use 32-bit inodes when mounting to workaround application that doesn't support 64-bit inodes.
.TP
-\fB\-\-fopen\-keep\-cache\fR
-Do not purge the cache on file open.
+\fB\-\-fopen\-keep\-cache[=BOOL]\fR
+Do not purge the cache on file open (default: false).
.TP
\fB\-\-mac\-compat=BOOL\fR
Provide stubs for attributes needed for seamless operation on Macs (the default is off).
diff --git a/doc/mount.glusterfs.8 b/doc/mount.glusterfs.8
index 286631b9c5c..ce16e9e40b7 100644
--- a/doc/mount.glusterfs.8
+++ b/doc/mount.glusterfs.8
@@ -44,8 +44,8 @@ INFO and NONE [default: INFO]
\fBacl
Mount the filesystem with POSIX ACL support
.TP
-\fBfopen\-keep\-cache
-Do not purge the cache on file open
+\fBfopen\-keep\-cache[=BOOL]
+Do not purge the cache on file open (default: false)
.TP
\fBworm
Mount the filesystem in 'worm' mode
@@ -123,7 +123,12 @@ Provide list of backup volfile servers in the following format [default: None]
.TP
.TP
\fBlru-limit=\fRN
-Set fuse module's limit for number of inodes kept in LRU list to N [default: 131072]
+Set fuse module's limit for number of inodes kept in LRU list to N [default: 65536]
+.TP
+.TP
+\fBinvalidate-limit=\fRN
+Suspend fuse invalidations implied by 'lru-limit' if the number of outstanding
+invalidations reaches N
.TP
.TP
\fBbackground-qlen=\fRN
diff --git a/doc/release-notes/7.0.md b/doc/release-notes/7.0.md
deleted file mode 100644
index 2bcf4b28396..00000000000
--- a/doc/release-notes/7.0.md
+++ /dev/null
@@ -1,265 +0,0 @@
-# Release notes for Gluster 7.0
-
-This is a major release that includes a range of code improvements and stability
-fixes along with a few features as noted below.
-
-A selection of the key features and changes are documented in this page.
-A full list of bugs that have been addressed is included further below.
-
-- [Announcements](#announcements)
-- [Major changes and features](#major-changes-and-features)
-- [Major issues](#major-issues)
-- [Bugs addressed in the release](#bugs-addressed)
-
-## Announcements
-
-1. Releases that receive maintenance updates post release 7 are, 6 and 7
-([reference](https://www.gluster.org/release-schedule/))
-
-2. Release 7 will receive maintenance updates around the 10th of every month
-for the first 3 months post release (i.e Nov'19, Dec'19, Jan'20). Post the
-initial 3 months, it will receive maintenance updates every 2 months till EOL.
-
-
-
-## Major changes and features
-
-### Highlights
-
-- Several stability fixes addressing,
- - coverity, clang-scan, address sanitizer and valgrind reported issues
- - removal of unused and hence, deprecated code and features
-- Performance Improvements
-
-
-Features
-
-#### 1. Rpcbind not required in glusterd.service when gnfs isn't built.
-
-#### 2. Latency based read child to improve read workload latency in a cluster, especially in a cloud setup. Also provides a load balancing with the outstanding pending request.
-
-#### 3. Glusterfind: integrate with gfid2path, to improve performance.
-
-#### 4. Issue #532: Work towards implementing global thread pooling has started
-
-#### 5. This release includes extra coverage for glfs public APIs in our regression tests, so we don't break anything.
-
-
-
-## Major issues
-
-**None**
-
-## Note
-
-Any new volumes created with the release will have the `fips-mode-rchecksum` volume option set to `on` by default.
-
-If a client older than glusterfs-4.x (i.e. 3.x clients) accesses a volume which has the `fips-mode-rchecksum` volume option enabled, it can cause erroneous checksum computation/ unwanted behaviour during afr self-heal. This option is to be enabled only when all clients are also >=4.x. So if you are using these older clients, please explicitly turn this option `off`.
-
-
-## Bugs addressed
-
-Bugs addressed since release-6 are listed below.
-
-- [#789278](https://bugzilla.redhat.com/789278): Issues reported by Coverity static analysis tool
-- [#1098991](https://bugzilla.redhat.com/1098991): Dist-geo-rep: Invalid slave url (::: three or more colons) error out with unclear error message.
-- [#1193929](https://bugzilla.redhat.com/1193929): GlusterFS can be improved
-- [#1241494](https://bugzilla.redhat.com/1241494): [Backup]: Glusterfind CLI commands need to verify the accepted names for session/volume, before failing with error(s)
-- [#1512093](https://bugzilla.redhat.com/1512093): Value of pending entry operations in detail status output is going up after each synchronization.
-- [#1535511](https://bugzilla.redhat.com/1535511): Gluster CLI shouldn't stop if log file couldn't be opened
-- [#1542072](https://bugzilla.redhat.com/1542072): Syntactical errors in hook scripts for managing SELinux context on bricks #2 (S10selinux-label-brick.sh + S10selinux-del-fcontext.sh)
-- [#1573226](https://bugzilla.redhat.com/1573226): eventsapi: ABRT report for package glusterfs has reached 10 occurrences
-- [#1580315](https://bugzilla.redhat.com/1580315): gluster volume status inode getting timed out after 30 minutes with no output/error
-- [#1590385](https://bugzilla.redhat.com/1590385): Refactor dht lookup code
-- [#1593224](https://bugzilla.redhat.com/1593224): [Disperse] : Client side heal is not removing dirty flag for some of the files.
-- [#1596787](https://bugzilla.redhat.com/1596787): glusterfs rpc-clnt.c: error returned while attempting to connect to host: (null), port 0
-- [#1622665](https://bugzilla.redhat.com/1622665): clang-scan report: glusterfs issues
-- [#1624701](https://bugzilla.redhat.com/1624701): error-out {inode,entry}lk fops with all-zero lk-owner
-- [#1628194](https://bugzilla.redhat.com/1628194): tests/dht: Additional tests for dht operations
-- [#1633930](https://bugzilla.redhat.com/1633930): ASan (address sanitizer) fixes - Blanket bug
-- [#1634664](https://bugzilla.redhat.com/1634664): Inconsistent quorum checks during open and fd based operations
-- [#1635688](https://bugzilla.redhat.com/1635688): Keep only the valid (maintained/supported) components in the build
-- [#1642168](https://bugzilla.redhat.com/1642168): changes to cloudsync xlator
-- [#1642810](https://bugzilla.redhat.com/1642810): remove glupy from code and build
-- [#1648169](https://bugzilla.redhat.com/1648169): Fuse mount would crash if features.encryption is on in the version from 3.13.0 to 4.1.5
-- [#1648768](https://bugzilla.redhat.com/1648768): Tracker bug for all leases related issues
-- [#1650095](https://bugzilla.redhat.com/1650095): Regression tests for geo-replication on EC volume is not available. It should be added.
-- [#1651246](https://bugzilla.redhat.com/1651246): Failed to dispatch handler
-- [#1651439](https://bugzilla.redhat.com/1651439): gluster-NFS crash while expanding volume
-- [#1651445](https://bugzilla.redhat.com/1651445): [RFE] storage.reserve option should take size of disk as input instead of percentage
-- [#1652887](https://bugzilla.redhat.com/1652887): Geo-rep help looks to have a typo.
-- [#1654021](https://bugzilla.redhat.com/1654021): Gluster volume heal causes continuous info logging of "invalid argument"
-- [#1654270](https://bugzilla.redhat.com/1654270): glusterd crashed with seg fault possibly during node reboot while volume creates and deletes were happening
-- [#1659334](https://bugzilla.redhat.com/1659334): FUSE mount seems to be hung and not accessible
-- [#1659708](https://bugzilla.redhat.com/1659708): Optimize by not stopping (restart) selfheal deamon (shd) when a volume is stopped unless it is the last volume
-- [#1664934](https://bugzilla.redhat.com/1664934): glusterfs-fuse client not benefiting from page cache on read after write
-- [#1670031](https://bugzilla.redhat.com/1670031): performance regression seen with smallfile workload tests
-- [#1672480](https://bugzilla.redhat.com/1672480): Bugs Test Module tests failing on s390x
-- [#1672711](https://bugzilla.redhat.com/1672711): Upgrade from glusterfs 3.12 to gluster 4/5 broken
-- [#1672727](https://bugzilla.redhat.com/1672727): Fix timeouts so the tests pass on AWS
-- [#1672851](https://bugzilla.redhat.com/1672851): With parallel-readdir enabled, deleting a directory containing stale linkto files fails with "Directory not empty"
-- [#1674389](https://bugzilla.redhat.com/1674389): [thin arbiter] : rpm - add thin-arbiter package
-- [#1674406](https://bugzilla.redhat.com/1674406): glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
-- [#1674412](https://bugzilla.redhat.com/1674412): listing a file while writing to it causes deadlock
-- [#1675076](https://bugzilla.redhat.com/1675076): [posix]: log the actual path wherever possible
-- [#1676400](https://bugzilla.redhat.com/1676400): rm -rf fails with "Directory not empty"
-- [#1676430](https://bugzilla.redhat.com/1676430): distribute: Perf regression in mkdir path
-- [#1676736](https://bugzilla.redhat.com/1676736): tests: ./tests/bugs/distribute/bug-1161311.t times out
-- [#1676797](https://bugzilla.redhat.com/1676797): server xlator doesn't handle dict unserialization failures correctly
-- [#1677559](https://bugzilla.redhat.com/1677559): gNFS crashed when processing "gluster v profile [vol] info nfs"
-- [#1678726](https://bugzilla.redhat.com/1678726): Integer Overflow possible in md-cache.c due to data type inconsistency
-- [#1679401](https://bugzilla.redhat.com/1679401): Geo-rep setup creates an incorrectly formatted authorized_keys file
-- [#1679406](https://bugzilla.redhat.com/1679406): glustereventsd does not start on Ubuntu 16.04 LTS
-- [#1680587](https://bugzilla.redhat.com/1680587): Building RPM packages with _for_fedora_koji_builds enabled fails on el6
-- [#1683352](https://bugzilla.redhat.com/1683352): remove experimental xlators informations from glusterd-volume-set.c
-- [#1683594](https://bugzilla.redhat.com/1683594): nfs ltp ftest* fstat gets mismatch size as except after turn on md-cache
-- [#1683816](https://bugzilla.redhat.com/1683816): Memory leak when peer detach fails
-- [#1684385](https://bugzilla.redhat.com/1684385): [ovirt-gluster] Rolling gluster upgrade from 3.12.5 to 5.3 led to shard on-disk xattrs disappearing
-- [#1684404](https://bugzilla.redhat.com/1684404): Multiple shd processes are running on brick_mux environmet
-- [#1685027](https://bugzilla.redhat.com/1685027): Error handling in /usr/sbin/gluster-eventsapi produces IndexError: tuple index out of range
-- [#1685120](https://bugzilla.redhat.com/1685120): upgrade from 3.12, 4.1 and 5 to 6 broken
-- [#1685414](https://bugzilla.redhat.com/1685414): glusterd memory usage grows at 98 MB/h while running "gluster v profile" in a loop
-- [#1685944](https://bugzilla.redhat.com/1685944): WORM-XLator: Maybe integer overflow when computing new atime
-- [#1686371](https://bugzilla.redhat.com/1686371): Cleanup nigel access and document it
-- [#1686398](https://bugzilla.redhat.com/1686398): Thin-arbiter minor fixes
-- [#1686568](https://bugzilla.redhat.com/1686568): [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
-- [#1686711](https://bugzilla.redhat.com/1686711): [Thin-arbiter] : send correct error code in case of failure
-- [#1687326](https://bugzilla.redhat.com/1687326): [RFE] Revoke access from nodes using Certificate Revoke List in SSL
-- [#1687705](https://bugzilla.redhat.com/1687705): Brick process has coredumped, when starting glusterd
-- [#1687811](https://bugzilla.redhat.com/1687811): core dump generated while running the test ./tests/00-geo-rep/georep-basic-dr-rsync-arbiter.t
-- [#1688068](https://bugzilla.redhat.com/1688068): Proper error message needed for FUSE mount failure when /var is filled.
-- [#1688106](https://bugzilla.redhat.com/1688106): Remove implementation of number of files opened in posix xlator
-- [#1688116](https://bugzilla.redhat.com/1688116): Spurious failure in test ./tests/bugs/glusterfs/bug-844688.t
-- [#1688287](https://bugzilla.redhat.com/1688287): ganesha crash on glusterfs with shard volume
-- [#1689097](https://bugzilla.redhat.com/1689097): gfapi: provide an option for changing statedump path in glfs-api.
-- [#1689799](https://bugzilla.redhat.com/1689799): [cluster/ec] : Fix handling of heal info cases without locks
-- [#1689920](https://bugzilla.redhat.com/1689920): lots of "Matching lock not found for unlock xxx" when using disperse (ec) xlator
-- [#1690753](https://bugzilla.redhat.com/1690753): Volume stop when quorum not met is successful
-- [#1691164](https://bugzilla.redhat.com/1691164): glusterd leaking memory when issued gluster vol status all tasks continuosly
-- [#1691616](https://bugzilla.redhat.com/1691616): client log flooding with intentional socket shutdown message when a brick is down
-- [#1692093](https://bugzilla.redhat.com/1692093): Network throughput usage increased x5
-- [#1692612](https://bugzilla.redhat.com/1692612): Locking issue when restarting bricks
-- [#1692666](https://bugzilla.redhat.com/1692666): ssh-port config set is failing
-- [#1693575](https://bugzilla.redhat.com/1693575): gfapi: do not block epoll thread for upcall notifications
-- [#1693648](https://bugzilla.redhat.com/1693648): Geo-re: Geo replication failing in "cannot allocate memory"
-- [#1693692](https://bugzilla.redhat.com/1693692): Increase code coverage from regression tests
-- [#1694820](https://bugzilla.redhat.com/1694820): Geo-rep: Data inconsistency while syncing heavy renames with constant destination name
-- [#1694925](https://bugzilla.redhat.com/1694925): GF_LOG_OCCASSIONALLY API doesn't log at first instance
-- [#1695327](https://bugzilla.redhat.com/1695327): regression test fails with brick mux enabled.
-- [#1696046](https://bugzilla.redhat.com/1696046): Log level changes do not take effect until the process is restarted
-- [#1696077](https://bugzilla.redhat.com/1696077): Add pause and resume test case for geo-rep
-- [#1696136](https://bugzilla.redhat.com/1696136): gluster fuse mount crashed, when deleting 2T image file from oVirt Manager UI
-- [#1696512](https://bugzilla.redhat.com/1696512): glusterfs build is failing on rhel-6
-- [#1696599](https://bugzilla.redhat.com/1696599): Fops hang when inodelk fails on the first fop
-- [#1697316](https://bugzilla.redhat.com/1697316): Getting SEEK-2 and SEEK7 errors with [Invalid argument] in the bricks' logs
-- [#1697486](https://bugzilla.redhat.com/1697486): bug-1650403.t && bug-858215.t are throwing error "No such file" at the time of access glustershd pidfile
-- [#1697866](https://bugzilla.redhat.com/1697866): Provide a way to detach a failed node
-- [#1697907](https://bugzilla.redhat.com/1697907): ctime feature breaks old client to connect to new server
-- [#1697930](https://bugzilla.redhat.com/1697930): Thin-Arbiter SHD minor fixes
-- [#1698078](https://bugzilla.redhat.com/1698078): ctime: Creation of tar file on gluster mount throws warning "file changed as we read it"
-- [#1698449](https://bugzilla.redhat.com/1698449): thin-arbiter lock release fixes
-- [#1699025](https://bugzilla.redhat.com/1699025): Brick is not able to detach successfully in brick_mux environment
-- [#1699176](https://bugzilla.redhat.com/1699176): rebalance start command doesn't throw up error message if the command fails
-- [#1699189](https://bugzilla.redhat.com/1699189): fix truncate lock to cover the write in tuncate clean
-- [#1699339](https://bugzilla.redhat.com/1699339): With 1800+ vol and simultaneous 2 gluster pod restarts, running gluster commands gives issues once all pods are up
-- [#1699394](https://bugzilla.redhat.com/1699394): [geo-rep]: Geo-rep goes FAULTY with OSError
-- [#1699866](https://bugzilla.redhat.com/1699866): I/O error on writes to a disperse volume when replace-brick is executed
-- [#1700078](https://bugzilla.redhat.com/1700078): disablle + reenable of bitrot leads to files marked as bad
-- [#1700865](https://bugzilla.redhat.com/1700865): FUSE mount seems to be hung and not accessible
-- [#1701337](https://bugzilla.redhat.com/1701337): issues with 'building' glusterfs packages if we do 'git clone --depth 1'
-- [#1701457](https://bugzilla.redhat.com/1701457): ctime: Logs are flooded with "posix set mdata failed, No ctime" error during open
-- [#1702131](https://bugzilla.redhat.com/1702131): The source file is left in EC volume after rename when glusterfsd out of service
-- [#1702185](https://bugzilla.redhat.com/1702185): coredump reported by test ./tests/bugs/glusterd/bug-1699339.t
-- [#1702299](https://bugzilla.redhat.com/1702299): Custom xattrs are not healed on newly added brick
-- [#1702303](https://bugzilla.redhat.com/1702303): Enable enable fips-mode-rchecksum for new volumes by default
-- [#1702952](https://bugzilla.redhat.com/1702952): remove tier related information from manual pages
-- [#1703020](https://bugzilla.redhat.com/1703020): The cluster.heal-timeout option is unavailable for ec volume
-- [#1703629](https://bugzilla.redhat.com/1703629): statedump is not capturing info related to glusterd
-- [#1703948](https://bugzilla.redhat.com/1703948): Self-heal daemon resources are not cleaned properly after a ec fini
-- [#1704252](https://bugzilla.redhat.com/1704252): Creation of bulkvoldict thread logic is not correct while brick_mux is enabled for single volume
-- [#1704888](https://bugzilla.redhat.com/1704888): delete the snapshots and volume at the end of uss.t
-- [#1705865](https://bugzilla.redhat.com/1705865): VM stuck in a shutdown because of a pending fuse request
-- [#1705884](https://bugzilla.redhat.com/1705884): Image size as reported from the fuse mount is incorrect
-- [#1706603](https://bugzilla.redhat.com/1706603): Glusterfsd crashing in ec-inode-write.c, in GF_ASSERT
-- [#1707081](https://bugzilla.redhat.com/1707081): Self heal daemon not coming up after upgrade to glusterfs-6.0-2 (intermittently) on a brick mux setup
-- [#1707700](https://bugzilla.redhat.com/1707700): maintain consistent values across for options when fetched at cluster level or volume level
-- [#1707728](https://bugzilla.redhat.com/1707728): geo-rep: Sync hangs with tarssh as sync-engine
-- [#1707742](https://bugzilla.redhat.com/1707742): tests/geo-rep: arequal checksum comparison always succeeds
-- [#1707746](https://bugzilla.redhat.com/1707746): AFR-v2 does not log before attempting data self-heal
-- [#1708051](https://bugzilla.redhat.com/1708051): Capture memory consumption for gluster process at the time of throwing no memory available message
-- [#1708156](https://bugzilla.redhat.com/1708156): ec ignores lock contention notifications for partially acquired locks
-- [#1708163](https://bugzilla.redhat.com/1708163): tests: fix bug-1319374.c compile warnings.
-- [#1708926](https://bugzilla.redhat.com/1708926): Invalid memory access while executing cleanup_and_exit
-- [#1708929](https://bugzilla.redhat.com/1708929): Add more test coverage for shd mux
-- [#1709248](https://bugzilla.redhat.com/1709248): [geo-rep]: Non-root - Unable to set up mountbroker root directory and group
-- [#1709653](https://bugzilla.redhat.com/1709653): geo-rep: With heavy rename workload geo-rep log if flooded
-- [#1710054](https://bugzilla.redhat.com/1710054): Optimize the glustershd manager to send reconfigure
-- [#1710159](https://bugzilla.redhat.com/1710159): glusterd: While upgrading (3-node cluster) 'gluster v status' times out on node to be upgraded
-- [#1711240](https://bugzilla.redhat.com/1711240): [GNFS] gf_nfs_mt_inode_ctx serious memory leak
-- [#1711250](https://bugzilla.redhat.com/1711250): bulkvoldict thread is not handling all volumes while brick multiplex is enabled
-- [#1711297](https://bugzilla.redhat.com/1711297): Optimize glusterd code to copy dictionary in handshake code path
-- [#1711764](https://bugzilla.redhat.com/1711764): Files inaccessible if one rebalance process is killed in a multinode volume
-- [#1711820](https://bugzilla.redhat.com/1711820): Typo in cli return string.
-- [#1711827](https://bugzilla.redhat.com/1711827): test case bug-1399598-uss-with-ssl.t is generating crash
-- [#1712322](https://bugzilla.redhat.com/1712322): Brick logs inundated with [2019-04-27 22:14:53.378047] I [dict.c:541:dict_get] (-->/usr/lib64/glusterfs/6.0/xlator/features/worm.so(+0x7241) [0x7fe857bb3241] -->/usr/lib64/glusterfs/6.0/xlator/features/locks.so(+0x1c219) [0x7fe857dda219] [Invalid argumen
-- [#1712668](https://bugzilla.redhat.com/1712668): Remove-brick shows warning cluster.force-migration enabled where as cluster.force-migration is disabled on the volume
-- [#1712741](https://bugzilla.redhat.com/1712741): glusterd_svcs_stop should call individual wrapper function to stop rather than calling the glusterd_svc_stop
-- [#1713730](https://bugzilla.redhat.com/1713730): Failure when glusterd is configured to bind specific IPv6 address. If bind-address is IPv6, *addr_len will be non-zero and it goes to ret = -1 branch, which will cause listen failure eventually
-- [#1714098](https://bugzilla.redhat.com/1714098): Make debugging hung frames easier
-- [#1714415](https://bugzilla.redhat.com/1714415): Script to make it easier to find hung frames
-- [#1714973](https://bugzilla.redhat.com/1714973): upgrade after tier code removal results in peer rejection.
-- [#1715921](https://bugzilla.redhat.com/1715921): uss.t tests times out with brick-mux regression
-- [#1716695](https://bugzilla.redhat.com/1716695): Fix memory leaks that are present even after an xlator fini [client side xlator]
-- [#1716766](https://bugzilla.redhat.com/1716766): [Thin-arbiter] TA process is not picking 24007 as port while starting up
-- [#1716812](https://bugzilla.redhat.com/1716812): Failed to create volume which transport_type is "tcp,rdma"
-- [#1716830](https://bugzilla.redhat.com/1716830): DHT: directory permissions are wiped out
-- [#1717757](https://bugzilla.redhat.com/1717757): WORM: Segmentation Fault if bitrot stub do signature
-- [#1717782](https://bugzilla.redhat.com/1717782): gluster v get <VolumeName> all still showing storage.fips-mode-rchecksum off
-- [#1717819](https://bugzilla.redhat.com/1717819): Changes to self-heal logic w.r.t. detecting metadata split-brains
-- [#1717953](https://bugzilla.redhat.com/1717953): SELinux context labels are missing for newly added bricks using add-brick command
-- [#1718191](https://bugzilla.redhat.com/1718191): Regression: Intermittent test failure for quick-read-with-upcall.t
-- [#1718273](https://bugzilla.redhat.com/1718273): markdown formatting errors in files present under /doc directory of the project
-- [#1718316](https://bugzilla.redhat.com/1718316): Ganesha-gfapi logs are flooded with error messages related to "gf_uuid_is_null(gfid)) [Invalid argument]" when lookups are running from multiple clients
-- [#1718338](https://bugzilla.redhat.com/1718338): Upcall: Avoid sending upcalls for invalid Inode
-- [#1718848](https://bugzilla.redhat.com/1718848): False positive logging of mount failure
-- [#1718998](https://bugzilla.redhat.com/1718998): Fix test case "tests/basic/afr/split-brain-favorite-child-policy.t" failure
-- [#1720201](https://bugzilla.redhat.com/1720201): Healing not proceeding during in-service upgrade on a disperse volume
-- [#1720290](https://bugzilla.redhat.com/1720290): ctime changes: tar still complains file changed as we read it if uss is enabled
-- [#1720615](https://bugzilla.redhat.com/1720615): [RHEL-8.1] yum update fails for rhel-8 glusterfs client packages 6.0-5.el8
-- [#1720993](https://bugzilla.redhat.com/1720993): tests/features/subdir-mount.t is failing for brick_mux regression
-- [#1721385](https://bugzilla.redhat.com/1721385): glusterfs-libs: usage of inet_addr() may impact IPv6
-- [#1721435](https://bugzilla.redhat.com/1721435): DHT: Internal xattrs visible on the mount
-- [#1721441](https://bugzilla.redhat.com/1721441): geo-rep: Fix permissions for GEOREP_DIR in non-root setup
-- [#1721601](https://bugzilla.redhat.com/1721601): [SHD] : logs of one volume are going to log file of other volume
-- [#1722541](https://bugzilla.redhat.com/1722541): stale shd process files leading to heal timing out and heal daemon not coming up for all volumes
-- [#1703322](https://bugzilla.redhat.com/1703322): Need to document about fips-mode-rchecksum in gluster-7 release notes.
-- [#1722802](https://bugzilla.redhat.com/1722802): Incorrect power of two calculation in mem_pool_get_fn
-- [#1723890](https://bugzilla.redhat.com/1723890): Crash in glusterd when running test script bug-1699339.t
-- [#1728770](https://bugzilla.redhat.com/1728770): Failures in remove-brick due to [Input/output error] errors
-- [#1736481](https://bugzilla.redhat.com/1736481): capture stat failure error while setting the gfid
-- [#1739424](https://bugzilla.redhat.com/1739424): Disperse volume : data corruption with ftruncate data in 4+2 config
-- [#1739426](https://bugzilla.redhat.com/1739426): Open fd heal should filter O_APPEND/O_EXCL
-- [#1739427](https://bugzilla.redhat.com/1739427): An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
-- [#1741041](https://bugzilla.redhat.com/1741041): atime/mtime is not restored after healing for entry self heals
-- [#1743200](https://bugzilla.redhat.com/1743200): ./tests/bugs/glusterd/bug-1595320.t is failing
-- [#1744874](https://bugzilla.redhat.com/1744874): interrupts leak memory
-- [#1745422](https://bugzilla.redhat.com/1745422): ./tests/bugs/glusterd/bug-1595320.t is failing
-- [#1745914](https://bugzilla.redhat.com/1745914): ESTALE change in fuse breaks get_real_filename implementation
-- [#1746142](https://bugzilla.redhat.com/1746142): ctime: If atime is updated via utimensat syscall ctime is not getting updated
-- [#1746145](https://bugzilla.redhat.com/1746145): CentOs 6 GlusterFS client creates files with time 01/01/1970
-- [#1747301](https://bugzilla.redhat.com/1747301): Setting cluster.heal-timeout requires volume restart
-- [#1747746](https://bugzilla.redhat.com/1747746): The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
-- [#1748448](https://bugzilla.redhat.com/1748448): syncop: Bail out if frame creation fails
-- [#1748774](https://bugzilla.redhat.com/1748774): Incorrect power of two calculation in mem_pool_get_fn
-- [#1749155](https://bugzilla.redhat.com/1749155): bug-1402841.t-mt-dir-scan-race.t fails spuriously
-- [#1749305](https://bugzilla.redhat.com/1749305): Failures in remove-brick due to [Input/output error] errors
-- [#1749664](https://bugzilla.redhat.com/1749664): The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
-- [#1751556](https://bugzilla.redhat.com/1751556): syncop: Bail out if frame creation fails
-- [#1752245](https://bugzilla.redhat.com/1752245): Crash in glusterd when running test script bug-1699339.t
-- [#1752429](https://bugzilla.redhat.com/1752429): Ctime: Cannot see the "trusted.glusterfs.mdata" xattr for directory on a new brick after rebalance
-- [#1755212](https://bugzilla.redhat.com/1755212): geo-rep: performance improvement while syncing heavy renames with existing destination
-- [#1755213](https://bugzilla.redhat.com/1755213): geo-rep: non-root session going fault due improper sub-command
-- [#1755678](https://bugzilla.redhat.com/1755678): Segmentation fault occurs while truncate file
-- [#1756002](https://bugzilla.redhat.com/1756002): git clone fails on gluster volumes exported via nfs-ganesha
-
-
diff --git a/events/src/eventsapiconf.py.in b/events/src/eventsapiconf.py.in
index 76b5954d325..700093bee60 100644
--- a/events/src/eventsapiconf.py.in
+++ b/events/src/eventsapiconf.py.in
@@ -28,6 +28,8 @@ def get_glusterd_workdir():
return glusterd_workdir
SERVER_ADDRESS = "0.0.0.0"
+SERVER_ADDRESSv4 = "0.0.0.0"
+SERVER_ADDRESSv6 = "::1"
DEFAULT_CONFIG_FILE = "@SYSCONF_DIR@/glusterfs/eventsconfig.json"
CUSTOM_CONFIG_FILE_TO_SYNC = "/events/config.json"
CUSTOM_CONFIG_FILE = get_glusterd_workdir() + CUSTOM_CONFIG_FILE_TO_SYNC
diff --git a/events/src/glustereventsd.py b/events/src/glustereventsd.py
index c4c7b65e332..341a3b60947 100644
--- a/events/src/glustereventsd.py
+++ b/events/src/glustereventsd.py
@@ -13,6 +13,7 @@
from __future__ import print_function
import sys
import signal
+import threading
try:
import socketserver
except ImportError:
@@ -23,10 +24,17 @@ from argparse import ArgumentParser, RawDescriptionHelpFormatter
from eventtypes import all_events
import handlers
import utils
-from eventsapiconf import SERVER_ADDRESS, PID_FILE
+from eventsapiconf import SERVER_ADDRESSv4, SERVER_ADDRESSv6, PID_FILE
from eventsapiconf import AUTO_BOOL_ATTRIBUTES, AUTO_INT_ATTRIBUTES
from utils import logger, PidFile, PidFileLockFailed, boolify
+# Subclass so that specifically IPv4 packets are captured
+class UDPServerv4(socketserver.ThreadingUDPServer):
+ address_family = socket.AF_INET
+
+# Subclass so that specifically IPv6 packets are captured
+class UDPServerv6(socketserver.ThreadingUDPServer):
+ address_family = socket.AF_INET6
class GlusterEventsRequestHandler(socketserver.BaseRequestHandler):
@@ -89,6 +97,10 @@ def signal_handler_sigusr2(sig, frame):
utils.restart_webhook_pool()
+def UDP_server_thread(sock):
+ sock.serve_forever()
+
+
def init_event_server():
utils.setup_logger()
utils.load_all()
@@ -99,15 +111,26 @@ def init_event_server():
sys.stderr.write("Unable to get Port details from Config\n")
sys.exit(1)
- # Start the Eventing Server, UDP Server
+ # Creating the Eventing Server, UDP Server for IPv4 packets
+ try:
+ serverv4 = UDPServerv4((SERVER_ADDRESSv4, port),
+ GlusterEventsRequestHandler)
+ except socket.error as e:
+ sys.stderr.write("Failed to start Eventsd for IPv4: {0}\n".format(e))
+ sys.exit(1)
+ # Creating the Eventing Server, UDP Server for IPv6 packets
try:
- server = socketserver.ThreadingUDPServer(
- (SERVER_ADDRESS, port),
- GlusterEventsRequestHandler)
+ serverv6 = UDPServerv6((SERVER_ADDRESSv6, port),
+ GlusterEventsRequestHandler)
except socket.error as e:
- sys.stderr.write("Failed to start Eventsd: {0}\n".format(e))
+ sys.stderr.write("Failed to start Eventsd for IPv6: {0}\n".format(e))
sys.exit(1)
- server.serve_forever()
+ server_thread1 = threading.Thread(target=UDP_server_thread,
+ args=(serverv4,))
+ server_thread2 = threading.Thread(target=UDP_server_thread,
+ args=(serverv6,))
+ server_thread1.start()
+ server_thread2.start()
def get_args():
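The change above makes glustereventsd bind one ThreadingUDPServer per address family and run each in its own thread. A quick way to confirm both listeners after restarting the daemon is to inspect the UDP sockets; a minimal sketch, assuming the service is managed by systemd under the name glustereventsd (the port shown by ss is whatever the events config specifies):

    # restart the events daemon so both listeners are created
    systemctl restart glustereventsd

    # expect two UDP sockets on the same port: one bound to 0.0.0.0 (IPv4)
    # and one bound to ::1 (IPv6 loopback, per SERVER_ADDRESSv6 above)
    ss -ulnp | grep -i gluster
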
diff --git a/events/src/peer_eventsapi.py b/events/src/peer_eventsapi.py
index 26b77a09179..4d2e5f35b1c 100644
--- a/events/src/peer_eventsapi.py
+++ b/events/src/peer_eventsapi.py
@@ -353,8 +353,7 @@ class WebhookModCmd(Cmd):
errcode=ERROR_WEBHOOK_NOT_EXISTS,
json_output=args.json)
- if isinstance(data[args.url], str) or \
- isinstance(data[args.url], unicode):
+ if isinstance(data[args.url], str):
data[args.url]["token"] = data[args.url]
if args.bearer_token != "":
diff --git a/events/src/utils.py b/events/src/utils.py
index 38b707a1b28..6d4e0791a2b 100644
--- a/events/src/utils.py
+++ b/events/src/utils.py
@@ -13,6 +13,7 @@ import sys
import json
import os
import logging
+import logging.handlers
import fcntl
from errno import EBADF
from threading import Thread
@@ -98,7 +99,7 @@ def setup_logger():
logger.setLevel(logging.INFO)
# create the logging file handler
- fh = logging.FileHandler(LOG_FILE)
+ fh = logging.handlers.WatchedFileHandler(LOG_FILE)
formatter = logging.Formatter("[%(asctime)s] %(levelname)s "
"[%(module)s - %(lineno)s:%(funcName)s] "
diff --git a/extras/Makefile.am b/extras/Makefile.am
index ff5ca9bdb83..983f014cca6 100644
--- a/extras/Makefile.am
+++ b/extras/Makefile.am
@@ -11,7 +11,8 @@ EditorModedir = $(docdir)
EditorMode_DATA = glusterfs-mode.el glusterfs.vim
SUBDIRS = init.d systemd benchmarking hook-scripts $(OCF_SUBDIR) LinuxRPM \
- $(GEOREP_EXTRAS_SUBDIR) snap_scheduler firewalld cliutils python
+ $(GEOREP_EXTRAS_SUBDIR) snap_scheduler firewalld cliutils python \
+ ganesha
confdir = $(sysconfdir)/glusterfs
if WITH_SERVER
diff --git a/extras/cliutils/README.md b/extras/cliutils/README.md
index e11166774e3..309beb1ca25 100644
--- a/extras/cliutils/README.md
+++ b/extras/cliutils/README.md
@@ -221,7 +221,7 @@ required.(Under `%files` section)
- gluster-mountbroker http://review.gluster.org/14544
- gluster-eventsapi http://review.gluster.org/14248
- gluster-georep-sshkey http://review.gluster.org/14732
-- gluster-restapi https://github.com/aravindavk/glusterfs-restapi
+- gluster-restapi https://github.com/gluster/restapi
## Limitations/TODOs
- Not yet possible to create CLI without any subcommand, For example
diff --git a/extras/distributed-testing/distributed-test-runner.py b/extras/distributed-testing/distributed-test-runner.py
index 7bfb6c9652a..5a07e2feab1 100755
--- a/extras/distributed-testing/distributed-test-runner.py
+++ b/extras/distributed-testing/distributed-test-runner.py
@@ -383,14 +383,17 @@ class Handlers:
return self.shell.call("make install") == 0
@synchronized
- def prove(self, id, test, timeout, valgrind=False, asan_noleaks=True):
+ def prove(self, id, test, timeout, valgrind="no", asan_noleaks=True):
assert id == self.client_id
self.shell.cd(self.gluster_root)
env = "DEBUG=1 "
- if valgrind:
+ if valgrind == "memcheck" or valgrind == "yes":
cmd = "valgrind"
cmd += " --tool=memcheck --leak-check=full --track-origins=yes"
cmd += " --show-leak-kinds=all -v prove -v"
+ elif valgrind == "drd":
+ cmd = "valgrind"
+ cmd += " --tool=drd -v prove -v"
elif asan_noleaks:
cmd = "prove -v"
env += "ASAN_OPTIONS=detect_leaks=0 "
@@ -827,8 +830,9 @@ parser.add_argument("--port", help="server port to listen",
type=int, default=DEFAULT_PORT)
# test role
parser.add_argument("--tester", help="start tester", action="store_true")
-parser.add_argument("--valgrind", help="run tests under valgrind",
- action="store_true")
+parser.add_argument("--valgrind[=memcheck,drd]",
+ help="run tests with valgrind tool 'memcheck' or 'drd'",
+ default="no")
parser.add_argument("--asan", help="test with asan enabled",
action="store_true")
parser.add_argument("--asan-noleaks", help="test with asan but no mem leaks",
diff --git a/extras/ec-heal-script/README.md b/extras/ec-heal-script/README.md
new file mode 100644
index 00000000000..aaefd6681f6
--- /dev/null
+++ b/extras/ec-heal-script/README.md
@@ -0,0 +1,69 @@
+# gluster-heal-scripts
+Scripts to correct extended attributes of fragments of files to make them healable.
+
+Following are the guidelines/suggestions for using these scripts.
+
+1 - Passwordless ssh should be setup for all the nodes of the cluster.
+
+2 - Scripts should be executed from one of these nodes.
+
+3 - Make sure NO "IO" is going on for the files for which we are running
+these two scripts.
+
+4 - There should be no heal going on for the file for which xattrs are being
+set by correct_pending_heals.sh. Disable the self heal while running this script.
+
+5 - All the bricks of the volume should be UP to identify good and bad fragments
+and to decide if an entry is healable or not.
+
+6 - If correct_pending_heals.sh is stopped in the middle while it was processing
+healable entries, it is suggested to re-run gfid_needing_heal_parallel.sh to create
+the latest list of healable and non-healable entries in the "potential_heal" and "can_not_heal" files.
+
+7 - Based on the number of entries, these scripts might take time to get and set the
+stats and xattrs of the entries.
+
+8 - A backup of the fragments will be taken in the <brick path>/.glusterfs/correct_pending_heals
+ directory, with a file name the same as the gfid.
+
+9 - Once the correctness of the file has been verified by the user, these backups should be removed.
+
+10 - Make sure we have enough space on bricks to take these backups.
+
+11 - At the end this will create two files -
+ 1 - modified_and_backedup_files - Contains list of files which have been modified and should be healed.
+ 2 - can_not_heal - Contains list of files which can not be healed.
+
+12 - It is suggested that the integrity of the data of files, which were modified and healed,
+ should be checked by the user.
+
+
+Usage:
+
+The following is the sequence of steps to use these scripts -
+
+1 - ./gfid_needing_heal_parallel.sh <volume name>
+
+ Execute gfid_needing_heal_parallel.sh with the volume name to create the lists of files which
+ can and cannot be healed. It creates the "potential_heal" and "can_not_heal" files.
+ During execution, it also displays the list of files on the console along with the verdict.
+
+2 - ./correct_pending_heals.sh
+
+ Execute correct_pending_heals.sh without any argument. This script processes the entries present
+ in the "potential_heal" file. It asks the user how many files to process in one attempt.
+ Once the count is provided, the script fetches the entries one by one from the "potential_heal" file and takes the necessary action.
+ If a file still can not be healed at this point, it will be pushed to the "can_not_heal" file.
+ If a file can be healed, the script will modify the xattrs of that file's fragments and create an entry in the "modified_and_backedup_files" file.
+
+3 - At the end, all the entries of "potential_heal" will be processed and based on the processing only two files will be left.
+
+ 1 - modified_and_backedup_files - Contains list of files which have been modified and should be healed.
+ 2 - can_not_heal - Contains list of files which can not be healed.
+
+Logs and other files -
+
+1 - modified_and_backedup_files - It contains all the files which could be healed and the location of the backup of each fragment.
+2 - can_not_heal - It contains all the files which can not be healed.
+3 - potential_heal - List of files which could be healed and should be processed by "correct_pending_heals.sh"
+4 - /var/log/glusterfs/ec-heal-script.log - It contains the logs of both scripts.
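The README's workflow reduces to two commands plus a review of the generated files; a minimal sketch, assuming a disperse volume named "testvol" (illustrative) and passwordless ssh to all nodes already in place:

    # 1. build the potential_heal / can_not_heal lists for the volume
    ./gfid_needing_heal_parallel.sh testvol

    # 2. with self-heal disabled (guideline 4), correct the xattrs of the
    #    entries listed in potential_heal
    ./correct_pending_heals.sh

    # 3. review what was modified before re-enabling self-heal and verifying data
    cat modified_and_backedup_files
    cat can_not_heal
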
diff --git a/extras/ec-heal-script/correct_pending_heals.sh b/extras/ec-heal-script/correct_pending_heals.sh
new file mode 100755
index 00000000000..c9f19dd7c89
--- /dev/null
+++ b/extras/ec-heal-script/correct_pending_heals.sh
@@ -0,0 +1,415 @@
+#!/bin/bash
+# Copyright (c) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+
+# This script finally resets the xattrs of all the fragments of a file
+# which can be healed as per gfid_needing_heal_parallel.sh.
+# gfid_needing_heal_parallel.sh will produce two files, potential_heal and can_not_heal.
+# This script takes potential_heal as input and resets xattrs of all the fragments
+# of those files present in this file and which could be healed as per
+# trusted.ec.size xattr of the file; else it will place the entry in the can_not_heal
+# file. Those entries which must be healed will be placed in the must_heal file
+# after setting xattrs so that user can track those files.
+
+
+MOD_BACKUP_FILES="modified_and_backedup_files"
+CAN_NOT_HEAL="can_not_heal"
+LOG_DIR="/var/log/glusterfs"
+LOG_FILE="$LOG_DIR/ec-heal-script.log"
+LINE_SEP="==================================================="
+
+function heal_log()
+{
+ echo "$1" >> "$LOG_FILE"
+}
+
+function desc ()
+{
+ echo ""
+ echo "This script finally resets the xattrs of all the fragments of a file
+which can be healed as per gfid_needing_heal_parallel.sh.
+gfid_needing_heal_parallel.sh will produce two files, potential_heal and can_not_heal.
+This script takes potential_heal as input and resets xattrs of all the fragments
+of those files present in this file and which could be healed as per
+trusted.ec.size xattr of the file; else it will place the entry in the can_not_heal
+file. Those entries which must be healed will be placed in the must_heal file
+after setting xattrs so that user can track those files."
+}
+
+function _init ()
+{
+ if [ $# -ne 0 ]
+ then
+ echo "usage: $0"
+ desc
+ exit 2
+ fi
+
+ if [ ! -f "potential_heal" ]
+ then
+ echo "Nothing to correct. File "potential_heal" does not exist"
+ echo ""
+ desc
+ exit 2
+ fi
+}
+
+function total_file_size_in_hex()
+{
+ local frag_size=$1
+ local size=0
+ local hex_size=""
+
+ size=$((frag_size * 4))
+ hex_size=$(printf '0x%016x' $size)
+ echo "$hex_size"
+}
+
+function backup_file_fragment()
+{
+ local file_host=$1
+ local file_entry=$2
+ local gfid_actual_paths=$3
+ local brick_root=""
+ local temp=""
+ local backup_dir=""
+ local cmd=""
+ local gfid=""
+
+ brick_root=$(echo "$file_entry" | cut -d "#" -f 1)
+ temp=$(echo "$(basename "$BASH_SOURCE")" | cut -d '.' -f 1)
+ backup_dir=$(echo "${brick_root}/.glusterfs/${temp}")
+ file_entry=${file_entry//#}
+
+ gfid=$(echo "${gfid_actual_paths}" | cut -d '|' -f 1 | cut -d '/' -f 5)
+ echo "${file_host}:${backup_dir}/${gfid}" >> "$MOD_BACKUP_FILES"
+
+ cmd="mkdir -p ${backup_dir} && yes | cp -af ${file_entry} ${backup_dir}/${gfid} 2>/dev/null"
+ ssh -n "${file_host}" "${cmd}"
+}
+
+function set_frag_xattr ()
+{
+ local file_host=$1
+ local file_entry=$2
+ local good=$3
+ local cmd1=""
+ local cmd2=""
+ local cmd=""
+ local version="0x00000000000000010000000000000001"
+ local dirty="0x00000000000000010000000000000001"
+
+ if [[ $good -eq 0 ]]
+ then
+ version="0x00000000000000000000000000000000"
+ fi
+
+ cmd1=" setfattr -n trusted.ec.version -v ${version} ${file_entry} &&"
+ cmd2=" setfattr -n trusted.ec.dirty -v ${dirty} ${file_entry}"
+ cmd=${cmd1}${cmd2}
+ ssh -n "${file_host}" "${cmd}"
+}
+
+function set_version_dirty_xattr ()
+{
+ local file_paths=$1
+ local good=$2
+ local gfid_actual_paths=$3
+ local file_entry=""
+ local file_host=""
+ local bpath=""
+
+ for bpath in ${file_paths//,/ }
+ do
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ backup_file_fragment "$file_host" "$file_entry" "$gfid_actual_paths"
+ file_entry=${file_entry//#}
+ set_frag_xattr "$file_host" "$file_entry" "$good"
+ done
+}
+
+function match_size_xattr_quorum ()
+{
+ local file_paths=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local size_xattr=""
+ local bpath=""
+ declare -A xattr_count
+
+ for bpath in ${file_paths//,/ }
+ do
+ size_xattr=""
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ file_entry=${file_entry//#}
+
+ cmd="getfattr -n trusted.ec.size -d -e hex ${file_entry} 2>/dev/null | grep -w "trusted.ec.size" | cut -d '=' -f 2"
+ size_xattr=$(ssh -n "${file_host}" "${cmd}")
+ if [[ -n $size_xattr ]]
+ then
+ count=$((xattr_count["$size_xattr"] + 1))
+ xattr_count["$size_xattr"]=${count}
+ if [[ $count -ge 4 ]]
+ then
+ echo "${size_xattr}"
+ return
+ fi
+ fi
+ done
+ echo "False"
+}
+
+function match_version_xattr ()
+{
+ local file_paths=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local version=""
+ local bpath=""
+ declare -A ver_count
+
+ for bpath in ${file_paths//,/ }
+ do
+ version=""
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ file_entry=${file_entry//#}
+
+ cmd="getfattr -n trusted.ec.version -d -e hex ${file_entry} 2>/dev/null | grep -w "trusted.ec.version" | cut -d '=' -f 2"
+ version=$(ssh -n "${file_host}" "${cmd}")
+ ver_count["$version"]=$((ver_count["$version"] + 1))
+ done
+ for key in "${ver_count[@]}"
+ do
+ if [[ $key -ge 4 ]]
+ then
+ echo "True"
+ return
+ else
+ echo "False"
+ return
+ fi
+ done
+}
+
+function match_stat_size_with_xattr ()
+{
+ local bpath=$1
+ local size=$2
+ local file_stat=$3
+ local xattr=$4
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local stat_output=""
+ local hex_size=""
+
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+
+ file_entry=${file_entry//#}
+ cmd="stat --format=%F:%B:%s $file_entry 2>/dev/null"
+ stat_output=$(ssh -n "${file_host}" "${cmd}")
+ echo "$stat_output" | grep -w "${file_stat}" > /dev/null
+
+ if [[ $? -eq 0 ]]
+ then
+ cmd="getfattr -n trusted.ec.size -d -e hex ${file_entry} 2>/dev/null | grep -w "trusted.ec.size" | cut -d '=' -f 2"
+ hex_size=$(ssh -n "${file_host}" "${cmd}")
+
+ if [[ -z $hex_size || "$hex_size" != "$xattr" ]]
+ then
+ echo "False"
+ return
+ fi
+ size_diff=$(printf '%d' $(( size - hex_size )))
+ if [[ $size_diff -gt 2047 ]]
+ then
+ echo "False"
+ return
+ else
+ echo "True"
+ return
+ fi
+ else
+ echo "False"
+ return
+ fi
+}
+
+function find_file_paths ()
+{
+ local bpath=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local brick_root=""
+ local gfid=""
+ local actual_path=""
+ local gfid_path=""
+
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ brick_root=$(echo "$file_entry" | cut -d "#" -f 1)
+
+ gfid=$(echo "${file_entry}" | grep ".glusterfs")
+ if [[ -n "$gfid" ]]
+ then
+ gfid_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep -v '.glusterfs' "
+ actual_path=$(ssh -n "${file_host}" "${cmd}")
+ #removing absolute path so that user can refer this from mount point
+ actual_path=${actual_path#"$brick_root"}
+ else
+ actual_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep '.glusterfs' "
+ gfid_path=$(ssh -n "${file_host}" "${cmd}")
+ gfid_path=${gfid_path#"$brick_root"}
+ fi
+
+ echo "${gfid_path}|${actual_path}"
+}
+
+function log_can_not_heal ()
+{
+ local gfid_actual_paths=$1
+ local file_paths=$2
+ file_paths=${file_paths//#}
+
+ echo "${LINE_SEP}" >> "$CAN_NOT_HEAL"
+ echo "Can Not Heal : $(echo "$gfid_actual_paths" | cut -d '|' -f 2)" >> "$CAN_NOT_HEAL"
+ for bpath in ${file_paths//,/ }
+ do
+ echo "${bpath}" >> "$CAN_NOT_HEAL"
+ done
+}
+
+function check_all_frag_and_set_xattr ()
+{
+ local file_paths=$1
+ local total_size=$2
+ local file_stat=$3
+ local bpath=""
+ local healthy_count=0
+ local match="False"
+ local matching_bricks=""
+ local bad_bricks=""
+ local gfid_actual_paths=""
+
+ for bpath in ${file_paths//,/ }
+ do
+ if [[ -n "$gfid_actual_paths" ]]
+ then
+ break
+ fi
+ gfid_actual_paths=$(find_file_paths "$bpath")
+ done
+
+ match=$(match_size_xattr_quorum "$file_paths")
+
+# echo "${match} : $bpath" >> "$MOD_BACKUP_FILES"
+
+ if [[ "$match" != "False" ]]
+ then
+ xattr="$match"
+ for bpath in ${file_paths//,/ }
+ do
+ match="False"
+ match=$(match_stat_size_with_xattr "$bpath" "$total_size" "$file_stat" "$xattr")
+ if [[ "$match" == "True" ]]
+ then
+ matching_bricks="${bpath},${matching_bricks}"
+ healthy_count=$((healthy_count + 1))
+ else
+ bad_bricks="${bpath},${bad_bricks}"
+ fi
+ done
+ fi
+
+ if [[ $healthy_count -ge 4 ]]
+ then
+ match="True"
+ echo "${LINE_SEP}" >> "$MOD_BACKUP_FILES"
+ echo "Modified : $(echo "$gfid_actual_paths" | cut -d '|' -f 2)" >> "$MOD_BACKUP_FILES"
+ set_version_dirty_xattr "$matching_bricks" 1 "$gfid_actual_paths"
+ set_version_dirty_xattr "$bad_bricks" 0 "$gfid_actual_paths"
+ else
+ log_can_not_heal "$gfid_actual_paths" "${file_paths}"
+ fi
+
+ echo "$match"
+}
+function set_xattr()
+{
+ local count=$1
+ local heal_entry=""
+ local file_stat=""
+ local frag_size=""
+ local total_size=""
+ local file_paths=""
+ local num=""
+ local can_heal_count=0
+
+ heal_log "Started $(basename $BASH_SOURCE) on $(date) "
+
+ while read -r heal_entry
+ do
+ heal_log "$LINE_SEP"
+ heal_log "${heal_entry}"
+
+ file_stat=$(echo "$heal_entry" | cut -d "|" -f 1)
+ frag_size=$(echo "$file_stat" | rev | cut -d ":" -f 1 | rev)
+ total_size="$(total_file_size_in_hex "$frag_size")"
+ file_paths=$(echo "$heal_entry" | cut -d "|" -f 2)
+ match=$(check_all_frag_and_set_xattr "$file_paths" "$total_size" "$file_stat")
+ if [[ "$match" == "True" ]]
+ then
+ can_heal_count=$((can_heal_count + 1))
+ fi
+
+ sed -i '1d' potential_heal
+ count=$((count - 1))
+ if [ $count == 0 ]
+ then
+ num=$(cat potential_heal | wc -l)
+ heal_log "$LINE_SEP"
+ heal_log "${1} : Processed"
+ heal_log "${can_heal_count} : Modified to Heal"
+ heal_log "$((${1} - can_heal_count)) : Moved to can_not_heal."
+ heal_log "${num} : Pending as Potential Heal"
+ exit 0
+ fi
+
+ done < potential_heal
+}
+
+function main ()
+{
+ local count=0
+
+ read -p "Number of files to correct: [choose between 1-1000] (0 for All):" count
+ if [[ $count -lt 0 || $count -gt 1000 ]]
+ then
+ echo "Provide correct value:"
+ exit 2
+ fi
+
+ if [[ $count -eq 0 ]]
+ then
+ count=$(cat potential_heal | wc -l)
+ fi
+ set_xattr "$count"
+}
+
+_init "$@" && main "$@"
diff --git a/extras/ec-heal-script/gfid_needing_heal_parallel.sh b/extras/ec-heal-script/gfid_needing_heal_parallel.sh
new file mode 100755
index 00000000000..d7f53c97c33
--- /dev/null
+++ b/extras/ec-heal-script/gfid_needing_heal_parallel.sh
@@ -0,0 +1,278 @@
+#!/bin/bash
+# Copyright (c) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+
+# This script provides a list of all the files which can or can not be healed.
+# It also generates two files, potential_heal and can_not_heal, which contain the information
+# of all those files. These files can be used by correct_pending_heals.sh to correct
+# the fragments so that the files can be healed by shd.
+
+CAN_NOT_HEAL="can_not_heal"
+CAN_HEAL="potential_heal"
+LINE_SEP="==================================================="
+LOG_DIR="/var/log/glusterfs"
+LOG_FILE="$LOG_DIR/ec-heal-script.log"
+
+function heal_log()
+{
+ echo "$1" >> "$LOG_FILE"
+}
+
+function _init ()
+{
+ if [ $# -ne 1 ]; then
+ echo "usage: $0 <gluster volume name>";
+ echo "This script provides a list of all the files which can be healed or not healed.
+It also generates two files, potential_heal and can_not_heal, which contains the information
+of all theose files. These files could be used by correct_pending_heals.sh to correct
+the fragmnets so that files could be healed by shd."
+ exit 2;
+ fi
+
+ volume=$1;
+}
+
+function get_pending_entries ()
+{
+ local volume_name=$1
+
+ gluster volume heal "$volume_name" info | grep -v ":/" | grep -v "Number of entries" | grep -v "Status:" | sort -u | sed '/^$/d'
+}
+
+function get_entry_path_on_brick()
+{
+ local path="$1"
+ local gfid_string=""
+ if [[ "${path:0:1}" == "/" ]];
+ then
+ echo "$path"
+ else
+ gfid_string="$(echo "$path" | cut -f2 -d':' | cut -f1 -d '>')"
+ echo "/.glusterfs/${gfid_string:0:2}/${gfid_string:2:2}/$gfid_string"
+ fi
+}
+
+function run_command_on_server()
+{
+ local subvolume="$1"
+ local host="$2"
+ local cmd="$3"
+ local output
+ output=$(ssh -n "${host}" "${cmd}")
+ if [ -n "$output" ]
+ then
+ echo "$subvolume:$output"
+ fi
+}
+
+function get_entry_path_all_bricks ()
+{
+ local entry="$1"
+ local bricks="$2"
+ local cmd=""
+ for brick in $bricks
+ do
+ echo "${brick}#$(get_entry_path_on_brick "$entry")"
+ done | tr '\n' ','
+}
+
+function get_stat_for_entry_from_all_bricks ()
+{
+ local entry="$1"
+ local bricks="$2"
+ local subvolume=0
+ local host=""
+ local bpath=""
+ local cmd=""
+
+ for brick in $bricks
+ do
+ if [[ "$((subvolume % 6))" == "0" ]]
+ then
+ subvolume=$((subvolume+1))
+ fi
+ host=$(echo "$brick" | cut -f1 -d':')
+ bpath=$(echo "$brick" | cut -f2 -d':')
+
+ cmd="stat --format=%F:%B:%s $bpath$(get_entry_path_on_brick "$entry") 2>/dev/null"
+ run_command_on_server "$subvolume" "${host}" "${cmd}" &
+ done | sort | uniq -c | sort -rnk1
+}
+
+function get_bricks_from_volume()
+{
+ local v=$1
+ gluster volume info "$v" | grep -E "^Brick[0-9][0-9]*:" | cut -f2- -d':'
+}
+
+function print_entry_gfid()
+{
+ local host="$1"
+ local dirpath="$2"
+ local entry="$3"
+ local gfid
+ gfid="$(ssh -n "${host}" "getfattr -d -m. -e hex $dirpath/$entry 2>/dev/null | grep trusted.gfid=|cut -f2 -d'='")"
+ echo "$entry" - "$gfid"
+}
+
+function print_brick_directory_info()
+{
+ local h="$1"
+ local dirpath="$2"
+ while read -r e
+ do
+ print_entry_gfid "${h}" "${dirpath}" "${e}"
+ done < <(ssh -n "${h}" "ls $dirpath 2>/dev/null")
+}
+
+function print_directory_info()
+{
+ local entry="$1"
+ local bricks="$2"
+ local h
+ local b
+ local gfid
+ for brick in $bricks;
+ do
+ h="$(echo "$brick" | cut -f1 -d':')"
+ b="$(echo "$brick" | cut -f2 -d':')"
+ dirpath="$b$(get_entry_path_on_brick "$entry")"
+ print_brick_directory_info "${h}" "${dirpath}" &
+ done | sort | uniq -c
+}
+
+function print_entries_needing_heal()
+{
+ local quorum=0
+ local entry="$1"
+ local bricks="$2"
+ while read -r line
+ do
+ quorum=$(echo "$line" | awk '{print $1}')
+ if [[ "$quorum" -lt 4 ]]
+ then
+ echo "$line - Not in Quorum"
+ else
+ echo "$line - In Quorum"
+ fi
+ done < <(print_directory_info "$entry" "$bricks")
+}
+
+function find_file_paths ()
+{
+ local bpath=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local brick_root=""
+ local gfid=""
+ local actual_path=""
+ local gfid_path=""
+
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ brick_root=$(echo "$file_entry" | cut -d "#" -f 1)
+
+ gfid=$(echo "${file_entry}" | grep ".glusterfs")
+
+ if [[ -n "$gfid" ]]
+ then
+ gfid_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep -v '.glusterfs' "
+ actual_path=$(ssh -n "${file_host}" "${cmd}")
+ #removing absolute path so that user can refer this from mount point
+ actual_path=${actual_path#"$brick_root"}
+ else
+ actual_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep '.glusterfs' "
+ gfid_path=$(ssh -n "${file_host}" "${cmd}")
+ gfid_path=${gfid_path#"$brick_root"}
+ fi
+
+ echo "${gfid_path}|${actual_path}"
+}
+
+function log_can_not_heal ()
+{
+ local gfid_actual_paths=$1
+ local file_paths=$2
+ file_paths=${file_paths//#}
+
+ echo "${LINE_SEP}" >> "$CAN_NOT_HEAL"
+ echo "Can Not Heal : $(echo "$gfid_actual_paths" | cut -d '|' -f 2)" >> "$CAN_NOT_HEAL"
+ for bpath in ${file_paths//,/ }
+ do
+ echo "${bpath}" >> "$CAN_NOT_HEAL"
+ done
+}
+
+function main ()
+{
+ local bricks=""
+ local quorum=0
+ local stat_info=""
+ local file_type=""
+ local gfid_actual_paths=""
+ local bpath=""
+ local file_paths=""
+ local good=0
+ local bad=0
+ bricks=$(get_bricks_from_volume "$volume")
+ rm -f "$CAN_HEAL"
+ rm -f "$CAN_NOT_HEAL"
+ mkdir "$LOG_DIR" -p
+
+ heal_log "Started $(basename "$BASH_SOURCE") on $(date) "
+ while read -r heal_entry
+ do
+ heal_log "------------------------------------------------------------------"
+ heal_log "$heal_entry"
+
+ gfid_actual_paths=""
+ file_paths="$(get_entry_path_all_bricks "$heal_entry" "$bricks")"
+ stat_info="$(get_stat_for_entry_from_all_bricks "$heal_entry" "$bricks")"
+ heal_log "$stat_info"
+
+ quorum=$(echo "$stat_info" | head -1 | awk '{print $1}')
+ good_stat=$(echo "$stat_info" | head -1 | awk '{print $3}')
+ file_type="$(echo "$stat_info" | head -1 | cut -f2 -d':')"
+ if [[ "$file_type" == "directory" ]]
+ then
+ print_entries_needing_heal "$heal_entry" "$bricks"
+ else
+ if [[ "$quorum" -ge 4 ]]
+ then
+ good=$((good + 1))
+ heal_log "Verdict: Healable"
+
+ echo "${good_stat}|$file_paths" >> "$CAN_HEAL"
+ else
+ bad=$((bad + 1))
+ heal_log "Verdict: Not Healable"
+ for bpath in ${file_paths//,/ }
+ do
+ if [[ -z "$gfid_actual_paths" ]]
+ then
+ gfid_actual_paths=$(find_file_paths "$bpath")
+ else
+ break
+ fi
+ done
+ log_can_not_heal "$gfid_actual_paths" "${file_paths}"
+ fi
+ fi
+ done < <(get_pending_entries "$volume")
+ heal_log "========================================="
+ heal_log "Total number of potential heal : ${good}"
+ heal_log "Total number of can not heal : ${bad}"
+ heal_log "========================================="
+}
+
+_init "$@" && main "$@"
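Both scripts judge healability from the stat output and the trusted.ec.* xattrs of each fragment, collected over ssh from every brick. The same checks can be repeated by hand when investigating a single entry; a sketch, with the brick path /bricks/brick1 and the fragment path dir/file purely illustrative:

    # fragment type, block size and size as the scripts see them
    stat --format=%F:%B:%s /bricks/brick1/dir/file

    # EC metadata the scripts compare and, if needed, reset
    getfattr -n trusted.ec.size -d -e hex /bricks/brick1/dir/file
    getfattr -n trusted.ec.version -d -e hex /bricks/brick1/dir/file
    getfattr -n trusted.ec.dirty -d -e hex /bricks/brick1/dir/file
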
diff --git a/extras/ganesha/Makefile.am b/extras/ganesha/Makefile.am
new file mode 100644
index 00000000000..9eaa401b6c8
--- /dev/null
+++ b/extras/ganesha/Makefile.am
@@ -0,0 +1,2 @@
+SUBDIRS = scripts config ocf
+CLEANFILES =
diff --git a/extras/ganesha/config/Makefile.am b/extras/ganesha/config/Makefile.am
new file mode 100644
index 00000000000..c729273096e
--- /dev/null
+++ b/extras/ganesha/config/Makefile.am
@@ -0,0 +1,4 @@
+EXTRA_DIST= ganesha-ha.conf.sample
+
+confdir = $(sysconfdir)/ganesha
+conf_DATA = ganesha-ha.conf.sample
diff --git a/extras/ganesha/config/ganesha-ha.conf.sample b/extras/ganesha/config/ganesha-ha.conf.sample
new file mode 100644
index 00000000000..c22892bde56
--- /dev/null
+++ b/extras/ganesha/config/ganesha-ha.conf.sample
@@ -0,0 +1,19 @@
+# Name of the HA cluster created.
+# must be unique within the subnet
+HA_NAME="ganesha-ha-360"
+#
+# N.B. you may use short names or long names; you may not use IP addrs.
+# Once you select one, stay with it as it will be mildly unpleasant to
+# clean up if you switch later on. Ensure that all names - short and/or
+# long - are in DNS or /etc/hosts on all machines in the cluster.
+#
+# The subset of nodes of the Gluster Trusted Pool that form the ganesha
+# HA cluster. Hostname is specified.
+HA_CLUSTER_NODES="server1,server2,..."
+#HA_CLUSTER_NODES="server1.lab.redhat.com,server2.lab.redhat.com,..."
+#
+# Virtual IPs for each of the nodes specified above.
+VIP_server1="10.0.2.1"
+VIP_server2="10.0.2.2"
+#VIP_server1_lab_redhat_com="10.0.2.1"
+#VIP_server2_lab_redhat_com="10.0.2.2"
diff --git a/extras/ganesha/ocf/Makefile.am b/extras/ganesha/ocf/Makefile.am
new file mode 100644
index 00000000000..990a609f254
--- /dev/null
+++ b/extras/ganesha/ocf/Makefile.am
@@ -0,0 +1,11 @@
+EXTRA_DIST= ganesha_grace ganesha_mon ganesha_nfsd
+
+# The root of the OCF resource agent hierarchy
+# Per the OCF standard, it's always "lib",
+# not "lib64" (even on 64-bit platforms).
+ocfdir = $(prefix)/lib/ocf
+
+# The provider directory
+radir = $(ocfdir)/resource.d/heartbeat
+
+ra_SCRIPTS = ganesha_grace ganesha_mon ganesha_nfsd
diff --git a/extras/ganesha/ocf/ganesha_grace b/extras/ganesha/ocf/ganesha_grace
new file mode 100644
index 00000000000..825f7164597
--- /dev/null
+++ b/extras/ganesha/ocf/ganesha_grace
@@ -0,0 +1,221 @@
+#!/bin/bash
+#
+# Copyright (c) 2014 Anand Subramanian anands@redhat.com
+# Copyright (c) 2015 Red Hat Inc.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+#
+
+# Initialization:
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+if [ -n "$OCF_DEBUG_LIBRARY" ]; then
+ . $OCF_DEBUG_LIBRARY
+else
+ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+fi
+
+OCF_RESKEY_grace_active_default="grace-active"
+: ${OCF_RESKEY_grace_active=${OCF_RESKEY_grace_active_default}}
+
+ganesha_meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ganesha_grace">
+<version>1.0</version>
+
+<longdesc lang="en">
+This Linux-specific resource agent acts as a dummy
+resource agent for nfs-ganesha.
+</longdesc>
+
+<shortdesc lang="en">Manages the user-space nfs-ganesha NFS server</shortdesc>
+
+<parameters>
+<parameter name="grace_active">
+<longdesc lang="en">NFS-Ganesha grace active attribute</longdesc>
+<shortdesc lang="en">NFS-Ganesha grace active attribute</shortdesc>
+<content type="string" default="grace-active" />
+</parameter>
+</parameters>
+
+<actions>
+<action name="start" timeout="40s" />
+<action name="stop" timeout="40s" />
+<action name="status" timeout="20s" interval="60s" />
+<action name="monitor" depth="0" timeout="10s" interval="5s" />
+<action name="notify" timeout="10s" />
+<action name="meta-data" timeout="20s" />
+</actions>
+</resource-agent>
+END
+
+return ${OCF_SUCCESS}
+}
+
+ganesha_grace_usage() {
+ echo "ganesha.nfsd USAGE"
+}
+
+# Make sure meta-data and usage always succeed
+case $__OCF_ACTION in
+ meta-data) ganesha_meta_data
+ exit ${OCF_SUCCESS}
+ ;;
+ usage|help) ganesha_grace_usage
+ exit ${OCF_SUCCESS}
+ ;;
+ *)
+ ;;
+esac
+
+ganesha_grace_start()
+{
+ local rc=${OCF_ERR_GENERIC}
+ local host=$(hostname -s)
+
+ ocf_log debug "ganesha_grace_start()"
+ # give ganesha_mon RA a chance to set the crm_attr first
+ # I mislike the sleep, but it's not clear that looping
+ # with a small sleep is necessarily better
+ # start has a 40sec timeout, so a 5sec sleep here is okay
+ sleep 5
+ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null)
+ if [ $? -ne 0 ]; then
+ host=$(hostname)
+ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null )
+ if [ $? -ne 0 ]; then
+ ocf_log info "grace start: crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} failed"
+ fi
+ fi
+
+ # Three possibilities:
+ # 1. There is no attribute at all and attr_updater returns
+ # a zero length string. This happens when
+ # ganesha_mon::monitor hasn't run at least once to set
+ # the attribute. The assumption here is that the system
+ # is coming up. We pretend, for now, that the node is
+ # healthy, to allow the system to continue coming up.
+ # It will cure itself in a few seconds
+ # 2. There is an attribute, and it has the value "1"; this
+ # node is healthy.
+ # 3. There is an attribute, but it has no value or the value
+ # "0"; this node is not healthy.
+
+ # case 1
+ if [[ -z "${attr}" ]]; then
+ return ${OCF_SUCCESS}
+ fi
+
+ # case 2
+ if [[ "${attr}" = *"value=1" ]]; then
+ return ${OCF_SUCCESS}
+ fi
+
+ # case 3
+ return ${OCF_NOT_RUNNING}
+}
+
+ganesha_grace_stop()
+{
+
+ ocf_log debug "ganesha_grace_stop()"
+ return ${OCF_SUCCESS}
+}
+
+ganesha_grace_notify()
+{
+ # since this is a clone RA we should only ever see pre-start
+ # or post-stop
+ mode="${OCF_RESKEY_CRM_meta_notify_type}-${OCF_RESKEY_CRM_meta_notify_operation}"
+ case "${mode}" in
+ pre-start | post-stop)
+ dbus-send --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace string:${OCF_RESKEY_CRM_meta_notify_stop_uname}
+ if [ $? -ne 0 ]; then
+ ocf_log info "dbus-send --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace string:${OCF_RESKEY_CRM_meta_notify_stop_uname} failed"
+ fi
+ ;;
+ esac
+
+ return ${OCF_SUCCESS}
+}
+
+ganesha_grace_monitor()
+{
+ local host=$(hostname -s)
+
+ ocf_log debug "monitor"
+
+ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null)
+ if [ $? -ne 0 ]; then
+ host=$(hostname)
+ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null)
+ if [ $? -ne 0 ]; then
+ ocf_log info "crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} failed"
+ fi
+ fi
+
+ # if there is no attribute (yet), maybe it's because
+ # this RA started before ganesha_mon (nfs-mon) has had
+ # chance to create it. In which case we'll pretend
+ # everything is okay this time around
+ if [[ -z "${attr}" ]]; then
+ return ${OCF_SUCCESS}
+ fi
+
+ if [[ "${attr}" = *"value=1" ]]; then
+ return ${OCF_SUCCESS}
+ fi
+
+ return ${OCF_NOT_RUNNING}
+}
+
+ganesha_grace_validate()
+{
+ return ${OCF_SUCCESS}
+}
+
+ganesha_grace_validate
+
+# Translate each action into the appropriate function call
+case $__OCF_ACTION in
+start) ganesha_grace_start
+ ;;
+stop) ganesha_grace_stop
+ ;;
+status|monitor) ganesha_grace_monitor
+ ;;
+notify) ganesha_grace_notify
+ ;;
+*) ganesha_grace_usage
+ exit ${OCF_ERR_UNIMPLEMENTED}
+ ;;
+esac
+
+rc=$?
+
+# The resource agent may optionally log a debug message
+ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc"
+exit $rc
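The grace agent never starts or stops ganesha.nfsd itself; start and monitor only read the per-node grace-active attribute that ganesha_mon maintains, and notify asks the surviving servers to enter grace over D-Bus. When a node is being reported unhealthy, the same query the agent runs can be issued manually; a sketch (the node name is whatever hostname -s returns):

    # value=1 means the local ganesha.nfsd is considered healthy
    crm_attribute --query --node=$(hostname -s) --name=grace-active
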
diff --git a/extras/ganesha/ocf/ganesha_mon b/extras/ganesha/ocf/ganesha_mon
new file mode 100644
index 00000000000..2b4a9d6da84
--- /dev/null
+++ b/extras/ganesha/ocf/ganesha_mon
@@ -0,0 +1,234 @@
+#!/bin/bash
+#
+# Copyright (c) 2014 Anand Subramanian anands@redhat.com
+# Copyright (c) 2015 Red Hat Inc.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+#
+
+# Initialization:
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+if [ -n "${OCF_DEBUG_LIBRARY}" ]; then
+ . ${OCF_DEBUG_LIBRARY}
+else
+ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+fi
+
+# Defaults
+OCF_RESKEY_ganesha_active_default="ganesha-active"
+OCF_RESKEY_grace_active_default="grace-active"
+OCF_RESKEY_grace_delay_default="5"
+
+: ${OCF_RESKEY_ganesha_active=${OCF_RESKEY_ganesha_active_default}}
+: ${OCF_RESKEY_grace_active=${OCF_RESKEY_grace_active_default}}
+: ${OCF_RESKEY_grace_delay=${OCF_RESKEY_grace_delay_default}}
+
+ganesha_meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ganesha_mon">
+<version>1.0</version>
+
+<longdesc lang="en">
+This Linux-specific resource agent acts as a dummy
+resource agent for nfs-ganesha.
+</longdesc>
+
+<shortdesc lang="en">Manages the user-space nfs-ganesha NFS server</shortdesc>
+
+<parameters>
+<parameter name="ganesha_active">
+<longdesc lang="en">NFS-Ganesha daemon active attribute</longdesc>
+<shortdesc lang="en">NFS-Ganesha daemon active attribute</shortdesc>
+<content type="string" default="ganesha-active" />
+</parameter>
+<parameter name="grace_active">
+<longdesc lang="en">NFS-Ganesha grace active attribute</longdesc>
+<shortdesc lang="en">NFS-Ganesha grace active attribute</shortdesc>
+<content type="string" default="grace-active" />
+</parameter>
+<parameter name="grace_delay">
+<longdesc lang="en">
+NFS-Ganesha grace delay.
+When changing this, adjust the ganesha_grace RA's monitor interval to match.
+</longdesc>
+<shortdesc lang="en">NFS-Ganesha grace delay</shortdesc>
+<content type="string" default="5" />
+</parameter>
+</parameters>
+
+<actions>
+<action name="start" timeout="40s" />
+<action name="stop" timeout="40s" />
+<action name="status" timeout="20s" interval="60s" />
+<action name="monitor" depth="0" timeout="10s" interval="10s" />
+<action name="meta-data" timeout="20s" />
+</actions>
+</resource-agent>
+END
+
+return ${OCF_SUCCESS}
+}
+
+ganesha_mon_usage() {
+ echo "ganesha.nfsd USAGE"
+}
+
+# Make sure meta-data and usage always succeed
+case ${__OCF_ACTION} in
+ meta-data) ganesha_meta_data
+ exit ${OCF_SUCCESS}
+ ;;
+ usage|help) ganesha_mon_usage
+ exit ${OCF_SUCCESS}
+ ;;
+ *)
+ ;;
+esac
+
+ganesha_mon_start()
+{
+ ocf_log debug "ganesha_mon_start"
+ ganesha_mon_monitor
+ return $OCF_SUCCESS
+}
+
+ganesha_mon_stop()
+{
+ ocf_log debug "ganesha_mon_stop"
+ return $OCF_SUCCESS
+}
+
+ganesha_mon_monitor()
+{
+ local host=$(hostname -s)
+ local pid_file="/var/run/ganesha.pid"
+ local rhel6_pid_file="/var/run/ganesha.nfsd.pid"
+ local proc_pid="/proc/"
+
+ # RHEL6 /etc/init.d/nfs-ganesha adds -p /var/run/ganesha.nfsd.pid
+ # RHEL7 systemd does not. Would be nice if all distros used the
+ # same pid file.
+ if [ -e ${rhel6_pid_file} ]; then
+ pid_file=${rhel6_pid_file}
+ fi
+ if [ -e ${pid_file} ]; then
+ proc_pid="${proc_pid}$(cat ${pid_file})"
+ fi
+
+ if [ "x${proc_pid}" != "x/proc/" -a -d ${proc_pid} ]; then
+
+ attrd_updater -n ${OCF_RESKEY_ganesha_active} -v 1
+ if [ $? -ne 0 ]; then
+ ocf_log info "warning: attrd_updater -n ${OCF_RESKEY_ganesha_active} -v 1 failed"
+ fi
+
+ # ganesha_grace (nfs-grace) RA follows grace-active attr
+ # w/ constraint location
+ attrd_updater -n ${OCF_RESKEY_grace_active} -v 1
+ if [ $? -ne 0 ]; then
+ ocf_log info "warning: attrd_updater -n ${OCF_RESKEY_grace_active} -v 1 failed"
+ fi
+
+ # ganesha_mon (nfs-mon) and ganesha_grace (nfs-grace)
+ # track grace-active crm_attr (attr != crm_attr)
+ # we can't just use the attr as there's no way to query
+ # its value in RHEL6 pacemaker
+
+ crm_attribute --node=${host} --lifetime=forever --name=${OCF_RESKEY_grace_active} --update=1 2> /dev/null
+ if [ $? -ne 0 ]; then
+ host=$(hostname)
+ crm_attribute --node=${host} --lifetime=forever --name=${OCF_RESKEY_grace_active} --update=1 2> /dev/null
+ if [ $? -ne 0 ]; then
+ ocf_log info "mon monitor warning: crm_attribute --node=${host} --lifetime=forever --name=${OCF_RESKEY_grace_active} --update=1 failed"
+ fi
+ fi
+
+ return ${OCF_SUCCESS}
+ fi
+
+ # VIP fail-over is triggered by clearing the
+ # ganesha-active node attribute on this node.
+ #
+ # Meanwhile the ganesha_grace notify() runs when its
+ # nfs-grace resource is disabled on a node; which
+ # is triggered by clearing the grace-active attribute
+ # on this node.
+ #
+ # We need to allow time for it to run and put
+ # the remaining ganesha.nfsds into grace before
+ # initiating the VIP fail-over.
+
+ attrd_updater -D -n ${OCF_RESKEY_grace_active}
+ if [ $? -ne 0 ]; then
+ ocf_log info "warning: attrd_updater -D -n ${OCF_RESKEY_grace_active} failed"
+ fi
+
+ host=$(hostname -s)
+ crm_attribute --node=${host} --name=${OCF_RESKEY_grace_active} --update=0 2> /dev/null
+ if [ $? -ne 0 ]; then
+ host=$(hostname)
+ crm_attribute --node=${host} --name=${OCF_RESKEY_grace_active} --update=0 2> /dev/null
+ if [ $? -ne 0 ]; then
+ ocf_log info "mon monitor warning: crm_attribute --node=${host} --name=${OCF_RESKEY_grace_active} --update=0 failed"
+ fi
+ fi
+
+ sleep ${OCF_RESKEY_grace_delay}
+
+ attrd_updater -D -n ${OCF_RESKEY_ganesha_active}
+ if [ $? -ne 0 ]; then
+ ocf_log info "warning: attrd_updater -D -n ${OCF_RESKEY_ganesha_active} failed"
+ fi
+
+ return ${OCF_SUCCESS}
+}
+
+ganesha_mon_validate()
+{
+ return ${OCF_SUCCESS}
+}
+
+ganesha_mon_validate
+
+# Translate each action into the appropriate function call
+case ${__OCF_ACTION} in
+start) ganesha_mon_start
+ ;;
+stop) ganesha_mon_stop
+ ;;
+status|monitor) ganesha_mon_monitor
+ ;;
+*) ganesha_mon_usage
+ exit ${OCF_ERR_UNIMPLEMENTED}
+ ;;
+esac
+
+rc=$?
+
+# The resource agent may optionally log a debug message
+ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc"
+exit $rc
diff --git a/extras/ganesha/ocf/ganesha_nfsd b/extras/ganesha/ocf/ganesha_nfsd
new file mode 100644
index 00000000000..f91e8b6b8f7
--- /dev/null
+++ b/extras/ganesha/ocf/ganesha_nfsd
@@ -0,0 +1,167 @@
+#!/bin/bash
+#
+# Copyright (c) 2014 Anand Subramanian anands@redhat.com
+# Copyright (c) 2015 Red Hat Inc.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+#
+
+# Initialization:
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+if [ -n "${OCF_DEBUG_LIBRARY}" ]; then
+ . ${OCF_DEBUG_LIBRARY}
+else
+ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+fi
+
+OCF_RESKEY_ha_vol_mnt_default="/run/gluster/shared_storage"
+: ${OCF_RESKEY_ha_vol_mnt=${OCF_RESKEY_ha_vol_mnt_default}}
+
+ganesha_meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ganesha_nfsd">
+<version>1.0</version>
+
+<longdesc lang="en">
+This Linux-specific resource agent acts as a dummy
+resource agent for nfs-ganesha.
+</longdesc>
+
+<shortdesc lang="en">Manages the user-space nfs-ganesha NFS server</shortdesc>
+
+<parameters>
+<parameter name="ha_vol_mnt">
+<longdesc lang="en">HA State Volume Mount Point</longdesc>
+<shortdesc lang="en">HA_State Volume Mount Point</shortdesc>
+<content type="string" default="" />
+</parameter>
+</parameters>
+
+<actions>
+<action name="start" timeout="5s" />
+<action name="stop" timeout="5s" />
+<action name="status" depth="0" timeout="5s" interval="0" />
+<action name="monitor" depth="0" timeout="5s" interval="0" />
+<action name="meta-data" timeout="20s" />
+</actions>
+</resource-agent>
+END
+
+return ${OCF_SUCCESS}
+}
+
+ganesha_nfsd_usage() {
+ echo "ganesha.nfsd USAGE"
+}
+
+# Make sure meta-data and usage always succeed
+case $__OCF_ACTION in
+ meta-data) ganesha_meta_data
+ exit ${OCF_SUCCESS}
+ ;;
+ usage|help) ganesha_nfsd_usage
+ exit ${OCF_SUCCESS}
+ ;;
+ *)
+ ;;
+esac
+
+ganesha_nfsd_start()
+{
+ local long_host=$(hostname)
+
+ if [[ -d /var/lib/nfs ]]; then
+ mv /var/lib/nfs /var/lib/nfs.backup
+ if [ $? -ne 0 ]; then
+ ocf_log notice "mv /var/lib/nfs /var/lib/nfs.backup failed"
+ fi
+ ln -s ${OCF_RESKEY_ha_vol_mnt}/nfs-ganesha/${long_host}/nfs /var/lib/nfs
+ if [ $? -ne 0 ]; then
+ ocf_log notice "ln -s ${OCF_RESKEY_ha_vol_mnt}/nfs-ganesha/${long_host}/nfs /var/lib/nfs failed"
+ fi
+ fi
+
+ return ${OCF_SUCCESS}
+}
+
+ganesha_nfsd_stop()
+{
+
+ if [ -L /var/lib/nfs -a -d /var/lib/nfs.backup ]; then
+ rm -f /var/lib/nfs
+ if [ $? -ne 0 ]; then
+ ocf_log notice "rm -f /var/lib/nfs failed"
+ fi
+ mv /var/lib/nfs.backup /var/lib/nfs
+ if [ $? -ne 0 ]; then
+ ocf_log notice "mv /var/lib/nfs.backup /var/lib/nfs failed"
+ fi
+ fi
+
+ return ${OCF_SUCCESS}
+}
+
+ganesha_nfsd_monitor()
+{
+ # pacemaker checks to see if the RA is already running before starting it.
+ # if we return success, it is presumed the resource is already running and
+ # does not need to be started, i.e. pacemaker will not invoke the start
+ # action. return something other than success to make pacemaker invoke
+ # the start action
+ if [[ -L /var/lib/nfs ]]; then
+ return ${OCF_SUCCESS}
+ fi
+ return ${OCF_NOT_RUNNING}
+}
+
+ganesha_nfsd_validate()
+{
+ return ${OCF_SUCCESS}
+}
+
+ganesha_nfsd_validate
+
+# ocf_log notice "ganesha_nfsd ${OCF_RESOURCE_INSTANCE} $__OCF_ACTION"
+
+# Translate each action into the appropriate function call
+case $__OCF_ACTION in
+start) ganesha_nfsd_start
+ ;;
+stop) ganesha_nfsd_stop
+ ;;
+status|monitor) ganesha_nfsd_monitor
+ ;;
+*) ganesha_nfsd_usage
+ exit ${OCF_ERR_UNIMPLEMENTED}
+ ;;
+esac
+
+rc=$?
+
+# The resource agent may optionally log a debug message
+ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc"
+exit $rc
diff --git a/extras/ganesha/scripts/Makefile.am b/extras/ganesha/scripts/Makefile.am
new file mode 100644
index 00000000000..7e345fd5f19
--- /dev/null
+++ b/extras/ganesha/scripts/Makefile.am
@@ -0,0 +1,6 @@
+EXTRA_DIST= create-export-ganesha.sh generate-epoch.py dbus-send.sh \
+ ganesha-ha.sh
+
+scriptsdir = $(libexecdir)/ganesha
+scripts_SCRIPTS = create-export-ganesha.sh dbus-send.sh generate-epoch.py \
+ ganesha-ha.sh
diff --git a/extras/ganesha/scripts/create-export-ganesha.sh b/extras/ganesha/scripts/create-export-ganesha.sh
new file mode 100755
index 00000000000..3040e8138b0
--- /dev/null
+++ b/extras/ganesha/scripts/create-export-ganesha.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+#This script is called by glusterd when the user
+#tries to export a volume via NFS-Ganesha.
+#An export file specific to a volume
+#is created in GANESHA_DIR/exports.
+
+# Try loading the config from any of the distro
+# specific configuration locations
+if [ -f /etc/sysconfig/ganesha ]
+ then
+ . /etc/sysconfig/ganesha
+fi
+if [ -f /etc/conf.d/ganesha ]
+ then
+ . /etc/conf.d/ganesha
+fi
+if [ -f /etc/default/ganesha ]
+ then
+ . /etc/default/ganesha
+fi
+
+GANESHA_DIR=${1%/}
+OPTION=$2
+VOL=$3
+CONF=$GANESHA_DIR"/ganesha.conf"
+declare -i EXPORT_ID
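+
+# Illustrative invocation (a sketch; the shared-storage path below is the usual
+# default location and the volume name is an example):
+#   create-export-ganesha.sh /run/gluster/shared_storage/nfs-ganesha on myvol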
+
+function check_cmd_status()
+{
+ if [ "$1" != "0" ]
+ then
+ rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
+ exit 1
+ fi
+}
+
+
+if [ ! -d "$GANESHA_DIR/exports" ];
+ then
+ mkdir $GANESHA_DIR/exports
+ check_cmd_status `echo $?`
+fi
+
+function write_conf()
+{
+echo -e "# WARNING : Using Gluster CLI will overwrite manual
+# changes made to this file. To avoid it, edit the
+# file and run ganesha-ha.sh --refresh-config."
+
+echo "EXPORT{"
+echo " Export_Id = 2;"
+echo " Path = \"/$VOL\";"
+echo " FSAL {"
+echo " name = "GLUSTER";"
+echo " hostname=\"localhost\";"
+echo " volume=\"$VOL\";"
+echo " }"
+echo " Access_type = RW;"
+echo " Disable_ACL = true;"
+echo ' Squash="No_root_squash";'
+echo " Pseudo=\"/$VOL\";"
+echo ' Protocols = "3", "4" ;'
+echo ' Transports = "UDP","TCP";'
+echo ' SecType = "sys";'
+echo ' Security_Label = False;'
+echo " }"
+}
+if [ "$OPTION" = "on" ];
+then
+ if ! (cat $CONF | grep $VOL.conf\"$ )
+ then
+ write_conf $@ > $GANESHA_DIR/exports/export.$VOL.conf
+ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
+ count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l`
+ if [ "$count" = "1" ] ; then
+ EXPORT_ID=2
+ else
+ EXPORT_ID=`cat $GANESHA_DIR/.export_added`
+ check_cmd_status `echo $?`
+ EXPORT_ID=EXPORT_ID+1
+ sed -i s/Export_Id.*/"Export_Id= $EXPORT_ID ;"/ \
+ $GANESHA_DIR/exports/export.$VOL.conf
+ check_cmd_status `echo $?`
+ fi
+ echo $EXPORT_ID > $GANESHA_DIR/.export_added
+ fi
+else
+ rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
+fi
diff --git a/extras/ganesha/scripts/dbus-send.sh b/extras/ganesha/scripts/dbus-send.sh
new file mode 100755
index 00000000000..9d613a0e7ad
--- /dev/null
+++ b/extras/ganesha/scripts/dbus-send.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# Try loading the config from any of the distro
+# specific configuration locations
+if [ -f /etc/sysconfig/ganesha ]
+ then
+ . /etc/sysconfig/ganesha
+fi
+if [ -f /etc/conf.d/ganesha ]
+ then
+ . /etc/conf.d/ganesha
+fi
+if [ -f /etc/default/ganesha ]
+ then
+ . /etc/default/ganesha
+fi
+
+GANESHA_DIR=${1%/}
+OPTION=$2
+VOL=$3
+CONF=$GANESHA_DIR"/ganesha.conf"
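+
+# Illustrative invocation (a sketch; the path and volume name are examples):
+#   dbus-send.sh /run/gluster/shared_storage/nfs-ganesha on myvol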
+
+function check_cmd_status()
+{
+ if [ "$1" != "0" ]
+ then
+        logger "dynamic export failed on node: $(hostname -s)"
+ fi
+}
+
+#This function adds an export dynamically by sending a DBus AddExport signal
+function dynamic_export_add()
+{
+ dbus-send --system \
+--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+org.ganesha.nfsd.exportmgr.AddExport string:$GANESHA_DIR/exports/export.$VOL.conf \
+string:"EXPORT(Path=/$VOL)"
+ check_cmd_status `echo $?`
+}
+
+#This function removes an export dynamically(uses the export_id of the export)
+function dynamic_export_remove()
+{
+    # The dbus-send below fetches all exports from the ShowExports command,
+    # searches for the export entry based on its path and then reads its
+    # export id. There are two possibilities for the path: either the entire
+    # volume is exported or a subdir. Both cases are handled, but only the
+    # first entry in the list is removed, on the assumption that the entry
+    # exported via the CLI has the lowest export id.
+ removed_id=$(dbus-send --type=method_call --print-reply --system \
+ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+ org.ganesha.nfsd.exportmgr.ShowExports | grep -B 1 -we \
+ "/"$VOL -e "/"$VOL"/" | grep uint16 | awk '{print $2}' \
+ | head -1)
+
+ dbus-send --print-reply --system \
+--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+org.ganesha.nfsd.exportmgr.RemoveExport uint16:$removed_id
+ check_cmd_status `echo $?`
+}
+
+if [ "$OPTION" = "on" ];
+then
+ dynamic_export_add $@
+fi
+
+if [ "$OPTION" = "off" ];
+then
+ dynamic_export_remove $@
+fi
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
new file mode 100644
index 00000000000..9790a719e10
--- /dev/null
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -0,0 +1,1199 @@
+#!/bin/bash
+
+# Copyright 2015-2016 Red Hat Inc. All Rights Reserved
+#
+# Pacemaker+Corosync High Availability for NFS-Ganesha
+#
+# setup, teardown, add, delete, refresh-config, and status
+#
+# Each participating node in the cluster is assigned a virtual IP (VIP)
+# which fails over to another node when its associated ganesha.nfsd dies
+# for any reason. After the VIP is moved to another node all the
+# ganesha.nfsds are sent a signal using DBUS to put them into NFS GRACE.
+#
+# There are six resource agent types used: ganesha_mon, ganesha_grace,
+# ganesha_nfsd, portblock, IPaddr, and Dummy. ganesha_mon is used to monitor the
+# ganesha.nfsd. ganesha_grace is used to send the DBUS signal to put
+# the remaining ganesha.nfsds into grace. ganesha_nfsd is used to start
+# and stop the ganesha.nfsd during setup and teardown. IPaddr manages
+# the VIP. A Dummy resource named $hostname-trigger_ip-1 is used to
+# ensure that the NFS GRACE DBUS signal is sent after the VIP moves to
+# the new host.
+
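+# The cluster is described by ganesha-ha.conf in the HA config directory. An
+# illustrative sketch of that file (hostnames and addresses are hypothetical):
+#
+#   HA_NAME="ganesha-ha-cluster"
+#   HA_CLUSTER_NODES="node1.example.com,node2.example.com,node3.example.com"
+#   VIP_node1.example.com="10.0.0.11"
+#   VIP_node2.example.com="10.0.0.12"
+#   VIP_node3.example.com="10.0.0.13"
+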
+GANESHA_HA_SH=$(realpath $0)
+HA_NUM_SERVERS=0
+HA_SERVERS=""
+HA_VOL_NAME="gluster_shared_storage"
+HA_VOL_MNT="/run/gluster/shared_storage"
+HA_CONFDIR=$HA_VOL_MNT"/nfs-ganesha"
+SERVICE_MAN="DISTRO_NOT_FOUND"
+
+# rhel, fedora id, version
+ID=""
+VERSION_ID=""
+
+PCS9OR10_PCS_CNAME_OPTION=""
+PCS9OR10_PCS_CLONE_OPTION="clone"
+SECRET_PEM="/var/lib/glusterd/nfs/secret.pem"
+
+# UNBLOCK RA uses shared_storage which may become unavailable
+# during any of the nodes reboot. Hence increase timeout value.
+PORTBLOCK_UNBLOCK_TIMEOUT="60s"
+
+# Try loading the config from any of the distro
+# specific configuration locations
+if [ -f /etc/sysconfig/ganesha ]
+ then
+ . /etc/sysconfig/ganesha
+fi
+if [ -f /etc/conf.d/ganesha ]
+ then
+ . /etc/conf.d/ganesha
+fi
+if [ -f /etc/default/ganesha ]
+ then
+ . /etc/default/ganesha
+fi
+
+GANESHA_CONF=
+
+function find_rhel7_conf
+{
+    while [[ $# -gt 0 ]]
+ do
+ key="$1"
+ case $key in
+ -f)
+ CONFFILE="$2"
+ break;
+ ;;
+ *)
+ ;;
+ esac
+ shift
+ done
+}
+
+if [ -z "${CONFFILE}" ]
+ then
+ find_rhel7_conf ${OPTIONS}
+
+fi
+
+GANESHA_CONF=${CONFFILE:-/etc/ganesha/ganesha.conf}
+
+usage() {
+
+ echo "Usage : add|delete|refresh-config|status"
+ echo "Add-node : ganesha-ha.sh --add <HA_CONF_DIR> \
+<NODE-HOSTNAME> <NODE-VIP>"
+ echo "Delete-node: ganesha-ha.sh --delete <HA_CONF_DIR> \
+<NODE-HOSTNAME>"
+ echo "Refresh-config : ganesha-ha.sh --refresh-config <HA_CONFDIR> \
+<volume>"
+ echo "Status : ganesha-ha.sh --status <HA_CONFDIR>"
+}
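+
+# Illustrative invocations matching the usage above (the config directory shown
+# is the usual shared-storage default; hostnames, VIPs and the volume name are
+# examples):
+#   ganesha-ha.sh --add /run/gluster/shared_storage/nfs-ganesha node4.example.com 10.0.0.14
+#   ganesha-ha.sh --delete /run/gluster/shared_storage/nfs-ganesha node4.example.com
+#   ganesha-ha.sh --refresh-config /run/gluster/shared_storage/nfs-ganesha myvol
+#   ganesha-ha.sh --status /run/gluster/shared_storage/nfs-ganesha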
+
+determine_service_manager () {
+
+ if [ -e "/bin/systemctl" ];
+ then
+ SERVICE_MAN="/bin/systemctl"
+ elif [ -e "/sbin/invoke-rc.d" ];
+ then
+ SERVICE_MAN="/sbin/invoke-rc.d"
+ elif [ -e "/sbin/service" ];
+ then
+ SERVICE_MAN="/sbin/service"
+ fi
+ if [[ "${SERVICE_MAN}X" == "DISTRO_NOT_FOUNDX" ]]
+ then
+ logger "Service manager not recognized, exiting"
+ exit 1
+ fi
+}
+
+manage_service ()
+{
+ local action=${1}
+ local new_node=${2}
+ local option=
+
+ if [[ "${action}" == "start" ]]; then
+ option="yes"
+ else
+ option="no"
+ fi
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${new_node} "${GANESHA_HA_SH} --setup-ganesha-conf-files $HA_CONFDIR $option"
+
+ if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]
+ then
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} ${action} nfs-ganesha"
+ else
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} nfs-ganesha ${action}"
+ fi
+}
+
+
+check_cluster_exists()
+{
+ local name=${1}
+ local cluster_name=""
+
+ if [ -e /var/run/corosync.pid ]; then
+ cluster_name=$(pcs status | grep "Cluster name:" | cut -d ' ' -f 3)
+ if [[ "${cluster_name}X" == "${name}X" ]]; then
+ logger "$name already exists, exiting"
+ exit 0
+ fi
+ fi
+}
+
+
+determine_servers()
+{
+ local cmd=${1}
+ local num_servers=0
+ local tmp_ifs=${IFS}
+ local ha_servers=""
+
+ if [ "${cmd}X" != "setupX" -a "${cmd}X" != "statusX" ]; then
+ ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
+ IFS=$' '
+ for server in ${ha_servers} ; do
+ num_servers=$(expr ${num_servers} + 1)
+ done
+ IFS=${tmp_ifs}
+ HA_NUM_SERVERS=${num_servers}
+ HA_SERVERS="${ha_servers}"
+ else
+ IFS=$','
+ for server in ${HA_CLUSTER_NODES} ; do
+ num_servers=$(expr ${num_servers} + 1)
+ done
+ IFS=${tmp_ifs}
+ HA_NUM_SERVERS=${num_servers}
+ HA_SERVERS="${HA_CLUSTER_NODES//,/ }"
+ fi
+}
+
+stop_ganesha_all()
+{
+ local serverlist=${1}
+ for node in ${serverlist} ; do
+ manage_service "stop" ${node}
+ done
+}
+
+setup_cluster()
+{
+ local name=${1}
+ local num_servers=${2}
+ local servers=${3}
+ local unclean=""
+ local quorum_policy="stop"
+
+    logger "setting up cluster ${name} with the following servers: ${servers}"
+
+ # pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} ${servers}
+ pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers}
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster setup ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers} failed, shutting down ganesha and bailing out"
+        # setup failed; stop all ganesha processes and clean up symlinks in the cluster
+ stop_ganesha_all "${servers}"
+ exit 1;
+ fi
+
+ # pcs cluster auth ${servers}
+ pcs cluster auth
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster auth failed"
+ fi
+
+ pcs cluster start --all
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster start failed"
+ exit 1;
+ fi
+
+ sleep 1
+ # wait for the cluster to elect a DC before querying or writing
+ # to the CIB. BZ 1334092
+ crmadmin --dc_lookup --timeout=5000 > /dev/null 2>&1
+ while [ $? -ne 0 ]; do
+ crmadmin --dc_lookup --timeout=5000 > /dev/null 2>&1
+ done
+
+ unclean=$(pcs status | grep -u "UNCLEAN")
+ while [[ "${unclean}X" == "UNCLEANX" ]]; do
+ sleep 1
+ unclean=$(pcs status | grep -u "UNCLEAN")
+ done
+ sleep 1
+
+ if [ ${num_servers} -lt 3 ]; then
+ quorum_policy="ignore"
+ fi
+ pcs property set no-quorum-policy=${quorum_policy}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs property set no-quorum-policy=${quorum_policy} failed"
+ fi
+
+ pcs property set stonith-enabled=false
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs property set stonith-enabled=false failed"
+ fi
+}
+
+
+setup_finalize_ha()
+{
+ local cibfile=${1}
+ local stopped=""
+
+ stopped=$(pcs status | grep -u "Stopped")
+ while [[ "${stopped}X" == "StoppedX" ]]; do
+ sleep 1
+ stopped=$(pcs status | grep -u "Stopped")
+ done
+}
+
+
+refresh_config ()
+{
+ local short_host=$(hostname -s)
+ local VOL=${1}
+ local HA_CONFDIR=${2}
+
+ local export_id=$(grep ^[[:space:]]*Export_Id $HA_CONFDIR/exports/export.$VOL.conf |\
+ awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]')
+
+
+ if [ -e ${SECRET_PEM} ]; then
+ while [[ ${3} ]]; do
+ current_host=`echo ${3} | cut -d "." -f 1`
+ if [[ ${short_host} != ${current_host} ]]; then
+ output=$(ssh -oPasswordAuthentication=no \
+-oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
+"dbus-send --print-reply --system --dest=org.ganesha.nfsd \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \
+string:$HA_CONFDIR/exports/export.$VOL.conf \
+string:\"EXPORT(Export_Id=$export_id)\" 2>&1")
+ ret=$?
+ logger <<< "${output}"
+ if [ ${ret} -ne 0 ]; then
+ echo "Refresh-config failed on ${current_host}. Please check logs on ${current_host}"
+ else
+ echo "Refresh-config completed on ${current_host}."
+ fi
+
+ fi
+ shift
+ done
+ else
+ echo "Error: refresh-config failed. Passwordless ssh is not enabled."
+ exit 1
+ fi
+
+ # Run the same command on the localhost,
+ output=$(dbus-send --print-reply --system --dest=org.ganesha.nfsd \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \
+string:$HA_CONFDIR/exports/export.$VOL.conf \
+string:"EXPORT(Export_Id=$export_id)" 2>&1)
+ ret=$?
+ logger <<< "${output}"
+ if [ ${ret} -ne 0 ] ; then
+ echo "Refresh-config failed on localhost."
+ else
+ echo "Success: refresh-config completed."
+ fi
+}
+
+
+teardown_cluster()
+{
+ local name=${1}
+
+ for server in ${HA_SERVERS} ; do
+ if [[ ${HA_CLUSTER_NODES} != *${server}* ]]; then
+ logger "info: ${server} is not in config, removing"
+
+ pcs cluster stop ${server} --force
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster stop ${server} failed"
+ fi
+
+ pcs cluster node remove ${server}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster node remove ${server} failed"
+ fi
+ fi
+ done
+
+ # BZ 1193433 - pcs doesn't reload cluster.conf after modification
+ # after teardown completes, a subsequent setup will appear to have
+ # 'remembered' the deleted node. You can work around this by
+ # issuing another `pcs cluster node remove $node`,
+ # `crm_node -f -R $server`, or
+ # `cibadmin --delete --xml-text '<node id="$server"
+ # uname="$server"/>'
+
+ pcs cluster stop --all
+ if [ $? -ne 0 ]; then
+ logger "warning pcs cluster stop --all failed"
+ fi
+
+ pcs cluster destroy
+ if [ $? -ne 0 ]; then
+ logger "error pcs cluster destroy failed"
+ exit 1
+ fi
+}
+
+
+cleanup_ganesha_config ()
+{
+ rm -f /etc/corosync/corosync.conf
+ rm -rf /etc/cluster/cluster.conf*
+ rm -rf /var/lib/pacemaker/cib/*
+ sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $HA_CONFDIR/ganesha.conf
+}
+
+do_create_virt_ip_constraints()
+{
+ local cibfile=${1}; shift
+ local primary=${1}; shift
+ local weight="1000"
+
+ # first a constraint location rule that says the VIP must be where
+ # there's a ganesha.nfsd running
+ pcs -f ${cibfile} constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1 failed"
+ fi
+
+    # then a set of constraint location prefers to set the preferred order
+ # for where a VIP should move
+ while [[ ${1} ]]; do
+ pcs -f ${cibfile} constraint location ${primary}-group prefers ${1}=${weight}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint location ${primary}-group prefers ${1}=${weight} failed"
+ fi
+ weight=$(expr ${weight} + 1000)
+ shift
+ done
+ # and finally set the highest preference for the VIP to its home node
+ # default weight when created is/was 100.
+ # on Fedora setting appears to be additive, so to get the desired
+ # value we adjust the weight
+ # weight=$(expr ${weight} - 100)
+ pcs -f ${cibfile} constraint location ${primary}-group prefers ${primary}=${weight}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint location ${primary}-group prefers ${primary}=${weight} failed"
+ fi
+}
+
+
+wrap_create_virt_ip_constraints()
+{
+ local cibfile=${1}; shift
+ local primary=${1}; shift
+ local head=""
+ local tail=""
+
+ # build a list of peers, e.g. for a four node cluster, for node1,
+ # the result is "node2 node3 node4"; for node2, "node3 node4 node1"
+ # and so on.
+ while [[ ${1} ]]; do
+ if [[ ${1} == ${primary} ]]; then
+ shift
+ while [[ ${1} ]]; do
+ tail=${tail}" "${1}
+ shift
+ done
+ else
+ head=${head}" "${1}
+ fi
+ shift
+ done
+ do_create_virt_ip_constraints ${cibfile} ${primary} ${tail} ${head}
+}
+
+
+create_virt_ip_constraints()
+{
+ local cibfile=${1}; shift
+
+ while [[ ${1} ]]; do
+ wrap_create_virt_ip_constraints ${cibfile} ${1} ${HA_SERVERS}
+ shift
+ done
+}
+
+
+setup_create_resources()
+{
+ local cibfile=$(mktemp -u)
+
+ # fixup /var/lib/nfs
+ logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}"
+ pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION} failed"
+ fi
+
+ pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION} failed"
+ fi
+
+    # see the comment in the start method of
+    # /usr/lib/ocf/resource.d/heartbeat/ganesha_grace. Allow time for
+    # ganesha_mon to start and set the ganesha-active crm_attribute.
+ sleep 5
+
+ pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} notify=true
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} failed"
+ fi
+
+ pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1
+ if [ $? -ne 0 ]; then
+        logger "warning: pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1 failed"
+ fi
+
+ pcs cluster cib ${cibfile}
+
+ while [[ ${1} ]]; do
+
+ # this is variable indirection
+ # from a nvs like 'VIP_host1=10.7.6.5' or 'VIP_host1="10.7.6.5"'
+ # (or VIP_host-1=..., or VIP_host-1.my.domain.name=...)
+ # a variable 'clean_name' is created (e.g. w/ value 'VIP_host_1')
+ # and a clean nvs (e.g. w/ value 'VIP_host_1="10_7_6_5"')
+ # after the `eval ${clean_nvs}` there is a variable VIP_host_1
+ # with the value '10_7_6_5', and the following \$$ magic to
+ # reference it, i.e. `eval tmp_ipaddr=\$${clean_name}` gives us
+ # ${tmp_ipaddr} with 10_7_6_5 and then convert the _s back to .s
+ # to give us ipaddr="10.7.6.5". whew!
+ name="VIP_${1}"
+ clean_name=${name//[-.]/_}
+ nvs=$(grep "^${name}=" ${HA_CONFDIR}/ganesha-ha.conf)
+ clean_nvs=${nvs//[-.]/_}
+ eval ${clean_nvs}
+ eval tmp_ipaddr=\$${clean_name}
+ ipaddr=${tmp_ipaddr//_/.}
+
+ pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=block ip=${ipaddr} --group ${1}-group
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s failed"
+ fi
+
+ pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
+ fi
+
+ pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \
+ op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \
+ op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_unblock failed"
+ fi
+
+
+ shift
+ done
+
+ create_virt_ip_constraints ${cibfile} ${HA_SERVERS}
+
+ pcs cluster cib-push ${cibfile}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs cluster cib-push ${cibfile} failed"
+ fi
+ rm -f ${cibfile}
+}
+
+
+teardown_resources()
+{
+ # local mntpt=$(grep ha-vol-mnt ${HA_CONFIG_FILE} | cut -d = -f 2)
+
+ # restore /var/lib/nfs
+ logger "notice: pcs resource delete nfs_setup-clone"
+ pcs resource delete nfs_setup-clone
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource delete nfs_setup-clone failed"
+ fi
+
+ # delete -clone resource agents
+ # in particular delete the ganesha monitor so we don't try to
+ # trigger anything when we shut down ganesha next.
+ pcs resource delete nfs-mon-clone
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource delete nfs-mon-clone failed"
+ fi
+
+ pcs resource delete nfs-grace-clone
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource delete nfs-grace-clone failed"
+ fi
+
+ while [[ ${1} ]]; do
+ pcs resource delete ${1}-group
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource delete ${1}-group failed"
+ fi
+ shift
+ done
+
+}
+
+
+recreate_resources()
+{
+ local cibfile=${1}; shift
+
+ while [[ ${1} ]]; do
+ # this is variable indirection
+ # see the comment on the same a few lines up
+ name="VIP_${1}"
+ clean_name=${name//[-.]/_}
+ nvs=$(grep "^${name}=" ${HA_CONFDIR}/ganesha-ha.conf)
+ clean_nvs=${nvs//[-.]/_}
+ eval ${clean_nvs}
+ eval tmp_ipaddr=\$${clean_name}
+ ipaddr=${tmp_ipaddr//_/.}
+
+ pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=block ip=${ipaddr} --group ${1}-group
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s failed"
+ fi
+
+ pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
+ fi
+
+ pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \
+ op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \
+ op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_unblock failed"
+ fi
+
+ shift
+ done
+}
+
+
+addnode_recreate_resources()
+{
+ local cibfile=${1}; shift
+ local add_node=${1}; shift
+ local add_vip=${1}; shift
+
+ recreate_resources ${cibfile} ${HA_SERVERS}
+
+ pcs -f ${cibfile} resource create ${add_node}-nfs_block ocf:heartbeat:portblock \
+ protocol=tcp portno=2049 action=block ip=${add_vip} --group ${add_node}-group
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${add_node}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+ ip=${add_vip} cidr_netmask=32 op monitor interval=15s --group ${add_node}-group \
+ --after ${add_node}-nfs_block
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+ ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed"
+ fi
+
+ pcs -f ${cibfile} constraint order nfs-grace-clone then ${add_node}-cluster_ip-1
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint order nfs-grace-clone then ${add_node}-cluster_ip-1 failed"
+ fi
+ pcs -f ${cibfile} resource create ${add_node}-nfs_unblock ocf:heartbeat:portblock \
+ protocol=tcp portno=2049 action=unblock ip=${add_vip} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${add_node}-group --after \
+ ${add_node}-cluster_ip-1 op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start \
+ timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op monitor interval=10s \
+ timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${add_node}-nfs_unblock failed"
+ fi
+}
+
+
+clear_resources()
+{
+ local cibfile=${1}; shift
+
+ while [[ ${1} ]]; do
+ pcs -f ${cibfile} resource delete ${1}-group
+ if [ $? -ne 0 ]; then
+            logger "warning: pcs -f ${cibfile} resource delete ${1}-group failed"
+ fi
+
+ shift
+ done
+}
+
+
+addnode_create_resources()
+{
+ local add_node=${1}; shift
+ local add_vip=${1}; shift
+ local cibfile=$(mktemp -u)
+
+ # start HA on the new node
+ pcs cluster start ${add_node}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster start ${add_node} failed"
+ fi
+
+ pcs cluster cib ${cibfile}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster cib ${cibfile} failed"
+ fi
+
+ # delete all the -cluster_ip-1 resources, clearing
+ # their constraints, then create them again so we can
+ # recompute their constraints
+ clear_resources ${cibfile} ${HA_SERVERS}
+ addnode_recreate_resources ${cibfile} ${add_node} ${add_vip}
+
+ HA_SERVERS="${HA_SERVERS} ${add_node}"
+ create_virt_ip_constraints ${cibfile} ${HA_SERVERS}
+
+ pcs cluster cib-push ${cibfile}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster cib-push ${cibfile} failed"
+ fi
+ rm -f ${cibfile}
+}
+
+
+deletenode_delete_resources()
+{
+ local node=${1}; shift
+ local ha_servers=$(echo "${HA_SERVERS}" | sed s/${node}//)
+ local cibfile=$(mktemp -u)
+
+ pcs cluster cib ${cibfile}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster cib ${cibfile} failed"
+ fi
+
+ # delete all the -cluster_ip-1 and -trigger_ip-1 resources,
+ # clearing their constraints, then create them again so we can
+ # recompute their constraints
+ clear_resources ${cibfile} ${HA_SERVERS}
+ recreate_resources ${cibfile} ${ha_servers}
+    HA_SERVERS=$(echo "${ha_servers}" | sed -e "s/  / /")
+
+ create_virt_ip_constraints ${cibfile} ${HA_SERVERS}
+
+ pcs cluster cib-push ${cibfile}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster cib-push ${cibfile} failed"
+ fi
+ rm -f ${cibfile}
+
+}
+
+
+deletenode_update_haconfig()
+{
+ local name="VIP_${1}"
+ local clean_name=${name//[-.]/_}
+
+ ha_servers=$(echo ${HA_SERVERS} | sed -e "s/ /,/")
+ sed -i -e "s/^HA_CLUSTER_NODES=.*$/HA_CLUSTER_NODES=\"${ha_servers// /,}\"/" -e "s/^${name}=.*$//" -e "/^$/d" ${HA_CONFDIR}/ganesha-ha.conf
+}
+
+
+setup_state_volume()
+{
+ local mnt=${HA_VOL_MNT}
+ local longname=""
+ local shortname=""
+ local dname=""
+ local dirname=""
+
+ longname=$(hostname)
+ dname=${longname#$(hostname -s)}
+
+ while [[ ${1} ]]; do
+
+ if [[ ${1} == *${dname} ]]; then
+ dirname=${1}
+ else
+ dirname=${1}${dname}
+ fi
+
+ if [ ! -d ${mnt}/nfs-ganesha/tickle_dir ]; then
+ mkdir ${mnt}/nfs-ganesha/tickle_dir
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname} ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ fi
+ if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
+ touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ fi
+ if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
+ touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
+ fi
+ for server in ${HA_SERVERS} ; do
+ if [[ ${server} != ${dirname} ]]; then
+ ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
+ ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
+ fi
+ done
+ shift
+ done
+
+}
+
+
+enable_pacemaker()
+{
+ while [[ ${1} ]]; do
+ if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]; then
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker"
+ else
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} pacemaker enable"
+ fi
+ shift
+ done
+}
+
+
+addnode_state_volume()
+{
+ local newnode=${1}; shift
+ local mnt=${HA_VOL_MNT}
+ local longname=""
+ local dname=""
+ local dirname=""
+
+ longname=$(hostname)
+ dname=${longname#$(hostname -s)}
+
+ if [[ ${newnode} == *${dname} ]]; then
+ dirname=${newnode}
+ else
+ dirname=${newnode}${dname}
+ fi
+
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname} ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ fi
+ if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
+ touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ fi
+ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
+ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ fi
+ if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
+ touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
+ fi
+
+ for server in ${HA_SERVERS} ; do
+
+ if [[ ${server} != ${dirname} ]]; then
+ ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
+ ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
+
+ ln -s ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname}
+ ln -s ${mnt}/nfs-ganesha/${dirname}/nfs/statd ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname}
+ fi
+ done
+
+}
+
+
+delnode_state_volume()
+{
+ local delnode=${1}; shift
+ local mnt=${HA_VOL_MNT}
+ local longname=""
+ local dname=""
+ local dirname=""
+
+ longname=$(hostname)
+ dname=${longname#$(hostname -s)}
+
+ if [[ ${delnode} == *${dname} ]]; then
+ dirname=${delnode}
+ else
+ dirname=${delnode}${dname}
+ fi
+
+ rm -rf ${mnt}/nfs-ganesha/${dirname}
+
+ for server in ${HA_SERVERS} ; do
+ if [[ ${server} != ${dirname} ]]; then
+ rm -f ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname}
+ rm -f ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname}
+ fi
+ done
+}
+
+
+status()
+{
+ local scratch=$(mktemp)
+ local regex_str="^${1}-cluster_ip-1"
+ local healthy=0
+ local index=1
+ local nodes
+
+ # change tabs to spaces, strip leading spaces, including any
+ # new '*' at the beginning of a line introduced in pcs-0.10.x
+ pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*\*//" -e "s/^[ ]*//" > ${scratch}
+
+ nodes[0]=${1}; shift
+
+ # make a regex of the configured nodes
+    # and initialize the nodes array for later
+ while [[ ${1} ]]; do
+
+ regex_str="${regex_str}|^${1}-cluster_ip-1"
+ nodes[${index}]=${1}
+ ((index++))
+ shift
+ done
+
+ # print the nodes that are expected to be online
+ grep -E "Online:" ${scratch}
+
+ echo
+
+ # print the VIPs and which node they are on
+ grep -E "${regex_str}" < ${scratch} | cut -d ' ' -f 1,4
+
+ echo
+
+ # check if the VIP and port block/unblock RAs are on the expected nodes
+ for n in ${nodes[*]}; do
+
+ grep -E -x "${n}-nfs_block \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+ result=$?
+ ((healthy+=${result}))
+ grep -E -x "${n}-cluster_ip-1 \(ocf::heartbeat:IPaddr\): Started ${n}" > /dev/null 2>&1 ${scratch}
+ result=$?
+ ((healthy+=${result}))
+ grep -E -x "${n}-nfs_unblock \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+ result=$?
+ ((healthy+=${result}))
+ done
+
+ grep -E "\):\ Stopped|FAILED" > /dev/null 2>&1 ${scratch}
+ result=$?
+
+ if [ ${result} -eq 0 ]; then
+ echo "Cluster HA Status: BAD"
+ elif [ ${healthy} -eq 0 ]; then
+ echo "Cluster HA Status: HEALTHY"
+ else
+ echo "Cluster HA Status: FAILOVER"
+ fi
+
+ rm -f ${scratch}
+}
+
+create_ganesha_conf_file()
+{
+ if [[ "$1" == "yes" ]];
+ then
+ if [ -e $GANESHA_CONF ];
+ then
+ rm -rf $GANESHA_CONF
+ fi
+        # The symlink /etc/ganesha/ganesha.conf needs to be
+        # created pointing to the ganesha conf file kept in the
+        # shared storage. Every node has only this link while
+        # the actual file is stored in the shared storage, so
+        # that editing ganesha.conf is easier and stays
+        # consistent across all nodes.
+
+ ln -s $HA_CONFDIR/ganesha.conf $GANESHA_CONF
+ else
+ # Restoring previous file
+ rm -rf $GANESHA_CONF
+ cp $HA_CONFDIR/ganesha.conf $GANESHA_CONF
+ sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $GANESHA_CONF
+ fi
+}
+
+set_quorum_policy()
+{
+ local quorum_policy="stop"
+ local num_servers=${1}
+
+ if [ ${num_servers} -lt 3 ]; then
+ quorum_policy="ignore"
+ fi
+ pcs property set no-quorum-policy=${quorum_policy}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs property set no-quorum-policy=${quorum_policy} failed"
+ fi
+}
+
+main()
+{
+
+ local cmd=${1}; shift
+ if [[ ${cmd} == *help ]]; then
+ usage
+ exit 0
+ fi
+
+ if (selinuxenabled) ;then
+ semanage boolean -m gluster_use_execmem --on
+ fi
+
+ local osid=""
+
+ osid=$(grep ^ID= /etc/os-release)
+ eval $(echo ${osid} | grep -F ID=)
+ osid=$(grep ^VERSION_ID= /etc/os-release)
+ eval $(echo ${osid} | grep -F VERSION_ID=)
+
+ HA_CONFDIR=${1%/}; shift
+ local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
+ local node=""
+ local vip=""
+
+ # ignore any comment lines
+ cfgline=$(grep ^HA_NAME= ${ha_conf})
+ eval $(echo ${cfgline} | grep -F HA_NAME=)
+ cfgline=$(grep ^HA_CLUSTER_NODES= ${ha_conf})
+ eval $(echo ${cfgline} | grep -F HA_CLUSTER_NODES=)
+
+ case "${cmd}" in
+
+ setup | --setup)
+ logger "setting up ${HA_NAME}"
+
+ check_cluster_exists ${HA_NAME}
+
+ determine_servers "setup"
+
+        # Fedora 29+ and rhel/centos 8 have PCS-0.10.x
+        # default is pcs-0.10.x options but check for
+        # rhel/centos 7 (pcs-0.9.x) and adjust accordingly
+        if [[ ${ID} =~ (rhel|centos) ]]; then
+ if [[ ${VERSION_ID} == 7.* ]]; then
+ PCS9OR10_PCS_CNAME_OPTION="--name"
+ PCS9OR10_PCS_CLONE_OPTION="--clone"
+ fi
+ fi
+
+ if [[ "${HA_NUM_SERVERS}X" != "1X" ]]; then
+
+ determine_service_manager
+
+ setup_cluster ${HA_NAME} ${HA_NUM_SERVERS} "${HA_SERVERS}"
+
+ setup_create_resources ${HA_SERVERS}
+
+ setup_finalize_ha
+
+ setup_state_volume ${HA_SERVERS}
+
+ enable_pacemaker ${HA_SERVERS}
+
+ else
+
+ logger "insufficient servers for HA, aborting"
+ fi
+ ;;
+
+ teardown | --teardown)
+ logger "tearing down ${HA_NAME}"
+
+ determine_servers "teardown"
+
+ teardown_resources ${HA_SERVERS}
+
+ teardown_cluster ${HA_NAME}
+
+ cleanup_ganesha_config ${HA_CONFDIR}
+ ;;
+
+ cleanup | --cleanup)
+ cleanup_ganesha_config ${HA_CONFDIR}
+ ;;
+
+ add | --add)
+ node=${1}; shift
+ vip=${1}; shift
+
+ logger "adding ${node} with ${vip} to ${HA_NAME}"
+
+ determine_service_manager
+
+ manage_service "start" ${node}
+
+ determine_servers "add"
+
+ pcs cluster node add ${node}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster node add ${node} failed"
+ fi
+
+ addnode_create_resources ${node} ${vip}
+ # Subsequent add-node recreates resources for all the nodes
+ # that already exist in the cluster. The nodes are picked up
+ # from the entries in the ganesha-ha.conf file. Adding the
+        # newly added node to the file so that the resources specific
+        # to this node are correctly recreated in the future.
+ clean_node=${node//[-.]/_}
+ echo "VIP_${node}=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf
+
+ NEW_NODES="$HA_CLUSTER_NODES,${node}"
+
+ sed -i s/HA_CLUSTER_NODES.*/"HA_CLUSTER_NODES=\"$NEW_NODES\""/ \
+$HA_CONFDIR/ganesha-ha.conf
+
+ addnode_state_volume ${node}
+
+ # addnode_create_resources() already appended ${node} to
+ # HA_SERVERS, so only need to increment HA_NUM_SERVERS
+ # and set quorum policy
+ HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} + 1)
+ set_quorum_policy ${HA_NUM_SERVERS}
+ ;;
+
+ delete | --delete)
+ node=${1}; shift
+
+ logger "deleting ${node} from ${HA_NAME}"
+
+ determine_servers "delete"
+
+ deletenode_delete_resources ${node}
+
+ pcs cluster node remove ${node}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster node remove ${node} failed"
+ fi
+
+ deletenode_update_haconfig ${node}
+
+ delnode_state_volume ${node}
+
+ determine_service_manager
+
+ manage_service "stop" ${node}
+
+ HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} - 1)
+ set_quorum_policy ${HA_NUM_SERVERS}
+ ;;
+
+ status | --status)
+ determine_servers "status"
+
+ status ${HA_SERVERS}
+ ;;
+
+ refresh-config | --refresh-config)
+ VOL=${1}
+
+ determine_servers "refresh-config"
+
+ refresh_config ${VOL} ${HA_CONFDIR} ${HA_SERVERS}
+ ;;
+
+ setup-ganesha-conf-files | --setup-ganesha-conf-files)
+
+ create_ganesha_conf_file ${1}
+ ;;
+
+ *)
+ # setup and teardown are not intended to be used by a
+ # casual user
+ usage
+ logger "Usage: ganesha-ha.sh add|delete|status"
+ ;;
+
+ esac
+
+ if (selinuxenabled) ;then
+ semanage boolean -m gluster_use_execmem --off
+ fi
+}
+
+main $*
diff --git a/extras/ganesha/scripts/generate-epoch.py b/extras/ganesha/scripts/generate-epoch.py
new file mode 100755
index 00000000000..77af014bab9
--- /dev/null
+++ b/extras/ganesha/scripts/generate-epoch.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python3
+#
+# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+#
+# Generates unique epoch value on each gluster node to be used by
+# nfs-ganesha service on that node.
+#
+# Configure 'EPOCH_EXEC' option to this script path in
+# '/etc/sysconfig/ganesha' file used by nfs-ganesha service.
+#
+# Construct epoch as follows -
+# first 32-bit contains the now() time
+# rest 32-bit value contains the local glusterd node uuid
+
+import time
+import binascii
+
+# Calculate the now() time into a 64-bit integer value
+def epoch_now():
+ epoch_time = int(time.mktime(time.localtime())) << 32
+ return epoch_time
+
+# Read glusterd UUID and extract first 32-bit of it
+def epoch_uuid():
+ file_name = '/var/lib/glusterd/glusterd.info'
+
+ for line in open(file_name):
+ if "UUID" in line:
+ glusterd_uuid = line.split('=')[1].strip()
+
+ uuid_bin = binascii.unhexlify(glusterd_uuid.replace("-",""))
+
+ epoch_uuid = int(binascii.hexlify(uuid_bin), 32) & 0xFFFF0000
+ return epoch_uuid
+
+# Construct epoch as follows -
+# first 32-bit contains the now() time
+# rest 32-bit value contains the local glusterd node uuid
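+#
+# A worked example (hypothetical values): if now() is 1600000000 (0x5F5E1000)
+# and the masked UUID-derived value is 0x12340000, then
+# epoch = (1600000000 << 32) | 0x12340000 = 0x5F5E100012340000.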
+epoch = (epoch_now() | epoch_uuid())
+print((str(epoch)))
+
+exit(0)
diff --git a/extras/geo-rep/Makefile.am b/extras/geo-rep/Makefile.am
index e4603ae80b8..09eff308ac4 100644
--- a/extras/geo-rep/Makefile.am
+++ b/extras/geo-rep/Makefile.am
@@ -1,4 +1,4 @@
-scriptsdir = $(datadir)/glusterfs/scripts
+scriptsdir = $(libexecdir)/glusterfs/scripts
scripts_SCRIPTS = gsync-upgrade.sh generate-gfid-file.sh get-gfid.sh \
slave-upgrade.sh schedule_georep.py
diff --git a/extras/geo-rep/schedule_georep.py.in b/extras/geo-rep/schedule_georep.py.in
index f29ae020b8f..48b2b507060 100644
--- a/extras/geo-rep/schedule_georep.py.in
+++ b/extras/geo-rep/schedule_georep.py.in
@@ -352,7 +352,7 @@ def get_summary(mastervol, slave_url):
def touch_mount_root(mastervol):
# Create a Mount and Touch the Mount point root,
# Hack to make sure some event available after
- # setting Checkpoint. Without this their is a chance of
+ # setting Checkpoint. Without this there is a chance of
# Checkpoint never completes.
with glustermount("localhost", mastervol) as mnt:
execute(["touch", mnt])
@@ -459,8 +459,8 @@ if __name__ == "__main__":
description=__doc__)
parser.add_argument("mastervol", help="Master Volume Name")
parser.add_argument("slave",
- help="SLAVEHOST or root@SLAVEHOST "
- "or user@SLAVEHOST",
+ help="Slave hostname "
+ "(<username>@SLAVEHOST or SLAVEHOST)",
metavar="SLAVE")
parser.add_argument("slavevol", help="Slave Volume Name")
parser.add_argument("--interval", help="Interval in Seconds. "
diff --git a/extras/glusterd.vol.in b/extras/glusterd.vol.in
index 6141d8a736e..5d7bad0e4c8 100644
--- a/extras/glusterd.vol.in
+++ b/extras/glusterd.vol.in
@@ -1,12 +1,11 @@
volume management
type mgmt/glusterd
option working-directory @GLUSTERD_WORKDIR@
- option transport-type socket,rdma
+ option transport-type socket
option transport.socket.keepalive-time 10
option transport.socket.keepalive-interval 2
option transport.socket.read-fail-log off
option transport.socket.listen-port 24007
- option transport.rdma.listen-port 24008
option ping-timeout 0
option event-threads 1
# option lock-timer 180
diff --git a/extras/glusterfs-georep-upgrade.py b/extras/glusterfs-georep-upgrade.py
new file mode 100755
index 00000000000..634576058d6
--- /dev/null
+++ b/extras/glusterfs-georep-upgrade.py
@@ -0,0 +1,77 @@
+#!/usr/bin/python3
+"""
+
+Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+This file is part of GlusterFS.
+
+This file is licensed to you under your choice of the GNU Lesser
+General Public License, version 3 or any later version (LGPLv3 or
+later), or the GNU General Public License, version 2 (GPLv2), in all
+cases as published by the Free Software Foundation.
+
+"""
+
+import argparse
+import errno
+import os, sys
+import shutil
+from datetime import datetime
+
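+# Illustrative effect of this upgrade (paths and the timestamp are examples):
+# a changelog recorded at epoch 1600000000 would move from
+#   <brick>/.glusterfs/changelogs/CHANGELOG.1600000000
+# to
+#   <brick>/.glusterfs/changelogs/2020/09/13/CHANGELOG.1600000000
+# (the date comes from the local time of that epoch, so it can differ by
+# timezone), and the entries in the HTIME.* file are rewritten to point at
+# the new paths.
+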
+def find_htime_path(brick_path):
+ dirs = []
+ htime_dir = os.path.join(brick_path, '.glusterfs/changelogs/htime')
+ for file in os.listdir(htime_dir):
+ if os.path.isfile(os.path.join(htime_dir,file)) and file.startswith("HTIME"):
+ dirs.append(os.path.join(htime_dir, file))
+ else:
+ raise FileNotFoundError("%s unavailable" % (os.path.join(htime_dir, file)))
+ return dirs
+
+def modify_htime_file(brick_path):
+ htime_file_path_list = find_htime_path(brick_path)
+
+ for htime_file_path in htime_file_path_list:
+ changelog_path = os.path.join(brick_path, '.glusterfs/changelogs')
+ temp_htime_path = os.path.join(changelog_path, 'htime/temp_htime_file')
+ with open(htime_file_path, 'r') as htime_file, open(temp_htime_path, 'w') as temp_htime_file:
+ #extract epoch times from htime file
+ paths = htime_file.read().split("\x00")
+
+ for pth in paths:
+ epoch_no = pth.split(".")[-1]
+ changelog = os.path.basename(pth)
+ #convert epoch time to year, month and day
+ if epoch_no != '':
+ date=(datetime.fromtimestamp(float(int(epoch_no))).strftime("%Y/%m/%d"))
+ #update paths in temp htime file
+ temp_htime_file.write("%s/%s/%s\x00" % (changelog_path, date, changelog))
+ #create directory in the format year/month/days
+ path = os.path.join(changelog_path, date)
+
+ if changelog.startswith("CHANGELOG."):
+ try:
+ os.makedirs(path, mode = 0o600);
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+ #copy existing changelogs to new directory structure, delete old changelog files
+ shutil.copyfile(pth, os.path.join(path, changelog))
+ os.remove(pth)
+
+ #rename temp_htime_file with htime file
+ os.rename(htime_file_path, os.path.join('%s.bak'%htime_file_path))
+ os.rename(temp_htime_path, htime_file_path)
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('brick_path', help="This upgrade script, which is to be run on\
+ server side, takes brick path as the argument, \
+ updates paths inside htime file and alters the directory structure \
+                    above the changelog files in order to support the new optimised format \
+ of the directory structure as per \
+ https://review.gluster.org/#/c/glusterfs/+/23733/")
+ args = parser.parse_args()
+ modify_htime_file(args.brick_path)
diff --git a/extras/glusterfs-logrotate b/extras/glusterfs-logrotate
index 75f700e6459..6ba6ef18e9f 100644
--- a/extras/glusterfs-logrotate
+++ b/extras/glusterfs-logrotate
@@ -45,3 +45,24 @@
compress
delaycompress
}
+
+# Rotate snapd log
+/var/log/glusterfs/snaps/*/*.log {
+ sharedscripts
+ weekly
+ maxsize 10M
+ minsize 100k
+
+ # 6 months of logs are good enough
+ rotate 26
+
+ missingok
+ compress
+ delaycompress
+ notifempty
+ postrotate
+ for pid in `ps -aef | grep glusterfs | egrep "snapd" | awk '{print $2}'`; do
+ /usr/bin/kill -HUP $pid > /dev/null 2>&1 || true
+ done
+ endscript
+}
diff --git a/extras/group-gluster-block b/extras/group-gluster-block
index 56b406e3641..1e398019e6b 100644
--- a/extras/group-gluster-block
+++ b/extras/group-gluster-block
@@ -5,6 +5,14 @@ performance.stat-prefetch=off
performance.open-behind=off
performance.readdir-ahead=off
performance.strict-o-direct=on
+performance.client-io-threads=on
+performance.io-thread-count=32
+performance.high-prio-threads=32
+performance.normal-prio-threads=32
+performance.low-prio-threads=32
+performance.least-prio-threads=4
+client.event-threads=8
+server.event-threads=8
network.remote-dio=disable
cluster.eager-lock=enable
cluster.quorum-type=auto
diff --git a/extras/group-virt.example b/extras/group-virt.example
index c2ce89d7b9c..cc37c98a25c 100644
--- a/extras/group-virt.example
+++ b/extras/group-virt.example
@@ -2,7 +2,8 @@ performance.quick-read=off
performance.read-ahead=off
performance.io-cache=off
performance.low-prio-threads=32
-network.remote-dio=enable
+network.remote-dio=disable
+performance.strict-o-direct=on
cluster.eager-lock=enable
cluster.quorum-type=auto
cluster.server-quorum-type=server
@@ -16,3 +17,8 @@ cluster.choose-local=off
client.event-threads=4
server.event-threads=4
performance.client-io-threads=on
+network.ping-timeout=20
+server.tcp-user-timeout=20
+server.keepalive-time=10
+server.keepalive-interval=2
+server.keepalive-count=5
diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
index 885ed03ad5b..1f2564b44ff 100755
--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
@@ -79,9 +79,9 @@ done
if [ "$option" == "disable" ]; then
# Unmount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- cat /etc/fstab | grep -v "gluster_shared_storage /var/run/gluster/shared_storage/" > /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ umount /run/gluster/shared_storage
+ cat /etc/fstab | grep -v "gluster_shared_storage /run/gluster/shared_storage/" > /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
if [ "$is_originator" == 1 ]; then
@@ -104,8 +104,15 @@ function check_volume_status()
echo $status
}
-mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
- /var/run/gluster/shared_storage"
+key=`echo $5 | cut -d '=' -f 1`
+val=`echo $5 | cut -d '=' -f 2`
+if [ "$key" == "transport.address-family" ]; then
+ mount_cmd="mount -t glusterfs -o xlator-option=transport.address-family=inet6 \
+ $local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage"
+else
+ mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
+ /run/gluster/shared_storage"
+fi
if [ "$option" == "enable" ]; then
retry=0;
@@ -120,10 +127,10 @@ if [ "$option" == "enable" ]; then
status=$(check_volume_status)
done
# Mount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- mkdir -p /var/run/gluster/shared_storage
+ umount /run/gluster/shared_storage
+ mkdir -p /run/gluster/shared_storage
$mount_cmd
- cp /etc/fstab /var/run/gluster/fstab.tmp
- echo "$local_node_hostname:/gluster_shared_storage /var/run/gluster/shared_storage/ glusterfs defaults 0 0" >> /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ cp /etc/fstab /run/gluster/fstab.tmp
+ echo "$local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage/ glusterfs defaults 0 0" >> /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
diff --git a/extras/hook-scripts/start/post/Makefile.am b/extras/hook-scripts/start/post/Makefile.am
index e32546dc999..792019d3c9f 100644
--- a/extras/hook-scripts/start/post/Makefile.am
+++ b/extras/hook-scripts/start/post/Makefile.am
@@ -1,4 +1,4 @@
-EXTRA_DIST = S29CTDBsetup.sh S30samba-start.sh
+EXTRA_DIST = S29CTDBsetup.sh S30samba-start.sh S31ganesha-start.sh
hookdir = $(GLUSTERD_WORKDIR)/hooks/1/start/post/
if WITH_SERVER
diff --git a/extras/hook-scripts/start/post/S31ganesha-start.sh b/extras/hook-scripts/start/post/S31ganesha-start.sh
new file mode 100755
index 00000000000..7ad6f23ad06
--- /dev/null
+++ b/extras/hook-scripts/start/post/S31ganesha-start.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+PROGNAME="Sganesha-start"
+OPTSPEC="volname:,gd-workdir:"
+VOL=
+declare -i EXPORT_ID
+ganesha_key="ganesha.enable"
+GANESHA_DIR="/run/gluster/shared_storage/nfs-ganesha"
+CONF1="$GANESHA_DIR/ganesha.conf"
+GLUSTERD_WORKDIR=
+
+function parse_args ()
+{
+ ARGS=$(getopt -l $OPTSPEC -o "o" -name $PROGNAME $@)
+ eval set -- "$ARGS"
+
+ while true; do
+ case $1 in
+ --volname)
+ shift
+ VOL=$1
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ shift
+ done
+}
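+
+# Illustrative invocation (a sketch; glusterd passes options like these when
+# the hook runs, and the values shown here are examples):
+#   S31ganesha-start.sh --volname=myvol --gd-workdir=/var/lib/glusterd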
+
+
+
+#This function generates a new export entry as export.volume_name.conf
+function write_conf()
+{
+echo -e "# WARNING : Using Gluster CLI will overwrite manual
+# changes made to this file. To avoid it, edit the
+# file, copy it over to all the NFS-Ganesha nodes
+# and run ganesha-ha.sh --refresh-config."
+
+echo "EXPORT{"
+echo " Export_Id = 2;"
+echo " Path = \"/$VOL\";"
+echo " FSAL {"
+echo " name = \"GLUSTER\";"
+echo " hostname=\"localhost\";"
+echo " volume=\"$VOL\";"
+echo " }"
+echo " Access_type = RW;"
+echo " Disable_ACL = true;"
+echo " Squash=\"No_root_squash\";"
+echo " Pseudo=\"/$VOL\";"
+echo " Protocols = \"3\", \"4\" ;"
+echo " Transports = \"UDP\",\"TCP\";"
+echo " SecType = \"sys\";"
+echo "}"
+}
+
+#It adds the export dynamically by sending dbus signals
+function export_add()
+{
+ dbus-send --print-reply --system --dest=org.ganesha.nfsd \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \
+string:$GANESHA_DIR/exports/export.$VOL.conf string:"EXPORT(Export_Id=$EXPORT_ID)"
+
+}
+
+# based on src/scripts/ganeshactl/Ganesha/export_mgr.py
+function is_exported()
+{
+ local volume="${1}"
+
+ dbus-send --type=method_call --print-reply --system \
+ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+ org.ganesha.nfsd.exportmgr.ShowExports \
+ | grep -w -q "/${volume}"
+
+ return $?
+}
+
+# Check the info file (contains the volume options) to see if Ganesha is
+# enabled for this volume.
+function ganesha_enabled()
+{
+ local volume="${1}"
+ local info_file="${GLUSTERD_WORKDIR}/vols/${VOL}/info"
+ local enabled="off"
+
+ enabled=$(grep -w ${ganesha_key} ${info_file} | cut -d"=" -f2)
+
+ [ "${enabled}" == "on" ]
+
+ return $?
+}
+
+parse_args $@
+
+if ganesha_enabled ${VOL} && ! is_exported ${VOL}
+then
+ if [ ! -e ${GANESHA_DIR}/exports/export.${VOL}.conf ]
+ then
+ #Remove export entry from nfs-ganesha.conf
+ sed -i /$VOL.conf/d $CONF1
+ write_conf ${VOL} > ${GANESHA_DIR}/exports/export.${VOL}.conf
+ EXPORT_ID=`cat $GANESHA_DIR/.export_added`
+ EXPORT_ID=EXPORT_ID+1
+ echo $EXPORT_ID > $GANESHA_DIR/.export_added
+ sed -i s/Export_Id.*/"Export_Id=$EXPORT_ID;"/ \
+ $GANESHA_DIR/exports/export.$VOL.conf
+ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF1
+ else
+ EXPORT_ID=$(grep ^[[:space:]]*Export_Id $GANESHA_DIR/exports/export.$VOL.conf |\
+ awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]')
+ fi
+ export_add $VOL
+fi
+
+exit 0
diff --git a/extras/mount-shared-storage.sh b/extras/mount-shared-storage.sh
index e99233f7e1e..cc40e13c3e3 100755
--- a/extras/mount-shared-storage.sh
+++ b/extras/mount-shared-storage.sh
@@ -21,7 +21,7 @@ do
continue
fi
- mount -t glusterfs "${arr[0]}" "${arr[1]}"
+ mount -t glusterfs -o "${arr[3]}" "${arr[0]}" "${arr[1]}"
#wait for few seconds
sleep 10
diff --git a/extras/ocf/volume.in b/extras/ocf/volume.in
index 46dd20b8ced..76cc649e55f 100755
--- a/extras/ocf/volume.in
+++ b/extras/ocf/volume.in
@@ -6,6 +6,7 @@
# HA resource
#
# Authors: Florian Haas (hastexo Professional Services GmbH)
+# Jiri Lunacek (Hosting90 Systems s.r.o.)
#
# License: GNU General Public License (GPL)
@@ -54,6 +55,14 @@ must have clone ordering enabled.
<shortdesc lang="en">gluster executable</shortdesc>
<content type="string" default="$OCF_RESKEY_binary_default"/>
</parameter>
+ <parameter name="peer_map">
+ <longdesc lang="en">
+ Mapping of hostname - peer name in the gluster cluster
+ in format hostname1:peername1,hostname2:peername2,...
+ </longdesc>
+ <shortdesc lang="en">gluster peer map</shortdesc>
+ <content type="string" default=""/>
+ </parameter>
</parameters>
<actions>
<action name="start" timeout="20" />
@@ -68,6 +77,10 @@ EOF
}
+if [ -n "${OCF_RESKEY_peer_map}" ]; then
+ SHORTHOSTNAME=`echo "${OCF_RESKEY_peer_map}" | egrep -o "$SHORTHOSTNAME\:[^,]+" | awk -F: '{print $2}'`
+fi
+
volume_getdir() {
local voldir
voldir="@GLUSTERD_WORKDIR@/vols/${OCF_RESKEY_volname}"
@@ -108,6 +121,10 @@ volume_getpids() {
volpid_dir=`volume_getpid_dir`
bricks=`volume_getbricks`
+
+ if [ -z "$bricks" ]; then
+ return 1
+ fi
for brick in ${bricks}; do
pidfile="${volpid_dir}/${SHORTHOSTNAME}${brick}.pid"
@@ -214,6 +231,11 @@ volume_validate_all() {
# Test for required binaries
check_binary $OCF_RESKEY_binary
+
+ if [ -z "$SHORTHOSTNAME" ]; then
+ ocf_log err 'Unable to get host in node map'
+ return $OCF_ERR_CONFIGURED
+ fi
return $OCF_SUCCESS
}
diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py
index f03895de114..e62f7fc52a3 100755
--- a/extras/quota/quota_fsck.py
+++ b/extras/quota/quota_fsck.py
@@ -52,17 +52,17 @@ epilog_msg='''
def print_msg(log_type, path, xattr_dict = {}, stbuf = "", dir_size = None):
if log_type == QUOTA_VERBOSE:
- print('%-24s %-60s\nxattr_values: %s\n%s\n' % {"Verbose", path, xattr_dict, stbuf})
+ print('%-24s %-60s\nxattr_values: %s\n%s\n' % ("Verbose", path, xattr_dict, stbuf))
elif log_type == QUOTA_META_ABSENT:
- print('%-24s %-60s\n%s\n' % {"Quota-Meta Absent", path, xattr_dict})
+ print('%-24s %-60s\n%s\n' % ("Quota-Meta Absent", path, xattr_dict))
elif log_type == QUOTA_SIZE_MISMATCH:
print("mismatch")
if dir_size is not None:
- print('%24s %60s %12s %12s' % {"Size Mismatch", path, xattr_dict['contri_size'],
- dir_size})
+ print('%24s %60s %12s %12s' % ("Size Mismatch", path,
+ xattr_dict, dir_size))
else:
- print('%-24s %-60s %-12i %-12i' % {"Size Mismatch", path, xattr_dict['contri_size'],
- stbuf.st_size})
+ print('%-24s %-60s %-12s %-12s' % ("Size Mismatch", path, xattr_dict,
+ stbuf.st_size))
def size_differs_lot(s1, s2):
'''
@@ -156,12 +156,10 @@ def get_quota_xattr_brick(dpath):
xattr_dict = {}
xattr_dict['parents'] = {}
- for xattr in pairs:
+ for xattr in pairs[1:]:
+ xattr = xattr.decode("utf-8")
xattr_key = xattr.split("=")[0]
- if re.search("# file:", xattr_key):
- # skip the file comment
- continue
- elif xattr_key is "":
+ if xattr_key == "":
# skip any empty lines
continue
elif not re.search("quota", xattr_key):
diff --git a/extras/snap_scheduler/gcron.py b/extras/snap_scheduler/gcron.py
index 1127be0e976..0e4df77d481 100755
--- a/extras/snap_scheduler/gcron.py
+++ b/extras/snap_scheduler/gcron.py
@@ -19,10 +19,10 @@ import logging.handlers
import fcntl
-GCRON_TASKS = "/var/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
+GCRON_TASKS = "/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
GCRON_CROND_TASK = "/etc/cron.d/glusterfs_snap_cron_tasks"
GCRON_RELOAD_FLAG = "/var/run/gluster/crond_task_reload_flag"
-LOCK_FILE_DIR = "/var/run/gluster/shared_storage/snaps/lock_files/"
+LOCK_FILE_DIR = "/run/gluster/shared_storage/snaps/lock_files/"
log = logging.getLogger("gcron-logger")
start_time = 0.0
@@ -38,7 +38,8 @@ def initLogger(script_name):
sh.setFormatter(formatter)
process = subprocess.Popen(["gluster", "--print-logdir"],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ universal_newlines=True)
out, err = process.communicate()
if process.returncode == 0:
logfile = os.path.join(out.strip(), script_name[:-3]+".log")
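
The universal_newlines=True additions in gcron.py (and in snap_scheduler.py below) matter under Python 3, where a Popen pipe yields bytes by default; with the flag the stream is opened in text mode and returns str, so the existing out.strip() and slicing keep working. A small standalone sketch using a generic command instead of the gluster CLI:

    import subprocess

    # Without universal_newlines, stdout is bytes on Python 3.
    raw = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE).communicate()[0]
    print(type(raw))    # <class 'bytes'>

    # With universal_newlines=True the pipe is text mode and returns str.
    txt = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE,
                           universal_newlines=True).communicate()[0]
    print(type(txt))    # <class 'str'>
    print(txt.strip())  # 'hello'; safe to join into a log path as gcron.py does
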
diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py
index a66c5e3d5ce..e8fcc449a9b 100755
--- a/extras/snap_scheduler/snap_scheduler.py
+++ b/extras/snap_scheduler/snap_scheduler.py
@@ -67,7 +67,7 @@ except ImportError:
SCRIPT_NAME = "snap_scheduler"
scheduler_enabled = False
log = logging.getLogger(SCRIPT_NAME)
-SHARED_STORAGE_DIR="/var/run/gluster/shared_storage"
+SHARED_STORAGE_DIR="/run/gluster/shared_storage"
GCRON_DISABLED = SHARED_STORAGE_DIR+"/snaps/gcron_disabled"
GCRON_ENABLED = SHARED_STORAGE_DIR+"/snaps/gcron_enabled"
GCRON_TASKS = SHARED_STORAGE_DIR+"/snaps/glusterfs_snap_cron_tasks"
@@ -149,7 +149,7 @@ def initLogger():
sh.setFormatter(formatter)
process = subprocess.Popen(["gluster", "--print-logdir"],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE, universal_newlines=True)
logfile = os.path.join(process.stdout.read()[:-1], SCRIPT_NAME + ".log")
fh = logging.FileHandler(logfile)
diff --git a/extras/statedumpparse.rb b/extras/statedumpparse.rb
new file mode 100755
index 00000000000..1aff43377db
--- /dev/null
+++ b/extras/statedumpparse.rb
@@ -0,0 +1,208 @@
+#!/usr/bin/env ruby
+
+require 'time'
+require 'optparse'
+
+unless Array.instance_methods.include? :to_h
+ class Array
+ def to_h
+ h = {}
+ each { |k,v| h[k]=v }
+ h
+ end
+ end
+end
+
+# statedump.c:gf_proc_dump_mempool_info uses a five-dash record separator,
+# client.c:client_fd_lk_ctx_dump uses a six-dash record separator.
+ARRSEP = /^(-{5,6}=-{5,6})?$/
+HEAD = /^\[(.*)\]$/
+INPUT_FORMATS = %w[statedump json]
+
+format = 'json'
+input_format = 'statedump'
+tz = '+0000'
+memstat_select,memstat_reject = //,/\Z./
+OptionParser.new do |op|
+ op.banner << " [<] <STATEDUMP>"
+ op.on("-f", "--format=F", "json/yaml/memstat(-[plain|human|json])") { |s| format = s }
+ op.on("--input-format=F", INPUT_FORMATS.join(?/)) { |s| input_format = s }
+ op.on("--timezone=T",
+ "time zone to apply to zoneless timestamps [default UTC]") { |s| tz = s }
+ op.on("--memstat-select=RX", "memstat: select memory types matching RX") { |s|
+ memstat_select = Regexp.new s
+ }
+ op.on("--memstat-reject=RX", "memstat: reject memory types matching RX") { |s|
+ memstat_reject = Regexp.new s
+ }
+end.parse!
+
+
+if format =~ /\Amemstat(?:-(.*))?/
+ memstat_type = $1 || 'plain'
+ unless %w[plain human json].include? memstat_type
+ raise "unknown memstat type #{memstat_type.dump}"
+ end
+ format = 'memstat'
+end
+
+repr, logsep = case format
+when 'yaml'
+ require 'yaml'
+
+ [proc { |e| e.to_yaml }, "\n"]
+when 'json', 'memstat'
+ require 'json'
+
+ [proc { |e| e.to_json }, " "]
+else
+ raise "unknown format '#{format}'"
+end
+formatter = proc { |e| puts repr.call(e) }
+
+INPUT_FORMATS.include? input_format or raise "unknown input format '#{input_format}'"
+
+dumpinfo = {}
+
+# parse a statedump entry
+elem_cbk = proc { |s,&cbk|
+ arraylike = false
+ s.grep(/\S/).empty? and next
+ head = nil
+ while s.last =~ /^\s*$/
+ s.pop
+ end
+ body = catch { |misc2|
+ s[0] =~ HEAD ? (head = $1) : (throw misc2)
+ body = [[]]
+ s[1..-1].each { |l|
+ if l =~ ARRSEP
+ arraylike = true
+ body << []
+ next
+ end
+ body.last << l
+ }
+
+ body.reject(&:empty?).map { |e|
+ ea = e.map { |l|
+ k,v = l.split("=",2)
+ m = /\A(0|-?[1-9]\d*)(\.\d+)?\Z/.match v
+ [k, m ? (m[2] ? Float(v) : Integer(v)) : v]
+ }
+ begin
+ ea.to_h
+ rescue
+ throw misc2
+ end
+ }
+ }
+
+ if body
+ cbk.call [head, arraylike ? body : (body.empty? ? {} : body[0])]
+ else
+ STDERR.puts ["WARNING: failed to parse record:", repr.call(s)].join(logsep)
+ end
+}
+
+# aggregator routine
+aggr = case format
+when 'memstat'
+ meminfo = {}
+ # commit memory-related entries to meminfo
+ proc { |k,r|
+ case k
+ when /memusage/
+ (meminfo["GF_MALLOC"]||={})[k] ||= r["size"] if k =~ memstat_select and k !~ memstat_reject
+ when "mempool"
+ r.each {|e|
+ kk = "mempool:#{e['pool-name']}"
+ (meminfo["mempool"]||={})[kk] ||= e["size"] if kk =~ memstat_select and kk !~ memstat_reject
+ }
+ end
+ }
+else
+ # just format data, don't actually aggregate anything
+ proc { |pair| formatter.call pair }
+end
+
+# processing the data
+case input_format
+when 'statedump'
+ acc = []
+ $<.each { |l|
+ l = l.strip
+ if l =~ /^(DUMP-(?:START|END)-TIME):\s+(.*)/
+ dumpinfo["_meta"]||={}
+ (dumpinfo["_meta"]["date"]||={})[$1] = Time.parse([$2, tz].join " ")
+ next
+ end
+
+ if l =~ HEAD
+ elem_cbk.call(acc, &aggr)
+ acc = [l]
+ next
+ end
+
+ acc << l
+ }
+ elem_cbk.call(acc, &aggr)
+when 'json'
+ $<.each { |l|
+ r = JSON.load l
+ case r
+ when Array
+ aggr[r]
+ when Hash
+ dumpinfo.merge! r
+ end
+ }
+end
+
+# final actions: output aggregated data
+case format
+when 'memstat'
+ ma = meminfo.values.map(&:to_a).inject(:+)
+ totals = meminfo.map { |coll,h| [coll, h.values.inject(:+)] }.to_h
+ tt = ma.transpose[1].inject(:+)
+
+ summary_sep,showm = case memstat_type
+ when 'json'
+ ["", proc { |k,v| puts({type: k, value: v}.to_json) }]
+ when 'plain', 'human'
+ # human-friendly number representation
+ hr = proc { |n|
+ qa = %w[B kB MB GB]
+ q = ((1...qa.size).find {|i| n < (1 << i*10)} || qa.size) - 1
+ "%.2f%s" % [n.to_f / (1 << q*10), qa[q]]
+ }
+
+ templ = "%{val} %{key}"
+ tft = proc { |t| t }
+ nft = if memstat_type == 'human'
+ nw = [ma.transpose[1], totals.values, tt].flatten.map{|n| hr[n].size}.max
+ proc { |n|
+ hn = hr[n]
+ " " * (nw - hn.size) + hn
+ }
+ else
+ nw = tt.to_s.size
+ proc { |n| "%#{nw}d" % n }
+ end
+ ## Alternative template, key first:
+ # templ = "%{key} %{val}"
+ # tw = ma.transpose[0].map(&:size).max
+ # tft = proc { |t| t + " " * [tw - t.size, 0].max }
+ # nft = (memstat_type == 'human') ? hr : proc { |n| n }
+ ["\n", proc { |k,v| puts templ % {key: tft[k], val: nft[v]} }]
+ else
+ raise 'this should be impossible'
+ end
+
+ ma.sort_by { |k,v| v }.each(&showm)
+ print summary_sep
+ totals.each { |coll,t| showm.call "Total #{coll}", t }
+ showm.call "TOTAL", tt
+else
+ formatter.call dumpinfo
+end
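
In the new statedumpparse.rb above, the memstat "human" mode picks the largest unit (B, kB, MB, GB) whose 1024-based boundary the value does not reach. For readers who do not follow Ruby, a rough Python equivalent of that quantization (illustrative only, not part of the script):

    def human_size(n):
        # Choose the largest unit for which n still falls below the next
        # power-of-1024 boundary, then print with two decimals.
        units = ["B", "kB", "MB", "GB"]
        q = 0
        for i in range(1, len(units)):
            if n < (1 << (i * 10)):
                break
            q = i
        return "%.2f%s" % (n / float(1 << (q * 10)), units[q])

    # human_size(3)      -> '3.00B'
    # human_size(532480) -> '520.00kB'
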
diff --git a/extras/systemd/gluster-ta-volume.service.in b/extras/systemd/gluster-ta-volume.service.in
index 452c01c419f..2802bca05bf 100644
--- a/extras/systemd/gluster-ta-volume.service.in
+++ b/extras/systemd/gluster-ta-volume.service.in
@@ -4,7 +4,7 @@ After=network.target
[Service]
Environment="LOG_LEVEL=WARNING"
-ExecStart=@prefix@/sbin/glusterfsd -N --volfile-id ta -f @GLUSTERD_WORKDIR@/thin-arbiter/thin-arbiter.vol --brick-port 24007 --xlator-option ta-server.transport.socket.listen-port=24007
+ExecStart=@prefix@/sbin/glusterfsd -N --volfile-id ta -f @GLUSTERD_WORKDIR@/thin-arbiter/thin-arbiter.vol --brick-port 24007 --xlator-option ta-server.transport.socket.listen-port=24007 -LWARNING
Restart=always
KillMode=process
SuccessExitStatus=15
diff --git a/extras/systemd/glusterd.service.in b/extras/systemd/glusterd.service.in
index 89ef402ac83..abb0d82911f 100644
--- a/extras/systemd/glusterd.service.in
+++ b/extras/systemd/glusterd.service.in
@@ -1,6 +1,8 @@
[Unit]
Description=GlusterFS, a clustered file-system server
Documentation=man:glusterd(8)
+StartLimitBurst=6
+StartLimitIntervalSec=3600
Requires=@RPCBIND_SERVICE@
After=network.target @RPCBIND_SERVICE@
Before=network-online.target
@@ -10,10 +12,15 @@ Type=forking
PIDFile=@localstatedir@/run/glusterd.pid
LimitNOFILE=65536
Environment="LOG_LEVEL=INFO"
-EnvironmentFile=-@sysconfdir@/sysconfig/glusterd
+EnvironmentFile=-@SYSCONF_DIR@/sysconfig/glusterd
ExecStart=@prefix@/sbin/glusterd -p @localstatedir@/run/glusterd.pid --log-level $LOG_LEVEL $GLUSTERD_OPTIONS
KillMode=process
+TimeoutSec=300
SuccessExitStatus=15
+Restart=on-abnormal
+RestartSec=60
+StartLimitBurst=6
+StartLimitInterval=3600
[Install]
WantedBy=multi-user.target
diff --git a/extras/thin-arbiter/thin-arbiter.vol b/extras/thin-arbiter/thin-arbiter.vol
index 244a4caf485..c76babc7b3c 100644
--- a/extras/thin-arbiter/thin-arbiter.vol
+++ b/extras/thin-arbiter/thin-arbiter.vol
@@ -33,11 +33,10 @@ volume ta-index
subvolumes ta-io-threads
end-volume
-volume ta-io-stats
+volume /mnt/thin-arbiter
type debug/io-stats
option count-fop-hits off
option latency-measurement off
- option log-level WARNING
option unique-id /mnt/thin-arbiter
subvolumes ta-index
end-volume
@@ -54,5 +53,5 @@ volume ta-server
option auth-path /mnt/thin-arbiter
option transport.address-family inet
option transport-type tcp
- subvolumes ta-io-stats
+ subvolumes /mnt/thin-arbiter
end-volume
diff --git a/extras/who-wrote-glusterfs/gitdm.domain-map b/extras/who-wrote-glusterfs/gitdm.domain-map
index 315355b08b8..7cd2bbd605b 100644
--- a/extras/who-wrote-glusterfs/gitdm.domain-map
+++ b/extras/who-wrote-glusterfs/gitdm.domain-map
@@ -4,6 +4,7 @@
active.by ActiveCloud
appeartv.com Appear TV
cern.ch CERN
+cmss.chinamobile.com China Mobile(Suzhou) Software Technology
datalab.es DataLab S.L.
fb.com Facebook
fedoraproject.org Fedora Project
diff --git a/geo-replication/gsyncd.conf.in b/geo-replication/gsyncd.conf.in
index 9155cd87bbe..9688c79fab7 100644
--- a/geo-replication/gsyncd.conf.in
+++ b/geo-replication/gsyncd.conf.in
@@ -123,7 +123,7 @@ type=bool
help=Use this to set Active Passive mode to meta-volume.
[meta-volume-mnt]
-value=/var/run/gluster/shared_storage
+value=/run/gluster/shared_storage
help=Meta Volume or Shared Volume mount path
[allow-network]
@@ -266,7 +266,9 @@ allowed_values=ERROR,INFO,WARNING,DEBUG
[ssh-port]
value=22
-validation=int
+validation=minmax
+min=1
+max=65535
help=Set SSH port
type=int
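
The gsyncd.conf.in change above tightens ssh-port from a bare int validation to a minmax range check bounded to valid TCP ports (1-65535). A hedged sketch of what such a check amounts to; the function name and signature here are illustrative, not gsyncdconfig's actual API:

    def validate_minmax(value, minval=1, maxval=65535):
        # Accept the value only if it is an integer within [minval, maxval].
        try:
            num = int(value)
        except (TypeError, ValueError):
            return False
        return minval <= num <= maxval

    # validate_minmax("22")    -> True
    # validate_minmax("70000") -> False, rejected unlike the old bare int check
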
diff --git a/geo-replication/setup.py b/geo-replication/setup.py
index 6d678baa2f7..0eae469d2d6 100644
--- a/geo-replication/setup.py
+++ b/geo-replication/setup.py
@@ -1,7 +1,7 @@
#
# Copyright (c) 2011-2014 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
-
+#
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), or the GNU General Public License, version 2 (GPLv2), in all
@@ -20,11 +20,11 @@ setup(
name=name,
version="",
description='GlusterFS Geo Replication',
- license='',
+ license='GPLV2 and LGPLV3+',
author='Red Hat, Inc.',
author_email='gluster-devel@gluster.org',
url='http://www.gluster.org',
- packages=['syncdaemon', ],
+ packages=[name, ],
test_suite='nose.collector',
install_requires=[],
scripts=[],
diff --git a/geo-replication/src/peer_mountbroker.py.in b/geo-replication/src/peer_mountbroker.py.in
index 96a72643ac3..40b90ffc560 100644
--- a/geo-replication/src/peer_mountbroker.py.in
+++ b/geo-replication/src/peer_mountbroker.py.in
@@ -222,8 +222,10 @@ class CliSetup(Cmd):
name = "setup"
def args(self, parser):
- parser.add_argument("mount_root")
- parser.add_argument("group")
+ parser.add_argument("mount_root",
+ help="Path to the mountbroker-root directory.")
+ parser.add_argument("group",
+ help="Group to be used for setup.")
def run(self, args):
out = execute_in_peers("node-setup", [args.mount_root,
@@ -333,8 +335,10 @@ class CliAdd(Cmd):
name = "add"
def args(self, parser):
- parser.add_argument("volume")
- parser.add_argument("user")
+ parser.add_argument("volume",
+ help="Volume to be added.")
+ parser.add_argument("user",
+ help="User for which volume is to be added.")
def run(self, args):
out = execute_in_peers("node-add", [args.volume,
@@ -374,8 +378,9 @@ class CliRemove(Cmd):
name = "remove"
def args(self, parser):
- parser.add_argument("--volume", default=".")
- parser.add_argument("--user", default=".")
+ parser.add_argument("--volume", default=".", help="Volume to be removed.")
+ parser.add_argument("--user", default=".",
+ help="User for which volume has to be removed.")
def run(self, args):
out = execute_in_peers("node-remove", [args.volume,
diff --git a/geo-replication/syncdaemon/Makefile.am b/geo-replication/syncdaemon/Makefile.am
index 62c5ce7fe30..d70e3368faf 100644
--- a/geo-replication/syncdaemon/Makefile.am
+++ b/geo-replication/syncdaemon/Makefile.am
@@ -2,7 +2,7 @@ syncdaemondir = $(GLUSTERFS_LIBEXECDIR)/python/syncdaemon
syncdaemon_PYTHON = rconf.py gsyncd.py __init__.py master.py README.md repce.py \
resource.py syncdutils.py monitor.py libcxattr.py gsyncdconfig.py \
- libgfchangelog.py changelogagent.py gsyncdstatus.py conf.py logutils.py \
+ libgfchangelog.py gsyncdstatus.py conf.py logutils.py \
subcmds.py argsupgrade.py py2py3.py
CLEANFILES =
diff --git a/geo-replication/syncdaemon/README.md b/geo-replication/syncdaemon/README.md
index 2a202e3f99e..5ab785ae669 100644
--- a/geo-replication/syncdaemon/README.md
+++ b/geo-replication/syncdaemon/README.md
@@ -19,7 +19,6 @@ INSTALLATION
As of now, the supported way of operation is running from the source directory or using the RPMs given.
-If you use Python 2.4.x, you need to install the [Ctypes module](http://python.net/crew/theller/ctypes/).
CONFIGURATION
-------------
diff --git a/geo-replication/syncdaemon/changelogagent.py b/geo-replication/syncdaemon/changelogagent.py
deleted file mode 100644
index c5fdbc3a74f..00000000000
--- a/geo-replication/syncdaemon/changelogagent.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/python3
-#
-# Copyright (c) 2011-2014 Red Hat, Inc. <http://www.redhat.com>
-# This file is part of GlusterFS.
-
-# This file is licensed to you under your choice of the GNU Lesser
-# General Public License, version 3 or any later version (LGPLv3 or
-# later), or the GNU General Public License, version 2 (GPLv2), in all
-# cases as published by the Free Software Foundation.
-#
-
-import logging
-import syncdutils
-from syncdutils import select, CHANGELOG_AGENT_SERVER_VERSION
-from repce import RepceServer
-
-
-class _MetaChangelog(object):
-
- def __getattr__(self, meth):
- from libgfchangelog import Changes as LChanges
- xmeth = [m for m in dir(LChanges) if m[0] != '_']
- if meth not in xmeth:
- return
- for m in xmeth:
- setattr(self, m, getattr(LChanges, m))
- return getattr(self, meth)
-
-Changes = _MetaChangelog()
-
-
-class Changelog(object):
- def version(self):
- return CHANGELOG_AGENT_SERVER_VERSION
-
- def init(self):
- return Changes.cl_init()
-
- def register(self, cl_brick, cl_dir, cl_log, cl_level, retries=0):
- return Changes.cl_register(cl_brick, cl_dir, cl_log, cl_level, retries)
-
- def scan(self):
- return Changes.cl_scan()
-
- def getchanges(self):
- return Changes.cl_getchanges()
-
- def done(self, clfile):
- return Changes.cl_done(clfile)
-
- def history(self, changelog_path, start, end, num_parallel):
- return Changes.cl_history_changelog(changelog_path, start, end,
- num_parallel)
-
- def history_scan(self):
- return Changes.cl_history_scan()
-
- def history_getchanges(self):
- return Changes.cl_history_getchanges()
-
- def history_done(self, clfile):
- return Changes.cl_history_done(clfile)
-
-
-class ChangelogAgent(object):
- def __init__(self, obj, fd_tup):
- (inf, ouf, rw, ww) = fd_tup.split(',')
- repce = RepceServer(obj, int(inf), int(ouf), 1)
- t = syncdutils.Thread(target=lambda: (repce.service_loop(),
- syncdutils.finalize()))
- t.start()
- logging.info('Agent listining...')
-
- select((), (), ())
-
-
-def agent(obj, fd_tup):
- return ChangelogAgent(obj, fd_tup)
diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py
index 8940384616a..257ed72c6ae 100644
--- a/geo-replication/syncdaemon/gsyncd.py
+++ b/geo-replication/syncdaemon/gsyncd.py
@@ -22,8 +22,8 @@ import gsyncdconfig as gconf
from rconf import rconf
import subcmds
from conf import GLUSTERD_WORKDIR, GLUSTERFS_CONFDIR, GCONF_VERSION
-from syncdutils import set_term_handler, finalize, lf
-from syncdutils import log_raise_exception, FreeObject, escape
+from syncdutils import (set_term_handler, finalize, lf,
+ log_raise_exception, FreeObject, escape)
import argsupgrade
@@ -79,8 +79,6 @@ def main():
help="feedback fd between monitor and worker")
p.add_argument("--local-node", help="Local master node")
p.add_argument("--local-node-id", help="Local Node ID")
- p.add_argument("--rpc-fd",
- help="Read and Write fds for worker-agent communication")
p.add_argument("--subvol-num", type=int, help="Subvolume number")
p.add_argument("--is-hottier", action="store_true",
help="Is this brick part of hot tier")
@@ -92,19 +90,6 @@ def main():
p.add_argument("-c", "--config-file", help="Config File")
p.add_argument("--debug", action="store_true")
- # Agent
- p = sp.add_parser("agent")
- p.add_argument("master", help="Master Volume Name")
- p.add_argument("slave", help="Slave details user@host::vol format")
- p.add_argument("--local-path", help="Local brick path")
- p.add_argument("--local-node", help="Local master node")
- p.add_argument("--local-node-id", help="Local Node ID")
- p.add_argument("--slave-id", help="Slave Volume ID")
- p.add_argument("--rpc-fd",
- help="Read and Write fds for worker-agent communication")
- p.add_argument("-c", "--config-file", help="Config File")
- p.add_argument("--debug", action="store_true")
-
# Slave
p = sp.add_parser("slave")
p.add_argument("master", help="Master Volume Name")
@@ -271,8 +256,8 @@ def main():
# Default label to print in log file
label = args.subcmd
- if args.subcmd in ("worker", "agent"):
- # If Worker or agent, then add brick path also to label
+ if args.subcmd in ("worker",):
+ # If Worker, then add brick path also to label
label = "%s %s" % (args.subcmd, args.local_path)
elif args.subcmd == "slave":
# If Slave add Master node and Brick details
@@ -315,7 +300,7 @@ def main():
# Log message for loaded config file
if config_file is not None:
- logging.info(lf("Using session config file", path=config_file))
+ logging.debug(lf("Using session config file", path=config_file))
set_term_handler()
excont = FreeObject(exval=0)
diff --git a/geo-replication/syncdaemon/gsyncdstatus.py b/geo-replication/syncdaemon/gsyncdstatus.py
index 72bcb092f01..1a655ff8887 100644
--- a/geo-replication/syncdaemon/gsyncdstatus.py
+++ b/geo-replication/syncdaemon/gsyncdstatus.py
@@ -23,8 +23,8 @@ from datetime import datetime
from errno import EACCES, EAGAIN, ENOENT
import logging
-from syncdutils import EVENT_GEOREP_ACTIVE, EVENT_GEOREP_PASSIVE, gf_event
-from syncdutils import EVENT_GEOREP_CHECKPOINT_COMPLETED, lf
+from syncdutils import (EVENT_GEOREP_ACTIVE, EVENT_GEOREP_PASSIVE, gf_event,
+ EVENT_GEOREP_CHECKPOINT_COMPLETED, lf)
DEFAULT_STATUS = "N/A"
MONITOR_STATUS = ("Created", "Started", "Paused", "Stopped")
diff --git a/geo-replication/syncdaemon/libcxattr.py b/geo-replication/syncdaemon/libcxattr.py
index c7d69d7eb2e..e6406c36bd7 100644
--- a/geo-replication/syncdaemon/libcxattr.py
+++ b/geo-replication/syncdaemon/libcxattr.py
@@ -10,8 +10,8 @@
import os
from ctypes import CDLL, get_errno
-from py2py3 import bytearray_to_str, gr_create_string_buffer
-from py2py3 import gr_query_xattr, gr_lsetxattr, gr_lremovexattr
+from py2py3 import (bytearray_to_str, gr_create_string_buffer,
+ gr_query_xattr, gr_lsetxattr, gr_lremovexattr)
class Xattr(object):
diff --git a/geo-replication/syncdaemon/libgfchangelog.py b/geo-replication/syncdaemon/libgfchangelog.py
index 8d129567075..a3bda7282c0 100644
--- a/geo-replication/syncdaemon/libgfchangelog.py
+++ b/geo-replication/syncdaemon/libgfchangelog.py
@@ -12,130 +12,132 @@ import os
from ctypes import CDLL, RTLD_GLOBAL, get_errno, byref, c_ulong
from ctypes.util import find_library
from syncdutils import ChangelogException, ChangelogHistoryNotAvailable
-from py2py3 import gr_cl_history_changelog, gr_cl_done, gr_create_string_buffer
-from py2py3 import gr_cl_register, gr_cl_history_done, bytearray_to_str
-
-
-class Changes(object):
- libgfc = CDLL(find_library("gfchangelog"), mode=RTLD_GLOBAL,
- use_errno=True)
-
- @classmethod
- def geterrno(cls):
- return get_errno()
-
- @classmethod
- def raise_changelog_err(cls):
- errn = cls.geterrno()
- raise ChangelogException(errn, os.strerror(errn))
-
- @classmethod
- def _get_api(cls, call):
- return getattr(cls.libgfc, call)
-
- @classmethod
- def cl_init(cls):
- ret = cls._get_api('gf_changelog_init')(None)
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_register(cls, brick, path, log_file, log_level, retries=0):
- ret = gr_cl_register(cls, brick, path, log_file, log_level, retries)
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_scan(cls):
- ret = cls._get_api('gf_changelog_scan')()
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_startfresh(cls):
- ret = cls._get_api('gf_changelog_start_fresh')()
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_getchanges(cls):
- """ remove hardcoding for path name length """
- def clsort(f):
- return f.split('.')[-1]
- changes = []
- buf = gr_create_string_buffer(4096)
- call = cls._get_api('gf_changelog_next_change')
-
- while True:
- ret = call(buf, 4096)
- if ret in (0, -1):
- break
- # py2 and py3 compatibility
- result = bytearray_to_str(buf.raw[:ret - 1])
- changes.append(result)
- if ret == -1:
- cls.raise_changelog_err()
- # cleanup tracker
- cls.cl_startfresh()
- return sorted(changes, key=clsort)
-
- @classmethod
- def cl_done(cls, clfile):
- ret = gr_cl_done(cls, clfile)
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_history_scan(cls):
- ret = cls._get_api('gf_history_changelog_scan')()
- if ret == -1:
- cls.raise_changelog_err()
-
- return ret
-
- @classmethod
- def cl_history_changelog(cls, changelog_path, start, end, num_parallel):
- actual_end = c_ulong()
- ret = gr_cl_history_changelog(cls, changelog_path, start, end,
- num_parallel, byref(actual_end))
- if ret == -1:
- cls.raise_changelog_err()
-
- if ret == -2:
- raise ChangelogHistoryNotAvailable()
-
- return (ret, actual_end.value)
-
- @classmethod
- def cl_history_startfresh(cls):
- ret = cls._get_api('gf_history_changelog_start_fresh')()
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_history_getchanges(cls):
- """ remove hardcoding for path name length """
- def clsort(f):
- return f.split('.')[-1]
-
- changes = []
- buf = gr_create_string_buffer(4096)
- call = cls._get_api('gf_history_changelog_next_change')
-
- while True:
- ret = call(buf, 4096)
- if ret in (0, -1):
- break
- # py2 and py3 compatibility
- result = bytearray_to_str(buf.raw[:ret - 1])
- changes.append(result)
- if ret == -1:
- cls.raise_changelog_err()
-
- return sorted(changes, key=clsort)
-
- @classmethod
- def cl_history_done(cls, clfile):
- ret = gr_cl_history_done(cls, clfile)
- if ret == -1:
- cls.raise_changelog_err()
+from py2py3 import (gr_cl_history_changelog, gr_cl_done,
+ gr_create_string_buffer, gr_cl_register,
+ gr_cl_history_done, bytearray_to_str)
+
+
+libgfc = CDLL(
+ find_library("gfchangelog"),
+ mode=RTLD_GLOBAL,
+ use_errno=True
+)
+
+
+def _raise_changelog_err():
+ errn = get_errno()
+ raise ChangelogException(errn, os.strerror(errn))
+
+
+def _init():
+ if libgfc.gf_changelog_init(None) == -1:
+ _raise_changelog_err()
+
+
+def register(brick, path, log_file, log_level, retries=0):
+ _init()
+
+ ret = gr_cl_register(libgfc, brick, path, log_file, log_level, retries)
+
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def scan():
+ ret = libgfc.gf_changelog_scan()
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def startfresh():
+ ret = libgfc.gf_changelog_start_fresh()
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def getchanges():
+ def clsort(cfile):
+ return cfile.split('.')[-1]
+
+ changes = []
+ buf = gr_create_string_buffer(4096)
+ call = libgfc.gf_changelog_next_change
+
+ while True:
+ ret = call(buf, 4096)
+ if ret in (0, -1):
+ break
+
+ # py2 and py3 compatibility
+ result = bytearray_to_str(buf.raw[:ret - 1])
+ changes.append(result)
+
+ if ret == -1:
+ _raise_changelog_err()
+
+ # cleanup tracker
+ startfresh()
+
+ return sorted(changes, key=clsort)
+
+
+def done(clfile):
+ ret = gr_cl_done(libgfc, clfile)
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def history_scan():
+ ret = libgfc.gf_history_changelog_scan()
+ if ret == -1:
+ _raise_changelog_err()
+
+ return ret
+
+
+def history_changelog(changelog_path, start, end, num_parallel):
+ actual_end = c_ulong()
+ ret = gr_cl_history_changelog(libgfc, changelog_path, start, end,
+ num_parallel, byref(actual_end))
+ if ret == -1:
+ _raise_changelog_err()
+
+ if ret == -2:
+ raise ChangelogHistoryNotAvailable()
+
+ return (ret, actual_end.value)
+
+
+def history_startfresh():
+ ret = libgfc.gf_history_changelog_start_fresh()
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def history_getchanges():
+ def clsort(cfile):
+ return cfile.split('.')[-1]
+
+ changes = []
+ buf = gr_create_string_buffer(4096)
+ call = libgfc.gf_history_changelog_next_change
+
+ while True:
+ ret = call(buf, 4096)
+ if ret in (0, -1):
+ break
+
+ # py2 and py3 compatibility
+ result = bytearray_to_str(buf.raw[:ret - 1])
+ changes.append(result)
+
+ if ret == -1:
+ _raise_changelog_err()
+
+ return sorted(changes, key=clsort)
+
+
+def history_done(clfile):
+ ret = gr_cl_history_done(libgfc, clfile)
+ if ret == -1:
+ _raise_changelog_err()
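
The rewrite above turns the Changes class into plain module-level functions, which the worker now imports and calls directly (see the master.py and resource.py hunks further down). A condensed sketch of the consumption loop the new API implies; the brick path, working directory, log file and log level below are placeholders:

    import libgfchangelog

    libgfchangelog.register("/bricks/brick1",               # brick path (placeholder)
                            "/var/lib/gsyncd/workdir",      # changelog working dir (placeholder)
                            "/var/log/glusterfs/changes.log",
                            7,                              # changelog log level (placeholder)
                            retries=5)

    libgfchangelog.scan()
    for clfile in libgfchangelog.getchanges():
        # ... replay the entries recorded in clfile on the slave ...
        libgfchangelog.done(clfile)   # mark the changelog as processed
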
diff --git a/geo-replication/syncdaemon/master.py b/geo-replication/syncdaemon/master.py
index f02cdb4c7f8..9501aeae6b5 100644
--- a/geo-replication/syncdaemon/master.py
+++ b/geo-replication/syncdaemon/master.py
@@ -22,11 +22,13 @@ from threading import Condition, Lock
from datetime import datetime
import gsyncdconfig as gconf
+import libgfchangelog
from rconf import rconf
-from syncdutils import Thread, GsyncdError, escape_space_newline
-from syncdutils import unescape_space_newline, gauxpfx, escape
-from syncdutils import lstat, errno_wrap, FreeObject, lf, matching_disk_gfid
-from syncdutils import NoStimeAvailable, PartialHistoryAvailable
+from syncdutils import (Thread, GsyncdError, escape_space_newline,
+ unescape_space_newline, gauxpfx, escape,
+ lstat, errno_wrap, FreeObject, lf, matching_disk_gfid,
+ NoStimeAvailable, PartialHistoryAvailable,
+ host_brick_split)
URXTIME = (-1, 0)
@@ -517,7 +519,7 @@ class GMasterCommon(object):
# If crawlwrap is called when partial history available,
# then it sets register_time which is the time when geo-rep
# worker registered to changelog consumption. Since nsec is
- # not considered in register time, their are chances of skipping
+ # not considered in register time, there are chances of skipping
# changes detection in xsync crawl. This limit will be reset when
# crawlwrap is called again.
self.live_changelog_start_time = None
@@ -1465,7 +1467,7 @@ class GMasterChangelogMixin(GMasterCommon):
node = rconf.args.resource_remote
node_data = node.split("@")
node = node_data[-1]
- remote_node_ip = node.split(":")[0]
+ remote_node_ip, _ = host_brick_split(node)
self.status.set_slave_node(remote_node_ip)
def changelogs_batch_process(self, changes):
@@ -1498,9 +1500,9 @@ class GMasterChangelogMixin(GMasterCommon):
# that are _historical_ to that time.
data_stime = self.get_data_stime()
- self.changelog_agent.scan()
+ libgfchangelog.scan()
self.crawls += 1
- changes = self.changelog_agent.getchanges()
+ changes = libgfchangelog.getchanges()
if changes:
if data_stime:
logging.info(lf("slave's time",
@@ -1517,10 +1519,9 @@ class GMasterChangelogMixin(GMasterCommon):
self.changelogs_batch_process(changes)
- def register(self, register_time, changelog_agent, status):
- self.changelog_agent = changelog_agent
+ def register(self, register_time, status):
self.sleep_interval = gconf.get("change-interval")
- self.changelog_done_func = self.changelog_agent.done
+ self.changelog_done_func = libgfchangelog.done
self.tempdir = self.setup_working_dir()
self.processed_changelogs_dir = os.path.join(self.tempdir,
".processed")
@@ -1529,11 +1530,10 @@ class GMasterChangelogMixin(GMasterCommon):
class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
- def register(self, register_time, changelog_agent, status):
- self.changelog_agent = changelog_agent
+ def register(self, register_time, status):
self.changelog_register_time = register_time
self.history_crawl_start_time = register_time
- self.changelog_done_func = self.changelog_agent.history_done
+ self.changelog_done_func = libgfchangelog.history_done
self.history_turns = 0
self.tempdir = self.setup_working_dir()
self.processed_changelogs_dir = os.path.join(self.tempdir,
@@ -1547,6 +1547,12 @@ class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
data_stime = self.get_data_stime()
end_time = int(time.time())
+
+ # Log ignore-deletes here, as the start of a historical crawl marks a Geo-rep worker restart.
+ if gconf.get("ignore-deletes"):
+ logging.info(lf('ignore-deletes config option is set',
+ stime=data_stime))
+
logging.info(lf('starting history crawl',
turns=self.history_turns,
stime=data_stime,
@@ -1561,7 +1567,7 @@ class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
# location then consuming history will not work(Known issue as of now)
changelog_path = os.path.join(rconf.args.local_path,
".glusterfs/changelogs")
- ret, actual_end = self.changelog_agent.history(
+ ret, actual_end = libgfchangelog.history_changelog(
changelog_path,
data_stime[0],
end_time,
@@ -1573,10 +1579,10 @@ class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
# to be processed. returns positive value as number of changelogs
# to be processed, which will be fetched using
# history_getchanges()
- while self.changelog_agent.history_scan() > 0:
+ while libgfchangelog.history_scan() > 0:
self.crawls += 1
- changes = self.changelog_agent.history_getchanges()
+ changes = libgfchangelog.history_getchanges()
if changes:
if data_stime:
logging.info(lf("slave's time",
@@ -1629,7 +1635,7 @@ class GMasterXsyncMixin(GMasterChangelogMixin):
XSYNC_MAX_ENTRIES = 1 << 13
- def register(self, register_time=None, changelog_agent=None, status=None):
+ def register(self, register_time=None, status=None):
self.status = status
self.counter = 0
self.comlist = []
diff --git a/geo-replication/syncdaemon/monitor.py b/geo-replication/syncdaemon/monitor.py
index 236afe70d11..6aa7b9dfc99 100644
--- a/geo-replication/syncdaemon/monitor.py
+++ b/geo-replication/syncdaemon/monitor.py
@@ -20,13 +20,14 @@ import random
from resource import SSH
import gsyncdconfig as gconf
+import libgfchangelog
from rconf import rconf
-from syncdutils import select, waitpid, errno_wrap, lf, grabpidfile
-from syncdutils import set_term_handler, GsyncdError
-from syncdutils import Thread, finalize, Volinfo, VolinfoFromGconf
-from syncdutils import gf_event, EVENT_GEOREP_FAULTY, get_up_nodes
+from syncdutils import (select, waitpid, errno_wrap, lf, grabpidfile,
+ set_term_handler, GsyncdError,
+ Thread, finalize, Volinfo, VolinfoFromGconf,
+ gf_event, EVENT_GEOREP_FAULTY, get_up_nodes,
+ unshare_propagation_supported)
from gsyncdstatus import GeorepStatus, set_monitor_status
-from syncdutils import unshare_propagation_supported
import py2py3
from py2py3 import pipe
@@ -81,7 +82,7 @@ class Monitor(object):
# give a chance to graceful exit
errno_wrap(os.kill, [-os.getpid(), signal.SIGTERM], [ESRCH])
- def monitor(self, w, argv, cpids, agents, slave_vol, slave_host, master,
+ def monitor(self, w, argv, cpids, slave_vol, slave_host, master,
suuid, slavenodes):
"""the monitor loop
@@ -150,7 +151,7 @@ class Monitor(object):
remote_host = "%s@%s" % (remote_user, remote_new[0])
remote_id = remote_new[1]
- # Spawn the worker and agent in lock to avoid fd leak
+ # Spawn the worker in lock to avoid fd leak
self.lock.acquire()
self.status[w[0]['dir']].set_worker_status(self.ST_INIT)
@@ -158,44 +159,10 @@ class Monitor(object):
brick=w[0]['dir'],
slave_node=remote_host))
- # Couple of pipe pairs for RPC communication b/w
- # worker and changelog agent.
-
- # read/write end for agent
- (ra, ww) = pipe()
- # read/write end for worker
- (rw, wa) = pipe()
-
- # spawn the agent process
- apid = os.fork()
- if apid == 0:
- os.close(rw)
- os.close(ww)
- args_to_agent = argv + [
- 'agent',
- rconf.args.master,
- rconf.args.slave,
- '--local-path', w[0]['dir'],
- '--local-node', w[0]['host'],
- '--local-node-id', w[0]['uuid'],
- '--slave-id', suuid,
- '--rpc-fd', ','.join([str(ra), str(wa), str(rw), str(ww)])
- ]
-
- if rconf.args.config_file is not None:
- args_to_agent += ['-c', rconf.args.config_file]
-
- if rconf.args.debug:
- args_to_agent.append("--debug")
-
- os.execv(sys.executable, args_to_agent)
-
pr, pw = pipe()
cpid = os.fork()
if cpid == 0:
os.close(pr)
- os.close(ra)
- os.close(wa)
args_to_worker = argv + [
'worker',
@@ -206,8 +173,6 @@ class Monitor(object):
'--local-node', w[0]['host'],
'--local-node-id', w[0]['uuid'],
'--slave-id', suuid,
- '--rpc-fd',
- ','.join([str(rw), str(ww), str(ra), str(wa)]),
'--subvol-num', str(w[2]),
'--resource-remote', remote_host,
'--resource-remote-id', remote_id
@@ -238,14 +203,8 @@ class Monitor(object):
os.execv(sys.executable, args_to_worker)
cpids.add(cpid)
- agents.add(apid)
os.close(pw)
- # close all RPC pipes in monitor
- os.close(ra)
- os.close(wa)
- os.close(rw)
- os.close(ww)
self.lock.release()
t0 = time.time()
@@ -254,42 +213,19 @@ class Monitor(object):
if so:
ret = nwait(cpid, os.WNOHANG)
- ret_agent = nwait(apid, os.WNOHANG)
-
- if ret_agent is not None:
- # Agent is died Kill Worker
- logging.info(lf("Changelog Agent died, Aborting Worker",
- brick=w[0]['dir']))
- errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
- nwait(cpid)
- nwait(apid)
if ret is not None:
logging.info(lf("worker died before establishing "
"connection",
brick=w[0]['dir']))
- nwait(apid) # wait for agent
else:
logging.debug("worker(%s) connected" % w[0]['dir'])
while time.time() < t0 + conn_timeout:
ret = nwait(cpid, os.WNOHANG)
- ret_agent = nwait(apid, os.WNOHANG)
if ret is not None:
logging.info(lf("worker died in startup phase",
brick=w[0]['dir']))
- nwait(apid) # wait for agent
- break
-
- if ret_agent is not None:
- # Agent is died Kill Worker
- logging.info(lf("Changelog Agent died, Aborting "
- "Worker",
- brick=w[0]['dir']))
- errno_wrap(os.kill, [cpid, signal.SIGKILL],
- [ESRCH])
- nwait(cpid)
- nwait(apid)
break
time.sleep(1)
@@ -304,12 +240,8 @@ class Monitor(object):
brick=w[0]['dir'],
timeout=conn_timeout))
errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
- nwait(apid) # wait for agent
ret = nwait(cpid)
if ret is None:
- # If worker dies, agent terminates on EOF.
- # So lets wait for agent first.
- nwait(apid)
ret = nwait(cpid)
if exit_signalled(ret):
ret = 0
@@ -333,18 +265,15 @@ class Monitor(object):
argv = [os.path.basename(sys.executable), sys.argv[0]]
cpids = set()
- agents = set()
ta = []
for wx in wspx:
def wmon(w):
- cpid, _ = self.monitor(w, argv, cpids, agents, slave_vol,
+ cpid, _ = self.monitor(w, argv, cpids, slave_vol,
slave_host, master, suuid, slavenodes)
time.sleep(1)
self.lock.acquire()
for cpid in cpids:
errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
- for apid in agents:
- errno_wrap(os.kill, [apid, signal.SIGKILL], [ESRCH])
self.lock.release()
finalize(exval=1)
t = Thread(target=wmon, args=[wx])
@@ -354,8 +283,8 @@ class Monitor(object):
# monitor status was being updated in each monitor thread. It
# should not be done as it can cause deadlock for a worker start.
# set_monitor_status uses flock to synchronize multple instances
- # updating the file. Since each monitor thread forks worker and
- # agent, these processes can hold the reference to fd of status
+ # updating the file. Since each monitor thread forks a worker,
+ # these processes can hold a reference to the fd of the status
# file causing deadlock to workers which starts later as flock
# will not be release until all references to same fd is closed.
# It will also cause fd leaks.
diff --git a/geo-replication/syncdaemon/py2py3.py b/geo-replication/syncdaemon/py2py3.py
index faad750059c..f9c76e1b50a 100644
--- a/geo-replication/syncdaemon/py2py3.py
+++ b/geo-replication/syncdaemon/py2py3.py
@@ -55,23 +55,23 @@ if sys.version_info >= (3,):
def gr_lremovexattr(cls, path, attr):
return cls.libc.lremovexattr(path.encode(), attr.encode())
- def gr_cl_register(cls, brick, path, log_file, log_level, retries):
- return cls._get_api('gf_changelog_register')(brick.encode(),
- path.encode(),
- log_file.encode(),
- log_level, retries)
+ def gr_cl_register(libgfapi, brick, path, log_file, log_level, retries):
+ return libgfapi.gf_changelog_register(brick.encode(),
+ path.encode(),
+ log_file.encode(),
+ log_level, retries)
- def gr_cl_done(cls, clfile):
- return cls._get_api('gf_changelog_done')(clfile.encode())
+ def gr_cl_done(libgfapi, clfile):
+ return libgfapi.gf_changelog_done(clfile.encode())
- def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel,
+ def gr_cl_history_changelog(libgfapi, changelog_path, start, end, num_parallel,
actual_end):
- return cls._get_api('gf_history_changelog')(changelog_path.encode(),
- start, end, num_parallel,
- actual_end)
+ return libgfapi.gf_history_changelog(changelog_path.encode(),
+ start, end, num_parallel,
+ actual_end)
- def gr_cl_history_done(cls, clfile):
- return cls._get_api('gf_history_changelog_done')(clfile.encode())
+ def gr_cl_history_done(libgfapi, clfile):
+ return libgfapi.gf_history_changelog_done(clfile.encode())
# regular file
@@ -137,20 +137,20 @@ else:
def gr_lremovexattr(cls, path, attr):
return cls.libc.lremovexattr(path, attr)
- def gr_cl_register(cls, brick, path, log_file, log_level, retries):
- return cls._get_api('gf_changelog_register')(brick, path, log_file,
- log_level, retries)
+ def gr_cl_register(libgfapi, brick, path, log_file, log_level, retries):
+ return libgfapi.gf_changelog_register(brick, path, log_file,
+ log_level, retries)
- def gr_cl_done(cls, clfile):
- return cls._get_api('gf_changelog_done')(clfile)
+ def gr_cl_done(libgfapi, clfile):
+ return libgfapi.gf_changelog_done(clfile)
- def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel,
+ def gr_cl_history_changelog(libgfapi, changelog_path, start, end, num_parallel,
actual_end):
- return cls._get_api('gf_history_changelog')(changelog_path, start, end,
- num_parallel, actual_end)
+ return libgfapi.gf_history_changelog(changelog_path, start, end,
+ num_parallel, actual_end)
- def gr_cl_history_done(cls, clfile):
- return cls._get_api('gf_history_changelog_done')(clfile)
+ def gr_cl_history_done(libgfapi, clfile):
+ return libgfapi.gf_history_changelog_done(clfile)
# regular file
diff --git a/geo-replication/syncdaemon/resource.py b/geo-replication/syncdaemon/resource.py
index 189d8a101fd..f12c7ceaa36 100644
--- a/geo-replication/syncdaemon/resource.py
+++ b/geo-replication/syncdaemon/resource.py
@@ -19,30 +19,31 @@ import struct
import logging
import tempfile
import subprocess
-from errno import EEXIST, ENOENT, ENODATA, ENOTDIR, ELOOP, EACCES
-from errno import EISDIR, ENOTEMPTY, ESTALE, EINVAL, EBUSY, EPERM
+from errno import (EEXIST, ENOENT, ENODATA, ENOTDIR, ELOOP, EACCES,
+ EISDIR, ENOTEMPTY, ESTALE, EINVAL, EBUSY, EPERM)
import errno
from rconf import rconf
import gsyncdconfig as gconf
+import libgfchangelog
import repce
from repce import RepceServer, RepceClient
from master import gmaster_builder
import syncdutils
-from syncdutils import GsyncdError, select, privileged, funcode
-from syncdutils import entry2pb, gauxpfx, errno_wrap, lstat
-from syncdutils import NoStimeAvailable, PartialHistoryAvailable
-from syncdutils import ChangelogException, ChangelogHistoryNotAvailable
-from syncdutils import get_changelog_log_level, get_rsync_version
-from syncdutils import CHANGELOG_AGENT_CLIENT_VERSION
-from syncdutils import GX_GFID_CANONICAL_LEN
+from syncdutils import (GsyncdError, select, privileged, funcode,
+ entry2pb, gauxpfx, errno_wrap, lstat,
+ NoStimeAvailable, PartialHistoryAvailable,
+ ChangelogException, ChangelogHistoryNotAvailable,
+ get_changelog_log_level, get_rsync_version,
+ GX_GFID_CANONICAL_LEN,
+ gf_mount_ready, lf, Popen, sup,
+ Xattr, matching_disk_gfid, get_gfid_from_mnt,
+ unshare_propagation_supported, get_slv_dir_path)
from gsyncdstatus import GeorepStatus
-from syncdutils import lf, Popen, sup
-from syncdutils import Xattr, matching_disk_gfid, get_gfid_from_mnt
-from syncdutils import unshare_propagation_supported, get_slv_dir_path
-from py2py3 import pipe, str_to_bytearray, entry_pack_reg
-from py2py3 import entry_pack_reg_stat, entry_pack_mkdir, entry_pack_symlink
+from py2py3 import (pipe, str_to_bytearray, entry_pack_reg,
+ entry_pack_reg_stat, entry_pack_mkdir,
+ entry_pack_symlink)
ENOTSUP = getattr(errno, 'ENOTSUP', 'EOPNOTSUPP')
@@ -950,6 +951,16 @@ class Mounter(object):
logging.exception('mount cleanup failure:')
rv = 200
os._exit(rv)
+
+ #Polling the dht.subvol.status value.
+ RETRIES = 10
+ while not gf_mount_ready():
+ if RETRIES < 0:
+ logging.error('Subvols are not up')
+ break
+ RETRIES -= 1
+ time.sleep(0.2)
+
logging.debug('auxiliary glusterfs mount prepared')
@@ -1245,9 +1256,6 @@ class GLUSTER(object):
# register the crawlers and start crawling
# g1 ==> Xsync, g2 ==> config.change_detector(changelog by default)
# g3 ==> changelog History
- (inf, ouf, ra, wa) = rconf.args.rpc_fd.split(',')
- changelog_agent = RepceClient(int(inf), int(ouf))
-
status = GeorepStatus(gconf.get("state-file"),
rconf.args.local_node,
rconf.args.local_path,
@@ -1255,12 +1263,6 @@ class GLUSTER(object):
rconf.args.master,
rconf.args.slave)
status.reset_on_worker_start()
- rv = changelog_agent.version()
- if int(rv) != CHANGELOG_AGENT_CLIENT_VERSION:
- raise GsyncdError(
- "RePCe major version mismatch(changelog agent): "
- "local %s, remote %s" %
- (CHANGELOG_AGENT_CLIENT_VERSION, rv))
try:
workdir = g2.setup_working_dir()
@@ -1271,17 +1273,16 @@ class GLUSTER(object):
# register with the changelog library
# 9 == log level (DEBUG)
# 5 == connection retries
- changelog_agent.init()
- changelog_agent.register(rconf.args.local_path,
- workdir,
- gconf.get("changelog-log-file"),
- get_changelog_log_level(
- gconf.get("changelog-log-level")),
- g2.CHANGELOG_CONN_RETRIES)
+ libgfchangelog.register(rconf.args.local_path,
+ workdir,
+ gconf.get("changelog-log-file"),
+ get_changelog_log_level(
+ gconf.get("changelog-log-level")),
+ g2.CHANGELOG_CONN_RETRIES)
register_time = int(time.time())
- g2.register(register_time, changelog_agent, status)
- g3.register(register_time, changelog_agent, status)
+ g2.register(register_time, status)
+ g3.register(register_time, status)
except ChangelogException as e:
logging.error(lf("Changelog register failed", error=e))
sys.exit(1)
@@ -1483,7 +1484,7 @@ class SSH(object):
if log_rsync_performance:
# use stdout=PIPE only when log_rsync_performance enabled
- # Else rsync will write to stdout and nobody is their
+ # Else rsync will write to stdout and nobody is there
# to consume. If PIPE is full rsync hangs.
po = Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
diff --git a/geo-replication/syncdaemon/subcmds.py b/geo-replication/syncdaemon/subcmds.py
index f8515f2607b..b8508532e30 100644
--- a/geo-replication/syncdaemon/subcmds.py
+++ b/geo-replication/syncdaemon/subcmds.py
@@ -97,17 +97,6 @@ def subcmd_slave(args):
local.service_loop()
-def subcmd_agent(args):
- import os
- from changelogagent import agent, Changelog
- from syncdutils import lf
-
- os.setsid()
- logging.debug(lf("RPC FD",
- rpc_fd=repr(args.rpc_fd)))
- return agent(Changelog(), args.rpc_fd)
-
-
def subcmd_voluuidget(args):
from subprocess import Popen, PIPE
import xml.etree.ElementTree as XET
diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
index b08098ef007..a3df103e76c 100644
--- a/geo-replication/syncdaemon/syncdutils.py
+++ b/geo-replication/syncdaemon/syncdutils.py
@@ -21,8 +21,8 @@ import subprocess
import socket
from subprocess import PIPE
from threading import Lock, Thread as baseThread
-from errno import EACCES, EAGAIN, EPIPE, ENOTCONN, ECONNABORTED
-from errno import EINTR, ENOENT, ESTALE, EBUSY, errorcode
+from errno import (EACCES, EAGAIN, EPIPE, ENOTCONN, ENOMEM, ECONNABORTED,
+ EINTR, ENOENT, ESTALE, EBUSY, ENODATA, errorcode, EIO)
from signal import signal, SIGTERM
import select as oselect
from os import waitpid as owaitpid
@@ -55,6 +55,8 @@ from rconf import rconf
from hashlib import sha256 as sha256
+ENOTSUP = getattr(errno, 'ENOTSUP', 'EOPNOTSUPP')
+
# auxiliary gfid based access prefix
_CL_AUX_GFID_PFX = ".gfid/"
ROOT_GFID = "00000000-0000-0000-0000-000000000001"
@@ -62,8 +64,6 @@ GF_OP_RETRIES = 10
GX_GFID_CANONICAL_LEN = 37 # canonical gfid len + '\0'
-CHANGELOG_AGENT_SERVER_VERSION = 1.0
-CHANGELOG_AGENT_CLIENT_VERSION = 1.0
NodeID = None
rsync_version = None
unshare_mnt_propagation = None
@@ -100,6 +100,19 @@ def unescape_space_newline(s):
.replace(NEWLINE_ESCAPE_CHAR, "\n")\
.replace(PERCENTAGE_ESCAPE_CHAR, "%")
+# gf_mount_ready() returns 1 if all subvols are up, else 0
+def gf_mount_ready():
+ ret = errno_wrap(Xattr.lgetxattr,
+ ['.', 'dht.subvol.status', 16],
+ [ENOENT, ENOTSUP, ENODATA], [ENOMEM])
+
+ if isinstance(ret, int):
+ logging.error("failed to get the xattr value")
+ return 1
+ ret = ret.rstrip('\x00')
+ if ret == "1":
+ return 1
+ return 0
def norm(s):
if s:
@@ -331,13 +344,24 @@ def log_raise_exception(excont):
ECONNABORTED):
logging.error(lf('Gluster Mount process exited',
error=errorcode[exc.errno]))
+ elif isinstance(exc, OSError) and exc.errno == EIO:
+ logging.error("Getting \"Input/Output error\" "
+ "is most likely due to "
+ "a. Brick is down or "
+ "b. Split brain issue.")
+ logging.error("This is expected as per design to "
+ "keep the consistency of the file system. "
+ "Once the above issue is resolved "
+ "geo-replication would automatically "
+ "proceed further.")
+ logtag = "FAIL"
else:
logtag = "FAIL"
if not logtag and logging.getLogger().isEnabledFor(logging.DEBUG):
logtag = "FULL EXCEPTION TRACE"
if logtag:
logging.exception(logtag + ": ")
- sys.stderr.write("failed with %s.\n" % type(exc).__name__)
+ sys.stderr.write("failed with %s: %s.\n" % (type(exc).__name__, exc))
excont.exval = 1
sys.exit(excont.exval)
@@ -564,7 +588,6 @@ def errno_wrap(call, arg=[], errnos=[], retry_errnos=[]):
def lstat(e):
return errno_wrap(os.lstat, [e], [ENOENT], [ESTALE, EBUSY])
-
def get_gfid_from_mnt(gfidpath):
return errno_wrap(Xattr.lgetxattr,
[gfidpath, 'glusterfs.gfid.string',
@@ -702,11 +725,13 @@ def get_slv_dir_path(slv_host, slv_volume, gfid):
if not isinstance(realpath, int):
basename = os.path.basename(realpath).rstrip('\x00')
dirpath = os.path.dirname(realpath)
- if dirpath is "/":
+ if dirpath == "/":
pargfid = ROOT_GFID
else:
dirpath = dirpath.strip("/")
pargfid = get_gfid_from_mnt(dirpath)
+ if isinstance(pargfid, int):
+ return None
dir_entry = os.path.join(pfx, pargfid, basename)
return dir_entry
@@ -718,12 +743,12 @@ def lf(event, **kwargs):
Log Format helper function, log messages can be
easily modified to structured log format.
lf("Config Change", sync_jobs=4, brick=/bricks/b1) will be
- converted as "Config Change<TAB>brick=/bricks/b1<TAB>sync_jobs=4"
+ converted as "Config Change [{brick=/bricks/b1}, {sync_jobs=4}]"
"""
- msg = event
+ msgparts = []
for k, v in kwargs.items():
- msg += "\t{0}={1}".format(k, v)
- return msg
+ msgparts.append("{%s=%s}" % (k, v))
+ return "%s [%s]" % (event, ", ".join(msgparts))
class Popen(subprocess.Popen):
@@ -869,6 +894,19 @@ class Popen(subprocess.Popen):
self.errfail()
+def host_brick_split(value):
+ """
+ IPv6 compatible way to split and get the host
+ and brick information. Example inputs:
+ node1.example.com:/exports/bricks/brick1/brick
+ fe80::af0f:df82:844f:ef66%utun0:/exports/bricks/brick1/brick
+ """
+ parts = value.split(":")
+ brick = parts[-1]
+ hostparts = parts[0:-1]
+ return (":".join(hostparts), brick)
+
+
class Volinfo(object):
def __init__(self, vol, host='localhost', prelude=[], master=True):
@@ -911,7 +949,7 @@ class Volinfo(object):
@memoize
def bricks(self):
def bparse(b):
- host, dirp = b.find("name").text.split(':', 2)
+ host, dirp = host_brick_split(b.find("name").text)
return {'host': host, 'dir': dirp, 'uuid': b.find("hostUuid").text}
return [bparse(b) for b in self.get('brick')]
@@ -987,6 +1025,16 @@ class VolinfoFromGconf(object):
def is_hot(self, brickpath):
return False
+ def is_uuid(self, value):
+ try:
+ uuid.UUID(value)
+ return True
+ except ValueError:
+ return False
+
+ def possible_path(self, value):
+ return "/" in value
+
@property
@memoize
def bricks(self):
@@ -1000,8 +1048,22 @@ class VolinfoFromGconf(object):
out = []
for b in bricks_data:
parts = b.split(":")
- bpath = parts[2] if len(parts) == 3 else ""
- out.append({"host": parts[1], "dir": bpath, "uuid": parts[0]})
+ b_uuid = None
+ if self.is_uuid(parts[0]):
+ b_uuid = parts[0]
+ # Set all parts except first
+ parts = parts[1:]
+
+ if self.possible_path(parts[-1]):
+ bpath = parts[-1]
+ # Set all parts except last
+ parts = parts[0:-1]
+
+ out.append({
+ "host": ":".join(parts), # if remaining parts are IPv6 name
+ "dir": bpath,
+ "uuid": b_uuid
+ })
return out
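
The new host_brick_split helper above (and the reworked VolinfoFromGconf.bricks) exists because IPv6 addresses contain colons, so a naive split(':') would mangle the host part of a host:brick spec. A quick illustration, restating the same splitting rule with the docstring's example inputs:

    def host_brick_split(value):
        # Mirror of syncdutils.host_brick_split: the brick path is everything
        # after the last colon, the host is everything before it.
        parts = value.split(":")
        return (":".join(parts[:-1]), parts[-1])

    print(host_brick_split("node1.example.com:/exports/bricks/brick1/brick"))
    # ('node1.example.com', '/exports/bricks/brick1/brick')
    print(host_brick_split("fe80::af0f:df82:844f:ef66%utun0:/exports/bricks/brick1/brick"))
    # ('fe80::af0f:df82:844f:ef66%utun0', '/exports/bricks/brick1/brick')
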
diff --git a/geo-replication/tests/unit/test_gsyncdstatus.py b/geo-replication/tests/unit/test_gsyncdstatus.py
index 483023dbfe9..9c1aa2ad4ad 100755
--- a/geo-replication/tests/unit/test_gsyncdstatus.py
+++ b/geo-replication/tests/unit/test_gsyncdstatus.py
@@ -13,11 +13,11 @@ import unittest
import os
import urllib
-from syncdaemon.gstatus import GeorepStatus, set_monitor_status
-from syncdaemon.gstatus import get_default_values
-from syncdaemon.gstatus import MONITOR_STATUS, DEFAULT_STATUS
-from syncdaemon.gstatus import STATUS_VALUES, CRAWL_STATUS_VALUES
-from syncdaemon.gstatus import human_time, human_time_utc
+from syncdaemon.gstatus import (GeorepStatus, set_monitor_status,
+ get_default_values,
+ MONITOR_STATUS, DEFAULT_STATUS,
+ STATUS_VALUES, CRAWL_STATUS_VALUES,
+ human_time, human_time_utc)
class GeorepStatusTestCase(unittest.TestCase):
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 2bd5c77e723..b6d63146e14 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -70,16 +70,6 @@
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}
-# rdma
-# if you wish to compile an rpm without rdma support, compile like this...
-# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without rdma
-%{?_without_rdma:%global _without_rdma --disable-ibverbs}
-
-# No RDMA Support on 32-bit ARM
-%ifarch armv7hl
-%global _without_rdma --disable-ibverbs
-%endif
-
# server
# if you wish to build rpms without server components, compile like this
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without server
@@ -121,6 +111,12 @@
## All %%global definitions should be placed here and keep them sorted
##
+# selinux booleans whose default value needs modification
+# these booleans will be consumed by the "%%selinux_set_booleans" macro.
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+%global selinuxbooleans rsync_full_access=1 rsync_client=1
+%endif
+
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
%endif
@@ -241,7 +237,9 @@ Requires(pre): shadow-utils
BuildRequires: systemd
%endif
-Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Requires: libgfrpc0%{?_isa} = %{version}-%{release}
+Requires: libgfxdr0%{?_isa} = %{version}-%{release}
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif
@@ -282,75 +280,40 @@ BuildRequires: libattr-devel
BuildRequires: firewalld
%endif
-Obsoletes: hekafs
Obsoletes: %{name}-common < %{version}-%{release}
Obsoletes: %{name}-core < %{version}-%{release}
-Obsoletes: %{name}-ufo
Obsoletes: %{name}-ganesha
+Obsoletes: %{name}-rdma < %{version}-%{release}
%if ( 0%{!?_with_gnfs:1} )
-Obsoletes: %{name}-gnfs
+Obsoletes: %{name}-gnfs < %{version}-%{release}
%endif
Provides: %{name}-common = %{version}-%{release}
Provides: %{name}-core = %{version}-%{release}
%description
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package includes the glusterfs binary, the glusterfsd daemon and the
libglusterfs and glusterfs translator modules common to both GlusterFS server
and client framework.
-%package api
-Summary: GlusterFS api library
-Requires: %{name}%{?_isa} = %{version}-%{release}
-Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
-
-%description api
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides the glusterfs libgfapi library.
-
-%package api-devel
-Summary: Development Libraries
-Requires: %{name}%{?_isa} = %{version}-%{release}
-Requires: %{name}-devel%{?_isa} = %{version}-%{release}
-Requires: libacl-devel
-
-%description api-devel
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides the api include files.
-
%package cli
Summary: GlusterFS CLI
-Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Requires: libglusterd0%{?_isa} = %{version}-%{release}
%description cli
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the GlusterFS CLI application and its man page
@@ -360,32 +323,14 @@ BuildRequires: libcurl-devel
%description cloudsync-plugins
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides cloudsync plugins for archival feature.
-%package devel
-Summary: Development Libraries
-Requires: %{name}%{?_isa} = %{version}-%{release}
-# Needed for the Glupy examples to work
-Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release}
-
-%description devel
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides the development libraries and include files.
-
%package extra-xlators
Summary: Extra Gluster filesystem Translators
# We need python-gluster rpm for gluster module's __init__.py in Python
@@ -395,12 +340,11 @@ Requires: python%{_pythonver}
%description extra-xlators
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides extra filesystem Translators, such as Glupy,
for GlusterFS.
@@ -419,6 +363,49 @@ Provides: %{name}-client = %{version}-%{release}
%description fuse
GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides support to FUSE based clients and includes the
+glusterfs(d) binary.
+
+%if ( 0%{!?_without_server:1} )
+%package ganesha
+Summary: NFS-Ganesha configuration
+Group: Applications/File
+
+Requires: %{name}-server%{?_isa} = %{version}-%{release}
+Requires: nfs-ganesha-selinux >= 2.7.6
+Requires: nfs-ganesha-gluster >= 2.7.6
+Requires: pcs >= 0.10.0
+Requires: resource-agents >= 4.2.0
+Requires: dbus
+
+%if ( 0%{?rhel} && 0%{?rhel} == 6 )
+Requires: cman, pacemaker, corosync
+%endif
+
+%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 )
+# we need portblock resource-agent in 3.9.5 and later.
+Requires: net-tools
+%endif
+
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%if ( 0%{?rhel} && 0%{?rhel} < 8 )
+Requires: selinux-policy >= 3.13.1-160
+Requires(post): policycoreutils-python
+Requires(postun): policycoreutils-python
+%else
+Requires(post): policycoreutils-python-utils
+Requires(postun): policycoreutils-python-utils
+%endif
+%endif
+
+%description ganesha
+GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
@@ -426,8 +413,9 @@ terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
-This package provides support to FUSE based clients and inlcudes the
-glusterfs(d) binary.
+This package provides the configuration and related files for using
+NFS-Ganesha as the NFS server on top of GlusterFS.
+%endif
%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
@@ -440,15 +428,22 @@ Requires: python%{_pythonver}-gluster = %{version}-%{release}
Requires: rsync
Requires: util-linux
+# required for setting selinux bools
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+Requires(post): policycoreutils-python-utils
+Requires(postun): policycoreutils-python-utils
+Requires: selinux-policy-targeted
+Requires(post): selinux-policy-targeted
+BuildRequires: selinux-policy-devel
+%endif
%description geo-replication
GlusterFS is a distributed file-system capable of scaling to several
-peta-bytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file system in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in userspace and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides support to geo-replication.
%endif
@@ -462,29 +457,187 @@ Requires: nfs-utils
%description gnfs
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the glusterfs legacy gNFS server xlator
%endif
-%package libs
-Summary: GlusterFS common libraries
+%package -n libglusterfs0
+Summary: GlusterFS libglusterfs library
+Requires: libgfrpc0%{?_isa} = %{version}-%{release}
+Requires: libgfxdr0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+Provides: %{name}-libs = %{version}-%{release}
-%description libs
+%description -n libglusterfs0
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the base libglusterfs library
+
+%package -n libglusterfs-devel
+Summary: GlusterFS libglusterfs library
+Requires: libgfrpc-devel%{?_isa} = %{version}-%{release}
+Requires: libgfxdr-devel%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-devel <= %{version}-%{release}
+Provides: %{name}-devel = %{version}-%{release}
+
+%description -n libglusterfs-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libglusterfs.so and the gluster C header files.
+
+%package -n libgfapi0
+Summary: GlusterFS api library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-api <= %{version}-%{release}
+Provides: %{name}-api = %{version}-%{release}
+
+%description -n libgfapi0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the glusterfs libgfapi library.
+
+%package -n libgfapi-devel
+Summary: Development Libraries
+Requires: libglusterfs-devel%{?_isa} = %{version}-%{release}
+Requires: libacl-devel
+Obsoletes: %{name}-api-devel <= %{version}-%{release}
+Provides: %{name}-api-devel = %{version}-%{release}
+
+%description -n libgfapi-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libgfapi.so and the api C header files.
+
+%package -n libgfchangelog0
+Summary: GlusterFS libchangelog library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+
+%description -n libgfchangelog0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the libgfchangelog library
+
+%package -n libgfchangelog-devel
+Summary: GlusterFS libchangelog library
+Requires: libglusterfs-devel%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-devel <= %{version}-%{release}
+
+%description -n libgfchangelog-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libgfchangelog.so and changelog C header files.
+
+%package -n libgfrpc0
+Summary: GlusterFS libgfrpc0 library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+
+%description -n libgfrpc0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the libgfrpc library
+
+%package -n libgfrpc-devel
+Summary: GlusterFS libgfrpc library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-devel <= %{version}-%{release}
-This package provides the base GlusterFS libraries
+%description -n libgfrpc-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libgfrpc.so and rpc C header files.
+
+%package -n libgfxdr0
+Summary: GlusterFS libgfxdr0 library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+
+%description -n libgfxdr0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the libgfxdr library
+
+%package -n libgfxdr-devel
+Summary: GlusterFS libgfxdr library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-devel <= %{version}-%{release}
+
+%description -n libgfxdr-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libgfxdr.so.
+
+%package -n libglusterd0
+Summary: GlusterFS libglusterd library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+
+%description -n libglusterd0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the libglusterd library
%package -n python%{_pythonver}-gluster
Summary: GlusterFS python library
@@ -497,39 +650,15 @@ Obsoletes: python-gluster < 3.10
%description -n python%{_pythonver}-gluster
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package contains the python modules of GlusterFS and own gluster
namespace.
-%if ( 0%{!?_without_rdma:1} )
-%package rdma
-Summary: GlusterFS rdma support for ib-verbs
-%if ( 0%{?fedora} && 0%{?fedora} > 26 )
-BuildRequires: rdma-core-devel
-%else
-BuildRequires: libibverbs-devel
-BuildRequires: librdmacm-devel >= 1.0.15
-%endif
-Requires: %{name}%{?_isa} = %{version}-%{release}
-
-%description rdma
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides support to ib-verbs library.
-%endif
-
%package regression-tests
Summary: Development Tools
Requires: %{name}%{?_isa} = %{version}-%{release}
@@ -559,12 +688,11 @@ Requires: %{_prefix}/lib/ocf/resource.d
%description resource-agents
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the resource agents which plug glusterd into
Open Cluster Framework (OCF) compliant cluster resource managers,
@@ -576,11 +704,15 @@ like Pacemaker.
Summary: Clustered file-system server
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-cli%{?_isa} = %{version}-%{release}
-Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Requires: libgfchangelog0%{?_isa} = %{version}-%{release}
+%if ( 0%{?fedora} && 0%{?fedora} >= 30 || ( 0%{?rhel} && 0%{?rhel} >= 8 ) )
+Requires: glusterfs-selinux >= 0.1.0-2
+%endif
# some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
Requires: %{name}-fuse%{?_isa} = %{version}-%{release}
# self-heal daemon, rebalance, nfs-server etc. are actually clients
-Requires: %{name}-api%{?_isa} = %{version}-%{release}
+Requires: libgfapi0%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
# lvm2 for snapshot, and nfs-utils and rpcbind/portmap for gnfs server
Requires: lvm2
@@ -619,12 +751,11 @@ Requires: valgrind
%description server
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the glusterfs server daemon.
%endif
@@ -646,12 +777,11 @@ Summary: GlusterFS client-side translators
%description client-xlators
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the translators needed on any GlusterFS client.
@@ -708,7 +838,6 @@ done
%{?_without_fusermount} \
%{?_without_georeplication} \
%{?_without_ocf} \
- %{?_without_rdma} \
%{?_without_server} \
%{?_without_syslog} \
%{?_with_ipv6default} \
@@ -792,6 +921,15 @@ sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sha
install -D -p -m 0644 extras/glusterfs-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
+# ganesha ghosts
+%if ( 0%{!?_without_server:1} )
+mkdir -p %{buildroot}%{_sysconfdir}/ganesha
+touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf
+mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/
+touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
+touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
+%endif
+
%if ( 0%{!?_without_georeplication:1} )
# geo-rep ghosts
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
@@ -842,23 +980,46 @@ rm -rf %{buildroot}
%endif
exit 0
-%post api
-/sbin/ldconfig
-
%if ( 0%{!?_without_events:1} )
%post events
%systemd_post glustereventsd
%endif
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%post ganesha
+semanage boolean -m ganesha_use_fusefs --on
+exit 0
+%endif
+%endif
+
%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+%selinux_set_booleans %{selinuxbooleans}
+%endif
if [ $1 -ge 1 ]; then
%systemd_postun_with_restart glusterd
fi
exit 0
%endif
-%post libs
+%post -n libglusterfs0
+/sbin/ldconfig
+
+%post -n libgfapi0
+/sbin/ldconfig
+
+%post -n libgfchangelog0
+/sbin/ldconfig
+
+%post -n libgfrpc0
+/sbin/ldconfig
+
+%post -n libgfxdr0
+/sbin/ldconfig
+
+%post -n libglusterd0
/sbin/ldconfig
%if ( 0%{!?_without_server:1} )
@@ -1004,6 +1165,36 @@ fi
exit 0
%endif
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%postun ganesha
+semanage boolean -m ganesha_use_fusefs --off
+exit 0
+%endif
+%endif
+
+##-----------------------------------------------------------------------------
+## All %%trigger should be placed here and keep them sorted
+##
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%trigger ganesha -- selinux-policy-targeted
+semanage boolean -m ganesha_use_fusefs --on
+exit 0
+%endif
+%endif
+
+##-----------------------------------------------------------------------------
+## All %%triggerun should be placed here and keep them sorted
+##
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%triggerun ganesha -- selinux-policy-targeted
+semanage boolean -m ganesha_use_fusefs --off
+exit 0
+%endif
+%endif
+
##-----------------------------------------------------------------------------
## All %%files should be placed here and keep them grouped
##
@@ -1014,9 +1205,6 @@ exit 0
%exclude %{_mandir}/man8/gluster.8*
%endif
%dir %{_localstatedir}/log/glusterfs
-%if ( 0%{!?_without_rdma:1} )
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
-%endif
%if 0%{?!_without_server:1}
%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
@@ -1070,20 +1258,12 @@ exit 0
%{_tmpfilesdir}/gluster.conf
%endif
-%files api
-%exclude %{_libdir}/*.so
-# libgfapi files
-%{_libdir}/libgfapi.*
-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
- %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so
-
-%files api-devel
-%{_libdir}/pkgconfig/glusterfs-api.pc
-%{_libdir}/libgfapi.so
-%dir %{_includedir}/glusterfs
-%dir %{_includedir}/glusterfs/api
- %{_includedir}/glusterfs/api/*
+%if ( 0%{?_without_server:1} )
+#exclude ganesha related files
+%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
+%exclude %{_libexecdir}/ganesha/*
+%exclude %{_prefix}/lib/ocf/resource.d/heartbeat/*
+%endif
%files cli
%{_sbindir}/gluster
@@ -1095,14 +1275,33 @@ exit 0
%{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsyncs3.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsynccvlt.so
-%files devel
+%files -n libglusterfs-devel
%dir %{_includedir}/glusterfs
- %{_includedir}/glusterfs/*
-%exclude %{_includedir}/glusterfs/api
-%exclude %{_libdir}/libgfapi.so
-%{_libdir}/*.so
+ %{_includedir}/glusterfs/*.h
+ %{_includedir}/glusterfs/server/*.h
+%{_libdir}/libglusterfs.so
+
+%files -n libgfapi-devel
+%dir %{_includedir}/glusterfs/api
+ %{_includedir}/glusterfs/api/*.h
+%{_libdir}/libgfapi.so
+%{_libdir}/pkgconfig/glusterfs-api.pc
+
+
+%files -n libgfchangelog-devel
+%dir %{_includedir}/glusterfs/gfchangelog
+ %{_includedir}/glusterfs/gfchangelog/*.h
+%{_libdir}/libgfchangelog.so
%{_libdir}/pkgconfig/libgfchangelog.pc
+%files -n libgfrpc-devel
+%dir %{_includedir}/glusterfs/rpc
+ %{_includedir}/glusterfs/rpc/*.h
+%{_libdir}/libgfrpc.so
+
+%files -n libgfxdr-devel
+%{_libdir}/libgfxdr.so
+
%files client-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster
@@ -1153,7 +1352,6 @@ exit 0
%{_unitdir}/gluster-ta-volume.service
%endif
-
%if ( 0%{!?_without_georeplication:1} )
%files geo-replication
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
@@ -1165,6 +1363,13 @@ exit 0
%dir %{_libexecdir}/glusterfs/python/syncdaemon
%{_libexecdir}/glusterfs/gsyncd
%{_libexecdir}/glusterfs/python/syncdaemon/*
+%dir %{_libexecdir}/glusterfs/scripts
+ %{_libexecdir}/glusterfs/scripts/get-gfid.sh
+ %{_libexecdir}/glusterfs/scripts/slave-upgrade.sh
+ %{_libexecdir}/glusterfs/scripts/gsync-upgrade.sh
+ %{_libexecdir}/glusterfs/scripts/generate-gfid-file.sh
+ %{_libexecdir}/glusterfs/scripts/gsync-sync-gfid
+ %{_libexecdir}/glusterfs/scripts/schedule_georep.py*
%{_libexecdir}/glusterfs/gverify.sh
%{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
%{_libexecdir}/glusterfs/peer_gsec_create
@@ -1181,19 +1386,28 @@ exit 0
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/pre
-%dir %{_datadir}/glusterfs
-%dir %{_datadir}/glusterfs/scripts
- %{_datadir}/glusterfs/scripts/get-gfid.sh
- %{_datadir}/glusterfs/scripts/slave-upgrade.sh
- %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
- %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
- %{_datadir}/glusterfs/scripts/gsync-sync-gfid
- %{_datadir}/glusterfs/scripts/schedule_georep.py*
%endif
-%files libs
-%{_libdir}/*.so.*
-%exclude %{_libdir}/libgfapi.*
+%files -n libglusterfs0
+%{_libdir}/libglusterfs.so.*
+
+%files -n libgfapi0
+%{_libdir}/libgfapi.so.*
+%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
+ %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so
+
+%files -n libgfchangelog0
+%{_libdir}/libgfchangelog.so.*
+
+%files -n libgfrpc0
+%{_libdir}/libgfrpc.so.*
+
+%files -n libgfxdr0
+%{_libdir}/libgfxdr.so.*
+
+%files -n libglusterd0
+%{_libdir}/libglusterd.so.*
+%exclude %{_libdir}/libglusterd.so
%files -n python%{_pythonver}-gluster
# introducing glusterfs module in site packages.
@@ -1209,18 +1423,25 @@ exit 0
%{python2_sitelib}/gluster/cliutils
%endif
-%if ( 0%{!?_without_rdma:1} )
-%files rdma
-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
- %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
-%endif
-
%files regression-tests
%dir %{_datadir}/glusterfs
%{_datadir}/glusterfs/run-tests.sh
%{_datadir}/glusterfs/tests
%exclude %{_datadir}/glusterfs/tests/vagrant
+%if ( 0%{!?_without_server:1} )
+%files ganesha
+%dir %{_libexecdir}/ganesha
+%{_sysconfdir}/ganesha/ganesha-ha.conf.sample
+%{_libexecdir}/ganesha/*
+%{_prefix}/lib/ocf/resource.d/heartbeat/*
+%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
+%ghost %attr(0644,-,-) %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf
+%ghost %dir %attr(0755,-,-) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha
+%ghost %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
+%ghost %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
+%endif
+
%if ( 0%{!?_without_ocf:1} )
%files resource-agents
# /usr/lib is the standard for OCF, also on x86_64
@@ -1255,7 +1476,7 @@ exit 0
# binaries
%{_sbindir}/glusterd
-%{_sbindir}/glfsheal
+%{_libexecdir}/glusterfs/glfsheal
%{_sbindir}/gf_attach
%{_sbindir}/gluster-setgfid2path
# {_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
@@ -1400,9 +1621,25 @@ exit 0
%endif
%changelog
+* Thu May 14 2020 Kaleb S. KEITHLEY <kkeithle@redhat.com>
+- refactor, common practice, Issue #1126
+
+* Mon May 11 2020 Sunny Kumar <sunkumar@redhat.com>
+- added requires policycoreutils-python-utils on rhel8 for geo-replication
+
* Wed Oct 9 2019 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- remove leftover bd xlator cruft
+* Fri Aug 23 2019 Shwetha K Acharya <sacharya@redhat.com>
+- removed {name}-ufs from Obsoletes
+- added "< version" for obsoletes {name}-gnfs and {name}-rdma
+
+* Mon Jul 15 2019 Jiffin Tony Thottan <jthottan@redhat.com>
+- Adding ganesha ha bits back in gluster repository
+
+* Fri Jul 12 2019 Amar Tumballi <amarts@redhat.com>
+- Remove rdma package, and mark older rdma package as 'Obsoletes'
+
* Fri Jun 14 2019 Niels de Vos <ndevos@redhat.com>
- always build glusterfs-cli to allow monitoring/managing from clients
@@ -1493,9 +1730,6 @@ exit 0
* Thu Feb 16 2017 Niels de Vos <ndevos@redhat.com>
- Obsolete and Provide python-gluster for upgrading from glusterfs < 3.10
-* Tue Feb 7 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
-- remove ganesha (#1418417)
-
* Wed Feb 1 2017 Poornima G <pgurusid@redhat.com>
- Install /var/lib/glusterd/groups/metadata-cache by default
diff --git a/glusterfsd/src/Makefile.am b/glusterfsd/src/Makefile.am
index 7b8d1dbf1fb..a0a778158d8 100644
--- a/glusterfsd/src/Makefile.am
+++ b/glusterfsd/src/Makefile.am
@@ -6,14 +6,15 @@ endif
glusterfsd_SOURCES = glusterfsd.c glusterfsd-mgmt.c
glusterfsd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
- $(top_builddir)/rpc/xdr/src/libgfxdr.la ${GF_LDADD}
-glusterfsd_LDFLAGS = $(GF_LDFLAGS) $(LIB_DL)
+ $(top_builddir)/rpc/xdr/src/libgfxdr.la $(GF_LDADD) $(LIB_DL)
+glusterfsd_LDFLAGS = $(GF_LDFLAGS)
gf_attach_SOURCES = gf_attach.c
gf_attach_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/api/src/libgfapi.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la
+gf_attach_LDFLAGS = $(GF_LDFLAGS)
noinst_HEADERS = glusterfsd.h glusterfsd-mem-types.h glusterfsd-messages.h
diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c
index e688c3c9eb4..c553b0b1f61 100644
--- a/glusterfsd/src/gf_attach.c
+++ b/glusterfsd/src/gf_attach.c
@@ -19,9 +19,16 @@
#include "xdr-generic.h"
#include "glusterd1-xdr.h"
+/* In seconds */
+#define CONNECT_TIMEOUT 60
+#define REPLY_TIMEOUT 120
+
int done = 0;
int rpc_status;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+
struct rpc_clnt_procedure gf_attach_actors[GLUSTERD_BRICK_MAXVALUE] = {
[GLUSTERD_BRICK_NULL] = {"NULL", NULL},
[GLUSTERD_BRICK_OP] = {"BRICK_OP", NULL},
@@ -38,8 +45,12 @@ struct rpc_clnt_program gf_attach_prog = {
int32_t
my_callback(struct rpc_req *req, struct iovec *iov, int count, void *frame)
{
+ pthread_mutex_lock(&mutex);
rpc_status = req->rpc_status;
done = 1;
+ /* Signal main thread which is the only waiter */
+ pthread_cond_signal(&cond);
+ pthread_mutex_unlock(&mutex);
return 0;
}
@@ -48,6 +59,7 @@ int
send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
{
int ret = -1;
+ struct timespec ts;
struct iobuf *iobuf = NULL;
struct iobref *iobref = NULL;
struct iovec iov = {
@@ -57,7 +69,6 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
call_frame_t *frame = NULL;
gd1_mgmt_brick_op_req brick_req;
void *req = &brick_req;
- int i;
brick_req.op = op;
brick_req.name = path;
@@ -75,10 +86,6 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
if (!iobref)
goto out;
- frame = create_frame(this, this->ctx->pool);
- if (!frame)
- goto out;
-
iobref_add(iobref, iobuf);
iov.iov_base = iobuf->ptr;
@@ -91,20 +98,44 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
iov.iov_len = ret;
- for (i = 0; i < 60; ++i) {
- if (rpc->conn.connected) {
- break;
- }
- sleep(1);
+ /* Wait for connection */
+ timespec_now_realtime(&ts);
+ ts.tv_sec += CONNECT_TIMEOUT;
+ pthread_mutex_lock(&rpc->conn.lock);
+ {
+ while (!rpc->conn.connected)
+ if (pthread_cond_timedwait(&rpc->conn.cond, &rpc->conn.lock, &ts) ==
+ ETIMEDOUT) {
+ fprintf(stderr, "timeout waiting for RPC connection\n");
+ pthread_mutex_unlock(&rpc->conn.lock);
+ return EXIT_FAILURE;
+ }
+ }
+ pthread_mutex_unlock(&rpc->conn.lock);
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
}
/* Send the msg */
ret = rpc_clnt_submit(rpc, &gf_attach_prog, op, my_callback, &iov, 1, NULL,
0, iobref, frame, NULL, 0, NULL, 0, NULL);
if (!ret) {
- for (i = 0; !done && (i < 120); ++i) {
- sleep(1);
+ /* OK, wait for callback */
+ timespec_now_realtime(&ts);
+ ts.tv_sec += REPLY_TIMEOUT;
+ pthread_mutex_lock(&mutex);
+ {
+ while (!done)
+ if (pthread_cond_timedwait(&cond, &mutex, &ts) == ETIMEDOUT) {
+ fprintf(stderr, "timeout waiting for RPC reply\n");
+ pthread_mutex_unlock(&mutex);
+ return EXIT_FAILURE;
+ }
}
+ pthread_mutex_unlock(&mutex);
}
out:
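
The gf_attach.c hunk above replaces the old one-second sleep/poll loops with condition variables and absolute deadlines (CONNECT_TIMEOUT, REPLY_TIMEOUT). Below is a minimal, self-contained sketch of that wait pattern; the names (worker, flag) and the 5-second deadline are illustrative only and are not taken from gf_attach.c:

    /* Minimal sketch of the absolute-deadline condition-variable wait used
     * above; worker/flag and the 5-second deadline are illustrative only. */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int flag = 0;

    static void *
    worker(void *arg)
    {
        (void)arg;
        sleep(1); /* stand-in for the RPC callback arriving */
        pthread_mutex_lock(&lock);
        flag = 1;
        pthread_cond_signal(&cv); /* wake the single waiter */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int
    main(void)
    {
        pthread_t t;
        struct timespec ts;

        pthread_create(&t, NULL, worker, NULL);

        /* absolute deadline: now + 5 seconds */
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 5;

        pthread_mutex_lock(&lock);
        while (!flag) {
            if (pthread_cond_timedwait(&cv, &lock, &ts) == ETIMEDOUT) {
                fprintf(stderr, "timeout waiting for worker\n");
                pthread_mutex_unlock(&lock);
                pthread_join(t, NULL);
                return 1;
            }
        }
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        printf("signalled before the deadline\n");
        return 0;
    }

Unlike the removed sleep(1) loops, the waiter wakes up as soon as the callback signals, and the timeout is an absolute deadline rather than a count of one-second naps.
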
diff --git a/glusterfsd/src/glusterfsd-messages.h b/glusterfsd/src/glusterfsd-messages.h
index 209279d051b..0cdbffa71ea 100644
--- a/glusterfsd/src/glusterfsd-messages.h
+++ b/glusterfsd/src/glusterfsd-messages.h
@@ -34,6 +34,60 @@ GLFS_MSGID(
glusterfsd_msg_28, glusterfsd_msg_29, glusterfsd_msg_30, glusterfsd_msg_31,
glusterfsd_msg_32, glusterfsd_msg_33, glusterfsd_msg_34, glusterfsd_msg_35,
glusterfsd_msg_36, glusterfsd_msg_37, glusterfsd_msg_38, glusterfsd_msg_39,
- glusterfsd_msg_40, glusterfsd_msg_41, glusterfsd_msg_42, glusterfsd_msg_43);
+ glusterfsd_msg_40, glusterfsd_msg_41, glusterfsd_msg_42, glusterfsd_msg_43,
+ glusterfsd_msg_029, glusterfsd_msg_041, glusterfsd_msg_042);
+
+#define glusterfsd_msg_1_STR "Could not create absolute mountpoint path"
+#define glusterfsd_msg_2_STR "Could not get current working directory"
+#define glusterfsd_msg_4_STR "failed to set mount-point to options dictionary"
+#define glusterfsd_msg_3_STR "failed to set dict value for key"
+#define glusterfsd_msg_5_STR "failed to set disable for key"
+#define glusterfsd_msg_6_STR "failed to set enable for key"
+#define glusterfsd_msg_7_STR \
+ "Not a client process, not performing mount operation"
+#define glusterfsd_msg_8_STR "MOUNT_POINT initialization failed"
+#define glusterfsd_msg_9_STR "loading volume file failed"
+#define glusterfsd_msg_10_STR "xlator option is invalid"
+#define glusterfsd_msg_11_STR "Fetching the volume file from server..."
+#define glusterfsd_msg_12_STR "volume initialization failed"
+#define glusterfsd_msg_34_STR "memory init failed"
+#define glusterfsd_msg_13_STR "ERROR: glusterfs uuid generation failed"
+#define glusterfsd_msg_14_STR "ERROR: glusterfs pool creation failed"
+#define glusterfsd_msg_15_STR \
+ "ERROR: '--volfile-id' is mandatory if '-s' OR '--volfile-server' option " \
+ "is given"
+#define glusterfsd_msg_16_STR "ERROR: parsing the volfile failed"
+#define glusterfsd_msg_33_STR \
+ "obsolete option '--volfile-max-fecth-attempts or fetch-attempts' was " \
+ "provided"
+#define glusterfsd_msg_17_STR "pidfile open failed"
+#define glusterfsd_msg_18_STR "pidfile lock failed"
+#define glusterfsd_msg_20_STR "pidfile truncation failed"
+#define glusterfsd_msg_21_STR "pidfile write failed"
+#define glusterfsd_msg_22_STR "failed to execute pthread_sigmask"
+#define glusterfsd_msg_23_STR "failed to create pthread"
+#define glusterfsd_msg_24_STR "daemonization failed"
+#define glusterfsd_msg_25_STR "mount failed"
+#define glusterfsd_msg_26_STR "failed to construct the graph"
+#define glusterfsd_msg_27_STR "fuse xlator cannot be specified in volume file"
+#define glusterfsd_msg_28_STR "Cannot reach volume specification file"
+#define glusterfsd_msg_29_STR "ERROR: glusterfsd context not initialized"
+#define glusterfsd_msg_43_STR \
+ "command line argument --brick-mux is valid only for brick process"
+#define glusterfsd_msg_029_STR "failed to create command line string"
+#define glusterfsd_msg_30_STR "Started running version"
+#define glusterfsd_msg_31_STR "Could not create new sync-environment"
+#define glusterfsd_msg_40_STR "No change in volfile, continuing"
+#define glusterfsd_msg_39_STR "Unable to create/delete temporary file"
+#define glusterfsd_msg_38_STR \
+ "Not processing brick-op since volume graph is not yet active"
+#define glusterfsd_msg_35_STR "rpc req buffer unserialization failed"
+#define glusterfsd_msg_36_STR "problem in xlator loading"
+#define glusterfsd_msg_37_STR "failed to get dict value"
+#define glusterfsd_msg_41_STR "received attach request for volfile"
+#define glusterfsd_msg_42_STR "failed to unserialize xdata to dictionary"
+#define glusterfsd_msg_041_STR "can't detach. file not found"
+#define glusterfsd_msg_042_STR \
+ "couldnot detach old graph. Aborting the reconfiguration operation"
#endif /* !_GLUSTERFSD_MESSAGES_H_ */
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 027ff618992..eaf6796e4c3 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -45,8 +45,6 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx);
int
glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp);
int
-glusterfs_graph_unknown_options(glusterfs_graph_t *graph);
-int
emancipate(glusterfs_ctx_t *ctx, int ret);
int
glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
@@ -65,6 +63,11 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj);
gf_boolean_t
mgmt_is_multiplexed_daemon(char *name);
+
+static int
+glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
+ gf_boolean_t write_test);
+
int
mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
{
@@ -102,8 +105,8 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
sizeof(volfile_obj->volfile_checksum))) {
UNLOCK(&ctx->volfile_lock);
- gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40,
- "No change in volfile, continuing");
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40,
+ NULL);
goto out;
}
volfile_tmp = volfile_obj;
@@ -115,8 +118,8 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
tmp_fd = mkstemp(template);
if (-1 == tmp_fd) {
UNLOCK(&ctx->volfile_lock);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
- "Unable to create temporary file: %s", template);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
+ "create template=%s", template, NULL);
ret = -1;
goto out;
}
@@ -126,8 +129,8 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
*/
ret = sys_unlink(template);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
- "Unable to delete temporary file: %s", template);
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
+ "delete template=%s", template, NULL);
ret = 0;
}
@@ -202,6 +205,7 @@ glusterfs_serialize_reply(rpcsvc_request_t *req, void *arg,
retlen = xdr_serialize_generic(*outmsg, arg, xdrproc);
if (retlen == -1) {
gf_log(THIS->name, GF_LOG_ERROR, "Failed to encode message");
+ GF_FREE(iob);
goto ret;
}
@@ -475,10 +479,6 @@ glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
dict_t *dict = NULL;
xlator_t *this = NULL;
gf1_cli_top_op top_op = 0;
- uint32_t blk_size = 0;
- uint32_t blk_count = 0;
- double time = 0;
- double throughput = 0;
xlator_t *any = NULL;
xlator_t *xlator = NULL;
glusterfs_graph_t *active = NULL;
@@ -511,35 +511,23 @@ glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
}
ret = dict_get_int32(dict, "top-op", (int32_t *)&top_op);
- if ((!ret) &&
- (GF_CLI_TOP_READ_PERF == top_op || GF_CLI_TOP_WRITE_PERF == top_op)) {
- ret = dict_get_uint32(dict, "blk-size", &blk_size);
- if (ret)
- goto cont;
- ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
- if (ret)
- goto cont;
-
- if (GF_CLI_TOP_READ_PERF == top_op) {
- ret = glusterfs_volume_top_read_perf(
- blk_size, blk_count, xlator_req.name, &throughput, &time);
- } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
- ret = glusterfs_volume_top_write_perf(
- blk_size, blk_count, xlator_req.name, &throughput, &time);
- }
- if (ret)
- goto cont;
- ret = dict_set_double(dict, "time", time);
- if (ret)
- goto cont;
- ret = dict_set_double(dict, "throughput", throughput);
- if (ret)
- goto cont;
+ if (ret)
+ goto cont;
+ if (GF_CLI_TOP_READ_PERF == top_op) {
+ ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_false);
+ } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
+ ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_true);
}
+
cont:
ctx = glusterfsd_ctx;
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
any = active->first;
xlator = get_xlator_by_name(any, xlator_req.name);
@@ -576,13 +564,12 @@ out:
return ret;
}
-int
-glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
+static int
+glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
+ gf_boolean_t write_test)
{
int32_t fd = -1;
- int32_t input_fd = -1;
+ int32_t output_fd = -1;
char export_path[PATH_MAX] = {
0,
};
@@ -590,46 +577,44 @@ glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
int32_t iter = 0;
int32_t ret = -1;
uint64_t total_blks = 0;
+ uint32_t blk_size;
+ uint32_t blk_count;
+ double throughput = 0;
+ double time = 0;
struct timeval begin, end = {
0,
};
GF_ASSERT(brick_path);
- GF_ASSERT(throughput);
- GF_ASSERT(time);
- if (!(blk_size > 0) || !(blk_count > 0))
- goto out;
- snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
- ".gf-tmp-stats-perf");
+ ret = dict_get_uint32(dict, "blk-size", &blk_size);
+ if (ret)
+ goto out;
+ ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
+ if (ret)
+ goto out;
- fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ if (!(blk_size > 0) || !(blk_count > 0))
goto out;
- }
- buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
+ buf = GF_CALLOC(1, blk_size * sizeof(*buf), gf_common_mt_char);
if (!buf) {
ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
goto out;
}
- input_fd = open("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
+ snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
+ ".gf-tmp-stats-perf");
+ fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
+ if (-1 == fd) {
ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Unable to open input file");
+ gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
goto out;
}
gettimeofday(&begin, NULL);
for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read(input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
ret = sys_write(fd, buf, blk_size);
if (ret != blk_size) {
ret = -1;
@@ -637,77 +622,36 @@ glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
}
total_blks += ret;
}
- ret = 0;
+ gettimeofday(&end, NULL);
if (total_blks != ((uint64_t)blk_size * blk_count)) {
gf_log("glusterd", GF_LOG_WARNING, "Error in write");
ret = -1;
goto out;
}
- gettimeofday(&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
+ time = gf_tvdiff(&begin, &end);
+ throughput = total_blks / time;
gf_log("glusterd", GF_LOG_INFO,
"Throughput %.2f Mbps time %.2f secs "
"bytes written %" PRId64,
- *throughput, *time, total_blks);
-
-out:
- if (fd >= 0)
- sys_close(fd);
- if (input_fd >= 0)
- sys_close(input_fd);
- GF_FREE(buf);
- sys_unlink(export_path);
-
- return ret;
-}
-
-int
-glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
-{
- int32_t fd = -1;
- int32_t input_fd = -1;
- int32_t output_fd = -1;
- char export_path[PATH_MAX] = {
- 0,
- };
- char *buf = NULL;
- int32_t iter = 0;
- int32_t ret = -1;
- uint64_t total_blks = 0;
- struct timeval begin, end = {
- 0,
- };
+ throughput, time, total_blks);
- GF_ASSERT(brick_path);
- GF_ASSERT(throughput);
- GF_ASSERT(time);
- if (!(blk_size > 0) || !(blk_count > 0))
- goto out;
-
- snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
- ".gf-tmp-stats-perf");
- fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ /* if it's a write test, we are done. Otherwise, we continue to the read
+ * part */
+ if (write_test == _gf_true) {
+ ret = 0;
goto out;
}
- buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
- if (!buf) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
+ ret = sys_fsync(fd);
+ if (ret) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
goto out;
}
-
- input_fd = open("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
+ ret = sys_lseek(fd, 0L, 0);
+ if (ret != 0) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open input file");
goto out;
}
@@ -718,30 +662,8 @@ glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
goto out;
}
- for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read(input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- ret = sys_write(fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- }
+ total_blks = 0;
- ret = sys_fsync(fd);
- if (ret) {
- gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
- goto out;
- }
- ret = sys_lseek(fd, 0L, 0);
- if (ret != 0) {
- gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
- ret = -1;
- goto out;
- }
gettimeofday(&begin, NULL);
for (iter = 0; iter < blk_count; iter++) {
ret = sys_read(fd, buf, blk_size);
@@ -756,31 +678,36 @@ glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
}
total_blks += ret;
}
- ret = 0;
+ gettimeofday(&end, NULL);
if (total_blks != ((uint64_t)blk_size * blk_count)) {
ret = -1;
gf_log("glusterd", GF_LOG_WARNING, "Error in read");
goto out;
}
- gettimeofday(&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
+ time = gf_tvdiff(&begin, &end);
+ throughput = total_blks / time;
gf_log("glusterd", GF_LOG_INFO,
"Throughput %.2f Mbps time %.2f secs "
"bytes read %" PRId64,
- *throughput, *time, total_blks);
-
+ throughput, time, total_blks);
+ ret = 0;
out:
if (fd >= 0)
sys_close(fd);
- if (input_fd >= 0)
- sys_close(input_fd);
if (output_fd >= 0)
sys_close(output_fd);
GF_FREE(buf);
sys_unlink(export_path);
-
+ if (ret == 0) {
+ ret = dict_set_double(dict, "time", time);
+ if (ret)
+ goto end;
+ ret = dict_set_double(dict, "throughput", throughput);
+ if (ret)
+ goto end;
+ }
+end:
return ret;
}
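
The merged glusterfs_volume_top_perf() above times the block loop with gettimeofday() and derives throughput from total bytes over the elapsed time returned by gf_tvdiff(). A rough standalone sketch of that arithmetic follows; gf_tvdiff() itself is not shown in this patch, so the tv_diff_sec helper here, and its assumption that the result is in seconds, is illustrative only:

    /* Illustrative only: tv_diff_sec approximates what a gf_tvdiff-style
     * helper would return (elapsed time, assumed here to be in seconds). */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/time.h>

    static double
    tv_diff_sec(const struct timeval *begin, const struct timeval *end)
    {
        return (double)(end->tv_sec - begin->tv_sec) +
               (double)(end->tv_usec - begin->tv_usec) / 1e6;
    }

    int
    main(void)
    {
        struct timeval begin, end;
        char buf[4096];
        uint64_t total_bytes = 0;
        double secs, throughput = 0;

        gettimeofday(&begin, NULL);
        for (int i = 0; i < (1 << 18); i++) { /* stand-in for the I/O loop */
            memset(buf, 0, sizeof(buf));
            total_bytes += sizeof(buf);
        }
        gettimeofday(&end, NULL);

        secs = tv_diff_sec(&begin, &end);
        if (secs > 0)
            throughput = (double)total_bytes / secs / 1e6; /* MB per second */
        printf("processed %" PRIu64 " bytes in %.6f secs (%.2f MB/s)\n",
               total_bytes, secs, throughput);
        return 0;
    }
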
@@ -796,7 +723,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
xlator_t *xlator = NULL;
xlator_t *any = NULL;
dict_t *output = NULL;
- char key[2048] = {0};
+ char key[32] = {0};
+ int len;
char *xname = NULL;
glusterfs_ctx_t *ctx = NULL;
glusterfs_graph_t *active = NULL;
@@ -820,10 +748,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
active = ctx->active;
if (!active) {
ret = -1;
- gf_msg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
- "Not processing brick-op no. %d since volume graph is "
- "not yet active.",
- xlator_req.op);
+ gf_smsg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
+ "brick-op_no.=%d", xlator_req.op, NULL);
goto out;
}
any = active->first;
@@ -848,8 +774,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
}
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "xl-%d", i);
- ret = dict_get_str(input, key, &xname);
+ len = snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_strn(input, key, len, &xname);
if (ret) {
gf_log(this->name, GF_LOG_ERROR,
"Couldn't get "
@@ -867,8 +793,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
}
}
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "xl-%d", i);
- ret = dict_get_str(input, key, &xname);
+ len = snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_strn(input, key, len, &xname);
xlator = xlator_search_by_name(any, xname);
XLATOR_NOTIFY(ret, xlator, GF_EVENT_TRANSLATOR_OP, input, output);
/* If notify fails for an xlator we need to capture it but
@@ -942,8 +868,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
xlator_req.input.input_len, &input);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35,
- "rpc req buffer unserialization failed.");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35, NULL);
goto out;
}
@@ -952,8 +877,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
xlator = xlator_search_by_name(any, xname);
if (!xlator) {
snprintf(msg, sizeof(msg), "xlator %s is not loaded", xname);
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36,
- "problem in xlator loading.");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36, NULL);
goto out;
}
@@ -966,8 +890,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
ret = dict_get_str(input, "scrub-value", &scrub_opt);
if (ret) {
snprintf(msg, sizeof(msg), "Failed to get scrub value");
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37,
- "failed to get dict value");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37, NULL);
ret = -1;
goto out;
}
@@ -1034,44 +957,49 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
}
ret = 0;
+ if (!this->ctx->active) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "got attach for %s but no active graph", xlator_req.name);
+ goto post_unlock;
+ }
+
+ gf_log(this->name, GF_LOG_INFO, "got attach for %s", xlator_req.name);
+
LOCK(&ctx->volfile_lock);
{
- if (this->ctx->active) {
- gf_log(this->name, GF_LOG_INFO, "got attach for %s",
- xlator_req.name);
- ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
- &newgraph);
- if (!ret && (newgraph && newgraph->first)) {
- nextchild = newgraph->first;
- ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- LG_MSG_EVENT_NOTIFY_FAILED,
- "Parent up notification "
- "failed for %s ",
- nextchild->name);
- goto out;
- }
- /* we need a protocol/server xlator as
- * nextchild
- */
- srv_xl = this->ctx->active->first;
- srv_conf = (server_conf_t *)srv_xl->private;
- rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
+ ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
+ &newgraph);
+ if (!ret && (newgraph && newgraph->first)) {
+ nextchild = newgraph->first;
+ ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED,
+ "event=ParentUp", "name=%s", nextchild->name, NULL);
+ goto unlock;
}
- } else {
- gf_log(this->name, GF_LOG_WARNING,
- "got attach for %s but no active graph", xlator_req.name);
+ /* we need a protocol/server xlator as
+ * nextchild
+ */
+ srv_xl = this->ctx->active->first;
+ srv_conf = (server_conf_t *)srv_xl->private;
+ rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
}
if (ret) {
ret = -1;
}
-
- glusterfs_translator_info_response_send(req, ret, NULL, NULL);
-
- out:
+ ret = glusterfs_translator_info_response_send(req, ret, NULL, NULL);
+ if (ret) {
+ /* Response sent back to glusterd, req is already destroyed. So
+ * resetting the ret to 0. Otherwise another response will be
+ * sent from rpcsvc_check_and_reply_error, which would lead to
+ * a double resource leak.
+ */
+ ret = 0;
+ }
+ unlock:
UNLOCK(&ctx->volfile_lock);
}
+post_unlock:
if (xlator_req.dict.dict_val)
free(xlator_req.dict.dict_val);
free(xlator_req.input.input_val);
@@ -1088,14 +1016,12 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
0,
};
xlator_t *this = NULL;
- glusterfs_ctx_t *ctx = NULL;
dict_t *dict = NULL;
GF_ASSERT(req);
this = THIS;
GF_ASSERT(this);
- ctx = this->ctx;
ret = xdr_to_generic(req->msg[0], &xlator_req,
(xdrproc_t)xdr_gd1_mgmt_brick_op_req);
@@ -1105,10 +1031,8 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
goto out;
}
- gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41,
- "received attach "
- "request for volfile-id=%s",
- xlator_req.name);
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, "volfile-id=%s",
+ xlator_req.name, NULL);
dict = dict_new();
if (!dict) {
@@ -1120,22 +1044,16 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len,
&dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
- "failed to unserialize xdata to dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, NULL);
goto out;
}
dict->extra_stdfree = xlator_req.dict.dict_val;
ret = 0;
- if (ctx->active) {
- ret = mgmt_process_volfile(xlator_req.input.input_val,
- xlator_req.input.input_len, xlator_req.name,
- dict);
- } else {
- gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
- "got attach for %s but no active graph", xlator_req.name);
- }
+ ret = mgmt_process_volfile(xlator_req.input.input_val,
+ xlator_req.input.input_len, xlator_req.name,
+ dict);
out:
if (dict)
dict_unref(dict);
@@ -1154,8 +1072,8 @@ glusterfs_handle_svc_detach(rpcsvc_request_t *req)
0,
};
ssize_t ret;
- glusterfs_ctx_t *ctx = NULL;
gf_volfile_t *volfile_obj = NULL;
+ glusterfs_ctx_t *ctx = NULL;
gf_volfile_t *volfile_tmp = NULL;
ret = xdr_to_generic(req->msg[0], &xlator_req,
@@ -1178,8 +1096,8 @@ glusterfs_handle_svc_detach(rpcsvc_request_t *req)
if (!volfile_tmp) {
UNLOCK(&ctx->volfile_lock);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_41,
- "can't detach %s - not found", xlator_req.name);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_041, "name=%s",
+ xlator_req.name, NULL);
/*
* Used to be -ENOENT. However, the caller asked us to
* make sure it's down and if it's already down that's
@@ -1188,12 +1106,12 @@ glusterfs_handle_svc_detach(rpcsvc_request_t *req)
ret = 0;
goto out;
}
+ /* coverity[ORDER_REVERSAL] */
ret = glusterfs_process_svc_detach(ctx, volfile_tmp);
if (ret) {
UNLOCK(&ctx->volfile_lock);
- gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_41,
- "Could not detach "
- "old graph. Aborting the reconfiguration operation");
+ gf_smsg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_042,
+ NULL);
goto out;
}
}
@@ -1250,10 +1168,8 @@ glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
goto out;
if (statbuf.st_size > GF_UNIT_MB) {
- gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
- "Allocated size exceeds expectation: "
- "reconsider logic (%" PRId64 ")",
- statbuf.st_size);
+ gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
+ "reconsider logic (%" PRId64 ")", statbuf.st_size, NULL);
}
msg = GF_CALLOC(1, (statbuf.st_size + 1), gf_common_mt_char);
if (!msg)
@@ -1571,10 +1487,12 @@ glusterfs_handle_node_status(rpcsvc_request_t *req)
}
any = active->first;
- if ((cmd & GF_CLI_STATUS_NFS) != 0)
- ret = gf_asprintf(&node_name, "%s", "nfs-server");
- else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ if ((cmd & GF_CLI_STATUS_SHD) != 0)
ret = gf_asprintf(&node_name, "%s", "glustershd");
+#ifdef BUILD_GNFS
+ else if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf(&node_name, "%s", "nfs-server");
+#endif
else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
ret = gf_asprintf(&node_name, "%s", "quotad");
else if ((cmd & GF_CLI_STATUS_BITD) != 0)
@@ -1634,7 +1552,7 @@ glusterfs_handle_node_status(rpcsvc_request_t *req)
break;
case GF_CLI_STATUS_CLIENTS:
- // clients not availbale for SHD
+ // clients not available for SHD
if ((cmd & GF_CLI_STATUS_SHD) != 0)
break;
@@ -1917,6 +1835,11 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
ctx = glusterfsd_ctx;
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
top = active->first;
for (trav = top->children; trav; trav = trav->next) {
@@ -2021,14 +1944,14 @@ glusterfs_handle_rpc_msg(rpcsvc_request_t *req)
return ret;
}
-rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
- [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec},
- [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY,
- mgmt_cbk_event},
- [GF_CBK_STATEDUMP] = {"STATEDUMP", GF_CBK_STATEDUMP, mgmt_cbk_event},
+static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
+ [GF_CBK_FETCHSPEC] = {"FETCHSPEC", mgmt_cbk_spec, GF_CBK_FETCHSPEC},
+ [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", mgmt_cbk_event,
+ GF_CBK_EVENT_NOTIFY},
+ [GF_CBK_STATEDUMP] = {"STATEDUMP", mgmt_cbk_event, GF_CBK_STATEDUMP},
};
-struct rpcclnt_cb_program mgmt_cbk_prog = {
+static struct rpcclnt_cb_program mgmt_cbk_prog = {
.progname = "GlusterFS Callback",
.prognum = GLUSTER_CBK_PROGRAM,
.progver = GLUSTER_CBK_VERSION,
@@ -2036,7 +1959,7 @@ struct rpcclnt_cb_program mgmt_cbk_prog = {
.numactors = GF_CBK_MAXVALUE,
};
-char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
+static char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
[GF_PMAP_NULL] = "NULL",
[GF_PMAP_PORTBYBRICK] = "PORTBYBRICK",
[GF_PMAP_BRICKBYPORT] = "BRICKBYPORT",
@@ -2045,14 +1968,14 @@ char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
[GF_PMAP_SIGNUP] = "SIGNUP", /* DEPRECATED - DON'T USE! */
};
-rpc_clnt_prog_t clnt_pmap_prog = {
+static rpc_clnt_prog_t clnt_pmap_prog = {
.progname = "Gluster Portmap",
.prognum = GLUSTER_PMAP_PROGRAM,
.progver = GLUSTER_PMAP_VERSION,
.procnames = clnt_pmap_procs,
};
-char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+static char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_NULL] = "NULL",
[GF_HNDSK_SETVOLUME] = "SETVOLUME",
[GF_HNDSK_GETSPEC] = "GETSPEC",
@@ -2060,57 +1983,55 @@ char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
};
-rpc_clnt_prog_t clnt_handshake_prog = {
+static rpc_clnt_prog_t clnt_handshake_prog = {
.progname = "GlusterFS Handshake",
.prognum = GLUSTER_HNDSK_PROGRAM,
.progver = GLUSTER_HNDSK_VERSION,
.procnames = clnt_handshake_procs,
};
-rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
- [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL,
- glusterfs_handle_rpc_msg, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE,
- glusterfs_handle_terminate, NULL, 0, DRC_NA},
+static rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
+ [GLUSTERD_BRICK_NULL] = {"NULL", glusterfs_handle_rpc_msg, NULL,
+ GLUSTERD_BRICK_NULL, DRC_NA, 0},
+ [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", glusterfs_handle_terminate, NULL,
+ GLUSTERD_BRICK_TERMINATE, DRC_NA, 0},
[GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO",
- GLUSTERD_BRICK_XLATOR_INFO,
glusterfs_handle_translator_info_get, NULL,
- 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP,
- glusterfs_handle_translator_op, NULL, 0,
- DRC_NA},
- [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS,
- glusterfs_handle_brick_status, NULL, 0, DRC_NA},
+ GLUSTERD_BRICK_XLATOR_INFO, DRC_NA, 0},
+ [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP",
+ glusterfs_handle_translator_op, NULL,
+ GLUSTERD_BRICK_XLATOR_OP, DRC_NA, 0},
+ [GLUSTERD_BRICK_STATUS] = {"STATUS", glusterfs_handle_brick_status, NULL,
+ GLUSTERD_BRICK_STATUS, DRC_NA, 0},
[GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG",
- GLUSTERD_BRICK_XLATOR_DEFRAG,
- glusterfs_handle_defrag, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE,
- glusterfs_handle_nfs_profile, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS,
- glusterfs_handle_node_status, NULL, 0, DRC_NA},
+ glusterfs_handle_defrag, NULL,
+ GLUSTERD_BRICK_XLATOR_DEFRAG, DRC_NA, 0},
+ [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", glusterfs_handle_nfs_profile,
+ NULL, GLUSTERD_NODE_PROFILE, DRC_NA, 0},
+ [GLUSTERD_NODE_STATUS] = {"NFS STATUS", glusterfs_handle_node_status, NULL,
+ GLUSTERD_NODE_STATUS, DRC_NA, 0},
[GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP",
- GLUSTERD_VOLUME_BARRIER_OP,
- glusterfs_handle_volume_barrier_op, NULL, 0,
- DRC_NA},
- [GLUSTERD_BRICK_BARRIER] = {"BARRIER", GLUSTERD_BRICK_BARRIER,
- glusterfs_handle_barrier, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_BITROT] = {"BITROT", GLUSTERD_NODE_BITROT,
- glusterfs_handle_bitrot, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_ATTACH] = {"ATTACH", GLUSTERD_BRICK_ATTACH,
- glusterfs_handle_attach, NULL, 0, DRC_NA},
+ glusterfs_handle_volume_barrier_op, NULL,
+ GLUSTERD_VOLUME_BARRIER_OP, DRC_NA, 0},
+ [GLUSTERD_BRICK_BARRIER] = {"BARRIER", glusterfs_handle_barrier, NULL,
+ GLUSTERD_BRICK_BARRIER, DRC_NA, 0},
+ [GLUSTERD_NODE_BITROT] = {"BITROT", glusterfs_handle_bitrot, NULL,
+ GLUSTERD_NODE_BITROT, DRC_NA, 0},
+ [GLUSTERD_BRICK_ATTACH] = {"ATTACH", glusterfs_handle_attach, NULL,
+ GLUSTERD_BRICK_ATTACH, DRC_NA, 0},
- [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", GLUSTERD_DUMP_METRICS,
- glusterfs_handle_dump_metrics, NULL, 0, DRC_NA},
+ [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", glusterfs_handle_dump_metrics,
+ NULL, GLUSTERD_DUMP_METRICS, DRC_NA, 0},
- [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", GLUSTERD_SVC_ATTACH,
- glusterfs_handle_svc_attach, NULL, 0, DRC_NA},
+ [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", glusterfs_handle_svc_attach, NULL,
+ GLUSTERD_SVC_ATTACH, DRC_NA, 0},
- [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", GLUSTERD_SVC_DETACH,
- glusterfs_handle_svc_detach, NULL, 0, DRC_NA},
+ [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", glusterfs_handle_svc_detach, NULL,
+ GLUSTERD_SVC_DETACH, DRC_NA, 0},
};
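
The actor table above gains internal linkage and its initializers follow a new rpcsvc_actor_t field order. Field order is exactly what positional initializers are fragile against; the sketch below shows the designated-initializer form with an invented actor struct, since the real rpcsvc_actor_t layout is not part of this diff.

/* Sketch only: hypothetical actor table; field names are illustrative. */
#include <stddef.h>

typedef int (*demo_actor_fn)(void *req);

typedef struct {
    const char *procname;
    demo_actor_fn actor;
    int procnum;
} demo_actor_t;

enum { DEMO_PROC_NULL = 0, DEMO_PROC_STATUS, DEMO_PROC_MAX };

static int
demo_handle_status(void *req)
{
    (void)req;
    return 0;
}

/* designated field initializers stay correct even if the struct is
 * later reordered, unlike a purely positional table */
static demo_actor_t demo_actors[DEMO_PROC_MAX] = {
    [DEMO_PROC_STATUS] = {.procname = "STATUS",
                          .actor = demo_handle_status,
                          .procnum = DEMO_PROC_STATUS},
};

int
main(void)
{
    return demo_actors[DEMO_PROC_STATUS].actor(NULL);
}
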
-struct rpcsvc_program glusterfs_mop_prog = {
+static struct rpcsvc_program glusterfs_mop_prog = {
.progname = "Gluster Brick operations",
.prognum = GD_BRICK_PROGRAM,
.progver = GD_BRICK_VERSION,
@@ -2238,10 +2159,12 @@ mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
dict->extra_stdfree = rsp.xdata.xdata_val;
- /* glusterd2 only */
ret = dict_get_str(dict, "servers-list", &servers_list);
if (ret) {
- goto volfile;
+ /* Server list is set by glusterd at the time of getspec */
+ ret = dict_get_str(dict, GLUSTERD_BRICK_SERVERS, &servers_list);
+ if (ret)
+ goto volfile;
}
gf_log(frame->this->name, GF_LOG_INFO,
@@ -2291,8 +2214,8 @@ volfile:
tmp_fd = mkstemp(template);
if (-1 == tmp_fd) {
UNLOCK(&ctx->volfile_lock);
- gf_msg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
- "Unable to create temporary file: %s", template);
+ gf_smsg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
+ "create template=%s", template, NULL);
ret = -1;
goto post_unlock;
}
@@ -2302,8 +2225,8 @@ volfile:
*/
ret = sys_unlink(template);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
- "Unable to delete temporary file: %s", template);
+ gf_smsg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
+ "delete template=%s", template, NULL);
ret = 0;
}
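
The volfile handling above creates a scratch file with mkstemp() and unlinks it straight away, so the data is reachable only through the open descriptor and vanishes when that descriptor is closed. A self-contained sketch of the same idiom with plain POSIX calls (unlink() stands in for the sys_unlink() wrapper used in the diff):

/* Sketch: anonymous scratch file via mkstemp() + immediate unlink(). */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
    char template[] = "/tmp/demo-volfile.XXXXXX";
    int fd = mkstemp(template);      /* creates and opens a unique file */

    if (fd == -1) {
        perror("mkstemp");
        return 1;
    }
    /* unlink now: no name is left on disk, but the open fd keeps the
     * inode alive until close(), which is what the staging code relies on */
    if (unlink(template) != 0)
        perror("unlink");            /* non-fatal, mirroring the diff */

    if (write(fd, "volume demo\nend-volume\n", 23) == -1)
        perror("write");

    close(fd);                       /* inode released here */
    return 0;
}
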
@@ -2864,50 +2787,6 @@ out:
}
int
-glusterfs_listener_stop(glusterfs_ctx_t *ctx)
-{
- cmd_args_t *cmd_args = NULL;
- rpcsvc_t *rpc = NULL;
- rpcsvc_listener_t *listener = NULL;
- rpcsvc_listener_t *next = NULL;
- int ret = 0;
- xlator_t *this = NULL;
-
- GF_ASSERT(ctx);
-
- rpc = ctx->listener;
- ctx->listener = NULL;
-
- (void)rpcsvc_program_unregister(rpc, &glusterfs_mop_prog);
-
- list_for_each_entry_safe(listener, next, &rpc->listeners, list)
- {
- rpcsvc_listener_destroy(listener);
- }
-
- (void)rpcsvc_unregister_notify(rpc, glusterfs_rpcsvc_notify, THIS);
-
- GF_FREE(rpc);
-
- cmd_args = &ctx->cmd_args;
- if (cmd_args->sock_file) {
- ret = sys_unlink(cmd_args->sock_file);
- if (ret && (ENOENT == errno)) {
- ret = 0;
- }
- }
-
- if (ret) {
- this = THIS;
- gf_log(this->name, GF_LOG_ERROR,
- "Failed to unlink listener "
- "socket %s, error: %s",
- cmd_args->sock_file, strerror(errno));
- }
- return ret;
-}
-
-int
glusterfs_mgmt_notify(int32_t op, void *data, ...)
{
int ret = 0;
@@ -3139,9 +3018,6 @@ glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
int ret = -1;
int emancipate_ret = -1;
cmd_args_t *cmd_args = NULL;
- char brick_name[PATH_MAX] = {
- 0,
- };
cmd_args = &ctx->cmd_args;
@@ -3152,14 +3028,6 @@ glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
goto out;
}
- if (cmd_args->volfile_server_transport &&
- !strcmp(cmd_args->volfile_server_transport, "rdma")) {
- snprintf(brick_name, sizeof(brick_name), "%s.rdma",
- cmd_args->brick_name);
- req.brick = brick_name;
- } else
- req.brick = cmd_args->brick_name;
-
req.port = cmd_args->brick_port;
req.pid = (int)getpid(); /* only glusterd2 consumes this */
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index cf6d9a7215c..dae41f33fef 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -47,12 +47,6 @@
#include <malloc.h>
#endif
-#ifdef HAVE_MALLOC_STATS
-#ifdef DEBUG
-#include <mcheck.h>
-#endif
-#endif
-
#include <glusterfs/xlator.h>
#include <glusterfs/glusterfs.h>
#include <glusterfs/compat.h>
@@ -198,7 +192,7 @@ static struct argp_option gf_options[] = {
{"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN,
"Brick Port to be registered with Gluster portmapper"},
{"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, "BOOL", OPTION_ARG_OPTIONAL,
- "Do not purge the cache on file open"},
+ "Do not purge the cache on file open [default: false]"},
{"global-timer-wheel", ARGP_GLOBAL_TIMER_WHEEL, "BOOL", OPTION_ARG_OPTIONAL,
"Instantiate process global timer-wheel"},
{"thin-client", ARGP_THIN_CLIENT_KEY, 0, 0,
@@ -222,7 +216,10 @@ static struct argp_option gf_options[] = {
"Resolve all auxiliary groups in fuse translator (max 32 otherwise)"},
{"lru-limit", ARGP_FUSE_LRU_LIMIT_KEY, "N", 0,
"Set fuse module's limit for number of inodes kept in LRU list to N "
- "[default: 131072]"},
+ "[default: 65536]"},
+ {"invalidate-limit", ARGP_FUSE_INVALIDATE_LIMIT_KEY, "N", 0,
+ "Suspend inode invalidations implied by 'lru-limit' if the number of "
+ "outstanding invalidations reaches N"},
{"background-qlen", ARGP_FUSE_BACKGROUND_QLEN_KEY, "N", 0,
"Set fuse module's background queue length to N "
"[default: 64]"},
@@ -276,6 +273,9 @@ static struct argp_option gf_options[] = {
"attribute, dentry and page-cache. "
"Disable this only if same files/directories are not accessed across "
"two different mounts concurrently [default: \"on\"]"},
+ {"fuse-dev-eperm-ratelimit-ns", ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY,
+ "OPTIONS", OPTION_HIDDEN,
+ "rate limit reading from fuse device upon EPERM failure"},
{"brick-mux", ARGP_BRICK_MUX_KEY, 0, 0, "Enable brick mux. "},
{0, 0, 0, 0, "Miscellaneous Options:"},
{
@@ -292,8 +292,12 @@ int
glusterfs_mgmt_init(glusterfs_ctx_t *ctx);
int
glusterfs_listener_init(glusterfs_ctx_t *ctx);
-int
-glusterfs_listener_stop(glusterfs_ctx_t *ctx);
+
+#define DICT_SET_VAL(method, dict, key, val, msgid) \
+ if (method(dict, key, val)) { \
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, msgid, "key=%s", key); \
+ goto err; \
+ }
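
DICT_SET_VAL above collapses the repeated set/log/goto pattern that the remainder of this hunk removes one option at a time. The standalone sketch below keeps the same macro shape but uses stand-in dict and log helpers, so the expansion is easy to follow; the real dict_set_*() and gf_smsg() signatures are not reproduced here.

/* Sketch only: demo_* names stand in for dict_set_*() / gf_smsg(). */
#include <stdio.h>

static int
demo_set_str(const char *key, const char *val)
{
    printf("set %s=%s\n", key, val);
    return 0; /* non-zero would mean failure */
}

#define DEMO_SET_VAL(method, key, val, msgid)                                  \
    if (method(key, val)) {                                                    \
        fprintf(stderr, "%s: key=%s\n", msgid, key);                           \
        goto err;                                                              \
    }

int
main(void)
{
    /* each option becomes one line instead of a five-line if-block */
    DEMO_SET_VAL(demo_set_str, "mount-point", "/mnt/demo", "demo_msg_3");
    DEMO_SET_VAL(demo_set_str, "acl", "on", "demo_msg_3");
    return 0;

err:
    return 1;
}

Note that, both here and in the diff, the macro expands to a bare if statement, so callers should avoid using it as the body of an outer if/else without braces.
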
static int
set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
@@ -315,172 +319,97 @@ set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
ret = gf_asprintf(&mount_point, "%s/%s", cwd,
cmd_args->mount_point);
if (ret == -1) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_1,
- "Could not create absolute mountpoint "
- "path");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_1,
+ "gf_asprintf failed", NULL);
goto err;
}
} else {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_2,
- "Could not get current working directory");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_2,
+ "getcwd failed", NULL);
goto err;
}
- } else
- mount_point = gf_strdup(cmd_args->mount_point);
- ret = dict_set_dynstr(options, ZR_MOUNTPOINT_OPT, mount_point);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3,
- "failed to set mount-point to options dictionary");
- goto err;
+ } else {
+ mount_point = gf_strdup(cmd_args->mount_point);
}
+ DICT_SET_VAL(dict_set_dynstr_sizen, options, ZR_MOUNTPOINT_OPT, mount_point,
+ glusterfsd_msg_3);
if (cmd_args->fuse_attribute_timeout >= 0) {
- ret = dict_set_double(options, ZR_ATTR_TIMEOUT_OPT,
- cmd_args->fuse_attribute_timeout);
-
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_4,
- "failed to set dict value "
- "for key " ZR_ATTR_TIMEOUT_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_double, options, ZR_ATTR_TIMEOUT_OPT,
+ cmd_args->fuse_attribute_timeout, glusterfsd_msg_3);
}
if (cmd_args->fuse_entry_timeout >= 0) {
- ret = dict_set_double(options, ZR_ENTRY_TIMEOUT_OPT,
- cmd_args->fuse_entry_timeout);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_ENTRY_TIMEOUT_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_double, options, ZR_ENTRY_TIMEOUT_OPT,
+ cmd_args->fuse_entry_timeout, glusterfsd_msg_3);
}
if (cmd_args->fuse_negative_timeout >= 0) {
- ret = dict_set_double(options, ZR_NEGATIVE_TIMEOUT_OPT,
- cmd_args->fuse_negative_timeout);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_NEGATIVE_TIMEOUT_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_double, options, ZR_NEGATIVE_TIMEOUT_OPT,
+ cmd_args->fuse_negative_timeout, glusterfsd_msg_3);
}
if (cmd_args->client_pid_set) {
- ret = dict_set_int32(options, "client-pid", cmd_args->client_pid);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key client-pid");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "client-pid",
+ cmd_args->client_pid, glusterfsd_msg_3);
}
if (cmd_args->uid_map_root) {
- ret = dict_set_int32(options, "uid-map-root", cmd_args->uid_map_root);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "uid-map-root");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "uid-map-root",
+ cmd_args->uid_map_root, glusterfsd_msg_3);
}
if (cmd_args->volfile_check) {
- ret = dict_set_int32(options, ZR_STRICT_VOLFILE_CHECK,
- cmd_args->volfile_check);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_STRICT_VOLFILE_CHECK);
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, ZR_STRICT_VOLFILE_CHECK,
+ cmd_args->volfile_check, glusterfsd_msg_3);
}
if (cmd_args->dump_fuse) {
- ret = dict_set_static_ptr(options, ZR_DUMP_FUSE, cmd_args->dump_fuse);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_DUMP_FUSE);
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, ZR_DUMP_FUSE,
+ cmd_args->dump_fuse, glusterfsd_msg_3);
}
if (cmd_args->acl) {
- ret = dict_set_static_ptr(options, "acl", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key acl");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "acl", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->selinux) {
- ret = dict_set_static_ptr(options, "selinux", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key selinux");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "selinux", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->capability) {
- ret = dict_set_static_ptr(options, "capability", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key capability");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "capability", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->aux_gfid_mount) {
- ret = dict_set_static_ptr(options, "virtual-gfid-access", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "aux-gfid-mount");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "virtual-gfid-access", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->enable_ino32) {
- ret = dict_set_static_ptr(options, "enable-ino32", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "enable-ino32");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "enable-ino32", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->read_only) {
- ret = dict_set_static_ptr(options, "read-only", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key read-only");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "read-only", "on",
+ glusterfsd_msg_3);
}
switch (cmd_args->fopen_keep_cache) {
case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "fopen-keep-cache", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "fopen-keep-cache");
- goto err;
- }
+
+ DICT_SET_VAL(dict_set_static_ptr, options, "fopen-keep-cache", "on",
+ glusterfsd_msg_3);
break;
case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "fopen-keep-cache", "off");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "fopen-keep-cache");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "fopen-keep-cache",
+ "off", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* default */
default:
gf_msg_debug("glusterfsd", 0, "fopen-keep-cache mode %d",
cmd_args->fopen_keep_cache);
@@ -488,72 +417,43 @@ set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
}
if (cmd_args->gid_timeout_set) {
- ret = dict_set_int32(options, "gid-timeout", cmd_args->gid_timeout);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key gid-timeout");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "gid-timeout",
+ cmd_args->gid_timeout, glusterfsd_msg_3);
}
if (cmd_args->resolve_gids) {
- ret = dict_set_static_ptr(options, "resolve-gids", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "resolve-gids");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "resolve-gids", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->lru_limit >= 0) {
- ret = dict_set_int32(options, "lru-limit", cmd_args->lru_limit);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "lru-limit");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "lru-limit",
+ cmd_args->lru_limit, glusterfsd_msg_3);
+ }
+
+ if (cmd_args->invalidate_limit >= 0) {
+ DICT_SET_VAL(dict_set_int32_sizen, options, "invalidate-limit",
+ cmd_args->invalidate_limit, glusterfsd_msg_3);
}
if (cmd_args->background_qlen) {
- ret = dict_set_int32(options, "background-qlen",
- cmd_args->background_qlen);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "background-qlen");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "background-qlen",
+ cmd_args->background_qlen, glusterfsd_msg_3);
}
if (cmd_args->congestion_threshold) {
- ret = dict_set_int32(options, "congestion-threshold",
- cmd_args->congestion_threshold);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "congestion-threshold");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "congestion-threshold",
+ cmd_args->congestion_threshold, glusterfsd_msg_3);
}
switch (cmd_args->fuse_direct_io_mode) {
case GF_OPTION_DISABLE: /* disable */
- ret = dict_set_static_ptr(options, ZR_DIRECT_IO_OPT, "disable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
- "failed to set 'disable' for key " ZR_DIRECT_IO_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, ZR_DIRECT_IO_OPT,
+ "disable", glusterfsd_msg_3);
break;
case GF_OPTION_ENABLE: /* enable */
- ret = dict_set_static_ptr(options, ZR_DIRECT_IO_OPT, "enable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
- "failed to set 'enable' for key " ZR_DIRECT_IO_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, ZR_DIRECT_IO_OPT,
+ "enable", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* auto */
default:
gf_msg_debug("glusterfsd", 0, "fuse direct io type %d",
cmd_args->fuse_direct_io_mode);
@@ -562,150 +462,82 @@ set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
switch (cmd_args->no_root_squash) {
case GF_OPTION_ENABLE: /* enable */
- ret = dict_set_static_ptr(options, "no-root-squash", "enable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
- "failed to set 'enable' for key "
- "no-root-squash");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "no-root-squash",
+ "enable", glusterfsd_msg_3);
break;
- case GF_OPTION_DISABLE: /* disable/default */
default:
- ret = dict_set_static_ptr(options, "no-root-squash", "disable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
- "failed to set 'disable' for key "
- "no-root-squash");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "no-root-squash",
+ "disable", glusterfsd_msg_3);
gf_msg_debug("glusterfsd", 0, "fuse no-root-squash mode %d",
cmd_args->no_root_squash);
break;
}
if (!cmd_args->no_daemon_mode) {
- ret = dict_set_static_ptr(options, "sync-to-mount", "enable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key sync-mtab");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "sync-to-mount", "enable",
+ glusterfsd_msg_3);
}
if (cmd_args->use_readdirp) {
- ret = dict_set_str(options, "use-readdirp", cmd_args->use_readdirp);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "use-readdirp");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "use-readdirp",
+ cmd_args->use_readdirp, glusterfsd_msg_3);
}
if (cmd_args->event_history) {
ret = dict_set_str(options, "event-history", cmd_args->event_history);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "event-history");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "event-history",
+ cmd_args->event_history, glusterfsd_msg_3);
}
if (cmd_args->thin_client) {
- ret = dict_set_static_ptr(options, "thin-client", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "thin-client");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "thin-client", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->reader_thread_count) {
- ret = dict_set_uint32(options, "reader-thread-count",
- cmd_args->reader_thread_count);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "reader-thread-count");
- goto err;
- }
+ DICT_SET_VAL(dict_set_uint32, options, "reader-thread-count",
+ cmd_args->reader_thread_count, glusterfsd_msg_3);
}
- ret = dict_set_uint32(options, "auto-invalidation",
- cmd_args->fuse_auto_inval);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key auto-invalidation");
- goto err;
- }
+ DICT_SET_VAL(dict_set_uint32, options, "auto-invalidation",
+ cmd_args->fuse_auto_inval, glusterfsd_msg_3);
switch (cmd_args->kernel_writeback_cache) {
case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "kernel-writeback-cache", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "kernel-writeback-cache");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "kernel-writeback-cache",
+ "on", glusterfsd_msg_3);
break;
case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "kernel-writeback-cache", "off");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "kernel-writeback-cache");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "kernel-writeback-cache",
+ "off", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* default */
default:
gf_msg_debug("glusterfsd", 0, "kernel-writeback-cache mode %d",
cmd_args->kernel_writeback_cache);
break;
}
if (cmd_args->attr_times_granularity) {
- ret = dict_set_uint32(options, "attr-times-granularity",
- cmd_args->attr_times_granularity);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "attr-times-granularity");
- goto err;
- }
+ DICT_SET_VAL(dict_set_uint32, options, "attr-times-granularity",
+ cmd_args->attr_times_granularity, glusterfsd_msg_3);
}
switch (cmd_args->fuse_flush_handle_interrupt) {
case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "flush-handle-interrupt", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "flush-handle-interrupt");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "flush-handle-interrupt",
+ "on", glusterfsd_msg_3);
break;
case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "flush-handle-interrupt", "off");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "flush-handle-interrupt");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "flush-handle-interrupt",
+ "off", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* default */
default:
gf_msg_debug("glusterfsd", 0, "fuse-flush-handle-interrupt mode %d",
cmd_args->fuse_flush_handle_interrupt);
break;
}
if (cmd_args->global_threading) {
- ret = dict_set_static_ptr(options, "global-threading", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key global-threading");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "global-threading", "on",
+ glusterfsd_msg_3);
+ }
+ if (cmd_args->fuse_dev_eperm_ratelimit_ns) {
+ DICT_SET_VAL(dict_set_uint32, options, "fuse-dev-eperm-ratelimit-ns",
+ cmd_args->fuse_dev_eperm_ratelimit_ns, glusterfsd_msg_3);
}
ret = 0;
@@ -728,8 +560,7 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
}
if (ctx->process_mode != GF_CLIENT_PROCESS) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7,
- "Not a client process, not performing mount operation");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7, NULL);
return -1;
}
@@ -742,8 +573,8 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
goto err;
if (xlator_set_type(master, "mount/fuse") == -1) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8,
- "MOUNT-POINT %s initialization failed", cmd_args->mount_point);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8,
+ "MOUNT-POINT=%s", cmd_args->mount_point, NULL);
goto err;
}
@@ -760,8 +591,8 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
ret = dict_set_static_ptr(master->options, ZR_FUSE_MOUNTOPTS,
cmd_args->fuse_mountopts);
if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_FUSE_MOUNTOPTS);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3,
+ ZR_FUSE_MOUNTOPTS, NULL);
goto err;
}
}
@@ -787,23 +618,14 @@ err:
static FILE *
get_volfp(glusterfs_ctx_t *ctx)
{
- int ret = 0;
cmd_args_t *cmd_args = NULL;
FILE *specfp = NULL;
- struct stat statbuf;
cmd_args = &ctx->cmd_args;
- ret = sys_lstat(cmd_args->volfile, &statbuf);
- if (ret == -1) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
- "loading volume file %s failed", cmd_args->volfile);
- return NULL;
- }
-
if ((specfp = fopen(cmd_args->volfile, "r")) == NULL) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
- "loading volume file %s failed", cmd_args->volfile);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
+ "volume_file=%s", cmd_args->volfile, NULL);
return NULL;
}
@@ -859,8 +681,7 @@ gf_remember_xlator_option(char *arg)
dot = strchr(arg, '.');
if (!dot) {
- gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
+ gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL);
goto out;
}
@@ -873,8 +694,7 @@ gf_remember_xlator_option(char *arg)
equals = strchr(arg, '=');
if (!equals) {
- gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
+ gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL);
goto out;
}
@@ -886,8 +706,7 @@ gf_remember_xlator_option(char *arg)
option->key[(equals - dot - 1)] = '\0';
if (!*(equals + 1)) {
- gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
+ gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL);
goto out;
}
@@ -1304,6 +1123,14 @@ parse_opts(int key, char *arg, struct argp_state *state)
argp_failure(state, -1, 0, "unknown LRU limit option %s", arg);
break;
+ case ARGP_FUSE_INVALIDATE_LIMIT_KEY:
+ if (!gf_string2int32(arg, &cmd_args->invalidate_limit))
+ break;
+
+ argp_failure(state, -1, 0, "unknown invalidate limit option %s",
+ arg);
+ break;
+
case ARGP_FUSE_BACKGROUND_QLEN_KEY:
if (!gf_string2int(arg, &cmd_args->background_qlen))
break;
@@ -1536,6 +1363,21 @@ parse_opts(int key, char *arg, struct argp_state *state)
argp_failure(state, -1, 0,
"Invalid value for global threading \"%s\"", arg);
break;
+
+ case ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY:
+ if (gf_string2uint32(arg, &cmd_args->fuse_dev_eperm_ratelimit_ns)) {
+ argp_failure(state, -1, 0,
+ "Non-numerical value for "
+ "'fuse-dev-eperm-ratelimit-ns' option %s",
+ arg);
+ } else if (cmd_args->fuse_dev_eperm_ratelimit_ns > 1000000000) {
+ argp_failure(state, -1, 0,
+ "Invalid 'fuse-dev-eperm-ratelimit-ns' value %s. "
+ "Valid range: [\"0, 1000000000\"]",
+ arg);
+ }
+
+ break;
}
return 0;
}
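
The new invalidate-limit and fuse-dev-eperm-ratelimit-ns cases follow the same shape: convert the string, then reject out-of-range values. A minimal sketch of that convert-and-bound check using strtoul(), standing in for gf_string2uint32(), whose exact error contract is not shown in this diff:

/* Sketch: parse an unsigned option and enforce an upper bound. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int
parse_bounded_u32(const char *arg, uint32_t max, uint32_t *out)
{
    char *end = NULL;
    unsigned long val;

    errno = 0;
    val = strtoul(arg, &end, 10);
    if (errno != 0 || end == arg || *end != '\0')
        return -1;                   /* not a clean number */
    if (val > max)
        return -1;                   /* out of range */
    *out = (uint32_t)val;
    return 0;
}

int
main(void)
{
    uint32_t ns = 0;

    /* mirrors the 0..1000000000 check on fuse-dev-eperm-ratelimit-ns */
    if (parse_bounded_u32("250000000", 1000000000u, &ns) == 0)
        printf("ratelimit = %u ns\n", ns);
    else
        fprintf(stderr, "invalid value\n");
    return 0;
}
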
@@ -1553,11 +1395,6 @@ should_call_fini(glusterfs_ctx_t *ctx, xlator_t *trav)
return _gf_true;
}
- /* This is the only one known to be safe in glusterfsd. */
- if (!strcmp(trav->type, "experimental/fdl")) {
- return _gf_true;
- }
-
return _gf_false;
}
@@ -1675,8 +1512,7 @@ reincarnate(int signum)
gf_msg_trace("gluster", 0, "received reincarnate request (sig:HUP)");
if (cmd_args->volfile_server) {
- gf_msg("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11,
- "Fetching the volume file from server...");
+ gf_smsg("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11, NULL);
ret = glusterfs_volfile_fetch(ctx);
}
@@ -1684,8 +1520,7 @@ reincarnate(int signum)
gf_log_logrotate(1);
if (ret < 0)
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12,
- "volume initialization failed.");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12, NULL);
return;
}
@@ -1737,8 +1572,7 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ret = xlator_mem_acct_init(THIS, gfd_mt_end);
if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, 0, glusterfsd_msg_34,
- "memory accounting init failed.");
+ gf_smsg(THIS->name, GF_LOG_CRITICAL, 0, glusterfsd_msg_34, NULL);
return ret;
}
@@ -1752,8 +1586,7 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ctx->process_uuid = generate_glusterfs_ctx_id();
if (!ctx->process_uuid) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13,
- "ERROR: glusterfs uuid generation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13, NULL);
goto out;
}
@@ -1761,23 +1594,20 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ctx->iobuf_pool = iobuf_pool_new();
if (!ctx->iobuf_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs iobuf pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "iobuf", NULL);
goto out;
}
ctx->event_pool = gf_event_pool_new(DEFAULT_EVENT_POOL_SIZE,
STARTING_EVENT_THREADS);
if (!ctx->event_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs event pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "event", NULL);
goto out;
}
ctx->pool = GF_CALLOC(1, sizeof(call_pool_t), gfd_mt_call_pool_t);
if (!ctx->pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs call pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "call", NULL);
goto out;
}
@@ -1787,22 +1617,19 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
/* frame_mem_pool size 112 * 4k */
ctx->pool->frame_mem_pool = mem_pool_new(call_frame_t, 4096);
if (!ctx->pool->frame_mem_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs frame pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "frame", NULL);
goto out;
}
/* stack_mem_pool size 256 * 1024 */
ctx->pool->stack_mem_pool = mem_pool_new(call_stack_t, 1024);
if (!ctx->pool->stack_mem_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs stack pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stack", NULL);
goto out;
}
ctx->stub_mem_pool = mem_pool_new(call_stub_t, 1024);
if (!ctx->stub_mem_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs stub pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stub", NULL);
goto out;
}
@@ -1860,6 +1687,10 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
INIT_LIST_HEAD(&cmd_args->xlator_options);
INIT_LIST_HEAD(&cmd_args->volfile_servers);
+ ctx->pxl_count = 0;
+ pthread_mutex_init(&ctx->fd_lock, NULL);
+ pthread_cond_init(&ctx->fd_cond, NULL);
+ INIT_LIST_HEAD(&ctx->janitor_fds);
lim.rlim_cur = RLIM_INFINITY;
lim.rlim_max = RLIM_INFINITY;
@@ -2148,7 +1979,7 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
struct stat stbuf = {
0,
};
- char timestr[32];
+ char timestr[GF_TIMESTR_SIZE];
char tmp_logfile[1024] = {0};
char *tmp_logfile_dyn = NULL;
char *tmp_logfilebase = NULL;
@@ -2210,9 +2041,7 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
/* Make sure after the parsing cli, if '--volfile-server' option is
given, then '--volfile-id' is mandatory */
if (cmd_args->volfile_server && !cmd_args->volfile_id) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15,
- "ERROR: '--volfile-id' is mandatory if '-s' OR "
- "'--volfile-server' option is given");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15, NULL);
ret = -1;
goto out;
}
@@ -2229,8 +2058,8 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
and exit */
ret = sys_stat(cmd_args->volfile, &stbuf);
if (ret) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, errno, glusterfsd_msg_16,
- "ERROR: parsing the volfile failed");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, errno, glusterfsd_msg_16,
+ NULL);
/* argp_usage (argp.) */
fprintf(stderr, "USAGE: %s [options] [mountpoint]\n", argv[0]);
goto out;
@@ -2254,8 +2083,8 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
if (((ret == 0) &&
(S_ISREG(stbuf.st_mode) || S_ISLNK(stbuf.st_mode))) ||
(ret == -1)) {
- /* Have separate logfile per run */
- gf_time_fmt(timestr, sizeof timestr, time(NULL), gf_timefmt_FT);
+ /* Have separate logfile per run. */
+ gf_time_fmt(timestr, sizeof timestr, gf_time(), gf_timefmt_FT);
sprintf(tmp_logfile, "%s.%s.%d", cmd_args->log_file, timestr,
getpid());
@@ -2282,9 +2111,7 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
compatibility with third party applications
*/
if (cmd_args->max_connect_attempts) {
- gf_msg("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33,
- "obsolete option '--volfile-max-fecth-attempts or "
- "fetch-attempts' was provided");
+ gf_smsg("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33, NULL);
}
#ifdef GF_DARWIN_HOST_OS
@@ -2311,8 +2138,8 @@ glusterfs_pidfile_setup(glusterfs_ctx_t *ctx)
pidfp = fopen(cmd_args->pid_file, "a+");
if (!pidfp) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17,
- "pidfile %s open failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17,
+ "pidfile=%s", cmd_args->pid_file, NULL);
goto out;
}
@@ -2363,29 +2190,29 @@ glusterfs_pidfile_update(glusterfs_ctx_t *ctx, pid_t pid)
ret = lockf(fileno(pidfp), F_TLOCK, 0);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18,
- "pidfile %s lock failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
ret = sys_ftruncate(fileno(pidfp), 0);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20,
- "pidfile %s truncation failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
ret = fprintf(pidfp, "%d\n", pid);
if (ret <= 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
- "pidfile %s write failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
ret = fflush(pidfp);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
- "pidfile %s write failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
@@ -2478,8 +2305,7 @@ glusterfs_signals_setup(glusterfs_ctx_t *ctx)
ret = pthread_sigmask(SIG_BLOCK, &set, NULL);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22,
- "failed to execute pthread_sigmask");
+ gf_smsg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22, NULL);
return ret;
}
@@ -2491,8 +2317,7 @@ glusterfs_signals_setup(glusterfs_ctx_t *ctx)
fallback to signals getting handled by other threads.
setup the signal handlers
*/
- gf_msg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23,
- "failed to create pthread");
+ gf_smsg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23, NULL);
return ret;
}
@@ -2538,8 +2363,7 @@ daemonize(glusterfs_ctx_t *ctx)
sys_close(ctx->daemon_pipe[1]);
}
- gf_msg("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24,
- "daemonization failed");
+ gf_smsg("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24, NULL);
goto out;
case 0:
/* child */
@@ -2560,8 +2384,8 @@ daemonize(glusterfs_ctx_t *ctx)
} else {
err = cstatus;
}
- gf_msg("daemonize", GF_LOG_ERROR, 0, glusterfsd_msg_25,
- "mount failed");
+ gf_smsg("daemonize", GF_LOG_ERROR, 0, glusterfsd_msg_25,
+ NULL);
exit(err);
}
}
@@ -2652,16 +2476,13 @@ glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp)
graph = glusterfs_graph_construct(fp);
if (!graph) {
- gf_msg("", GF_LOG_ERROR, 0, glusterfsd_msg_26,
- "failed to construct the graph");
+ gf_smsg("", GF_LOG_ERROR, 0, glusterfsd_msg_26, NULL);
goto out;
}
for (trav = graph->first; trav; trav = trav->next) {
if (strcmp(trav->type, "mount/fuse") == 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_27,
- "fuse xlator cannot be specified in volume "
- "file");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_27, NULL);
goto out;
}
}
@@ -2742,8 +2563,7 @@ glusterfs_volumes_init(glusterfs_ctx_t *ctx)
fp = get_volfp(ctx);
if (!fp) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28,
- "Cannot reach volume specification file");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28, NULL);
ret = -1;
goto out;
}
@@ -2774,8 +2594,7 @@ main(int argc, char *argv[])
ctx = glusterfs_ctx_new();
if (!ctx) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29,
- "ERROR: glusterfs context not initialized");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29, NULL);
return ENOMEM;
}
glusterfsd_ctx = ctx;
@@ -2839,9 +2658,7 @@ main(int argc, char *argv[])
/* set brick_mux mode only for server process */
if ((ctx->process_mode != GF_SERVER_PROCESS) && cmd->brick_mux) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_43,
- "command line argument --brick-mux is valid only for brick "
- "process");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_43, NULL);
goto out;
}
@@ -2856,15 +2673,14 @@ main(int argc, char *argv[])
len = snprintf(cmdlinestr + pos, sizeof(cmdlinestr) - pos, " %s",
argv[i]);
if ((len <= 0) || (len >= (sizeof(cmdlinestr) - pos))) {
- gf_msg("glusterfs", GF_LOG_ERROR, 0, glusterfsd_msg_29,
- "failed to create command line string");
+ gf_smsg("glusterfs", GF_LOG_ERROR, 0, glusterfsd_msg_029, NULL);
ret = -1;
goto out;
}
}
- gf_msg(argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30,
- "Started running %s version %s (args: %s)", argv[0],
- PACKAGE_VERSION, cmdlinestr);
+ gf_smsg(argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30, "arg=%s", argv[0],
+ "version=%s", PACKAGE_VERSION, "cmdlinestr=%s", cmdlinestr,
+ NULL);
ctx->cmdlinestr = gf_strdup(cmdlinestr);
}
@@ -2899,8 +2715,7 @@ main(int argc, char *argv[])
ctx->env = syncenv_new(0, 0, 0);
if (!ctx->env) {
- gf_msg("", GF_LOG_ERROR, 0, glusterfsd_msg_31,
- "Could not create new sync-environment");
+ gf_smsg("", GF_LOG_ERROR, 0, glusterfsd_msg_31, NULL);
goto out;
}
diff --git a/glusterfsd/src/glusterfsd.h b/glusterfsd/src/glusterfsd.h
index dc7d995e778..4e1413caa70 100644
--- a/glusterfsd/src/glusterfsd.h
+++ b/glusterfsd/src/glusterfsd.h
@@ -112,7 +112,9 @@ enum argp_option_keys {
ARGP_FUSE_LRU_LIMIT_KEY = 190,
ARGP_FUSE_AUTO_INVAL_KEY = 191,
ARGP_GLOBAL_THREADING_KEY = 192,
- ARGP_BRICK_MUX_KEY = 193
+ ARGP_BRICK_MUX_KEY = 193,
+ ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY = 194,
+ ARGP_FUSE_INVALIDATE_LIMIT_KEY = 195,
};
struct _gfd_vol_top_priv {
@@ -133,14 +135,6 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx);
void
cleanup_and_exit(int signum);
-int
-glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time);
-int
-glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time);
void
xlator_mem_cleanup(xlator_t *this);
diff --git a/heal/src/Makefile.am b/heal/src/Makefile.am
index f04a294cc56..aa18d3eff88 100644
--- a/heal/src/Makefile.am
+++ b/heal/src/Makefile.am
@@ -1,5 +1,6 @@
if WITH_SERVER
-sbin_PROGRAMS = glfsheal
+scriptdir = $(GLUSTERFS_LIBEXECDIR)
+script_PROGRAMS = glfsheal
endif
glfsheal_SOURCES = glfs-heal.c
@@ -18,8 +19,7 @@ AM_CPPFLAGS = $(GF_CPPFLAGS) \
-I$(top_srcdir)/rpc/xdr/src\
-I$(top_builddir)/rpc/xdr/src\
-I$(top_srcdir)/api/src\
- -DDATADIR=\"$(localstatedir)\" \
- -DSBIN_DIR=\"$(sbindir)\"
+ -DDATADIR=\"$(localstatedir)\"
AM_CFLAGS = -Wall $(GF_CFLAGS) $(XML_CFLAGS)
diff --git a/heal/src/glfs-heal.c b/heal/src/glfs-heal.c
index 20372316edd..bf4b47f8760 100644
--- a/heal/src/glfs-heal.c
+++ b/heal/src/glfs-heal.c
@@ -773,8 +773,7 @@ static int
glfsh_process_entries(xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
uint64_t *offset, num_entries_t *num_entries,
print_status glfsh_print_status,
- gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode,
- dict_t *xattr_req)
+ gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode)
{
gf_dirent_t *entry = NULL;
gf_dirent_t *tmp = NULL;
@@ -806,7 +805,7 @@ glfsh_process_entries(xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
gf_uuid_parse(entry->d_name, gfid);
gf_uuid_copy(loc.gfid, gfid);
- ret = syncop_getxattr(this, &loc, &dict, GF_HEAL_INFO, xattr_req, NULL);
+ ret = syncop_getxattr(this, &loc, &dict, GF_HEAL_INFO, NULL, NULL);
if (ret) {
if ((mode != GLFSH_MODE_CONTINUE_ON_ERROR) && (ret == -ENOTCONN))
goto out;
@@ -875,19 +874,19 @@ glfsh_crawl_directory(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
if (heal_op == GF_SHD_OP_INDEX_SUMMARY) {
ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
num_entries, glfsh_print_heal_status,
- ignore, mode, xattr_req);
+ ignore, mode);
if (ret < 0)
goto out;
} else if (heal_op == GF_SHD_OP_SPLIT_BRAIN_FILES) {
ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
num_entries, glfsh_print_spb_status,
- ignore, mode, xattr_req);
+ ignore, mode);
if (ret < 0)
goto out;
} else if (heal_op == GF_SHD_OP_HEAL_SUMMARY) {
ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
num_entries, glfsh_print_summary_status,
- ignore, mode, xattr_req);
+ ignore, mode);
if (ret < 0)
goto out;
} else if (heal_op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) {
@@ -896,7 +895,7 @@ glfsh_crawl_directory(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
} else if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) {
ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
num_entries, glfsh_heal_status_boolean,
- ignore, mode, xattr_req);
+ ignore, mode);
if (ret < 0)
goto out;
}
@@ -950,10 +949,6 @@ glfsh_print_pending_heals_type(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
int32_t op_errno = 0;
gf_boolean_t ignore = _gf_false;
- ret = dict_set_str(xattr_req, "index-vgfid", vgfid);
- if (ret)
- return ret;
-
if (!strcmp(vgfid, GF_XATTROP_DIRTY_GFID))
ignore = _gf_true;
@@ -1060,6 +1055,10 @@ glfsh_set_heal_options(glfs_t *fs, gf_xl_afr_op_t heal_op)
if (ret)
goto out;
+ ret = glfs_set_xlator_option(fs, "*-replicate-*", "halo-enabled", "off");
+ if (ret)
+ goto out;
+
if ((heal_op != GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE) &&
(heal_op != GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) &&
(heal_op != GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME))
@@ -1730,14 +1729,19 @@ main(int argc, char **argv)
goto out;
}
+ char *var_str = (heal_op == GF_SHD_OP_INDEX_SUMMARY ||
+ heal_op == GF_SHD_OP_HEAL_SUMMARY)
+ ? "replicate/disperse"
+ : "replicate";
+
ret = glfsh_validate_volume(top_subvol, heal_op);
if (ret < 0) {
ret = -EINVAL;
- gf_asprintf(&op_errstr, "Volume %s is not of type %s", volname,
- (heal_op == GF_SHD_OP_INDEX_SUMMARY ||
- heal_op == GF_SHD_OP_HEAL_SUMMARY)
- ? "replicate/disperse"
- : "replicate");
+            gf_asprintf(&op_errstr,
+                        "This command is supported only for "
+                        "volumes of %s type. Volume %s is not of type %s",
+                        var_str, volname, var_str);
goto out;
}
rootloc.inode = inode_ref(top_subvol->itable->root);
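
The block above hoists the repeated type-name ternary into var_str before formatting the error string. A small sketch of the same factoring with plain asprintf() (gf_asprintf() is GlusterFS's allocator-aware variant; the volume name used below is made up):

/* Sketch: factor a repeated conditional out of a formatted message. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    int is_summary_op = 1; /* stands in for the heal_op checks above */
    const char *var_str = is_summary_op ? "replicate/disperse" : "replicate";
    char *op_errstr = NULL;

    if (asprintf(&op_errstr,
                 "This command is supported only for volumes of %s type. "
                 "Volume %s is not of type %s",
                 var_str, "test-vol", var_str) < 0)
        return 1;

    fprintf(stderr, "%s\n", op_errstr);
    free(op_errstr);
    return 0;
}
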
diff --git a/libgfdb.pc.in b/libgfdb.pc.in
deleted file mode 100644
index 463e8becd3a..00000000000
--- a/libgfdb.pc.in
+++ /dev/null
@@ -1,12 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-
-Name: libgfdb
-Description: GlusterFS Database Library
-Version: @LIBGFDB_VERSION@
-Libs: -L${libdir} -lgfchangedb -lglusterfs
-Cflags: -I${includedir}
-Requires: sqlite3 @PKGCONFIG_UUID@
diff --git a/libglusterd/Makefile.am b/libglusterd/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/libglusterd/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/libglusterd/src/Makefile.am b/libglusterd/src/Makefile.am
new file mode 100644
index 00000000000..684d2bac96b
--- /dev/null
+++ b/libglusterd/src/Makefile.am
@@ -0,0 +1,31 @@
+libglusterd_la_CFLAGS = $(GF_CFLAGS) $(GF_DARWIN_LIBGLUSTERFS_CFLAGS) \
+ -DDATADIR=\"$(localstatedir)\"
+
+libglusterd_la_CPPFLAGS = $(GF_CPPFLAGS) -D__USE_FILE_OFFSET64 \
+ -DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" \
+ -DXLATORPARENTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)\" \
+ -DXXH_NAMESPACE=GF_ -D__USE_LARGEFILE64 \
+ -I$(CONTRIBDIR)/rbtree \
+ -I$(CONTRIBDIR)/libexecinfo ${ARGP_STANDALONE_CPPFLAGS} \
+ -DSBIN_DIR=\"$(sbindir)\" -I$(CONTRIBDIR)/timer-wheel \
+ -I$(CONTRIBDIR)/xxhash \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(top_srcdir)/rpc/rpc-lib/src/
+
+libglusterd_la_LIBADD = $(ZLIB_LIBS) $(MATH_LIB) $(UUID_LIBS)
+libglusterd_la_LDFLAGS = -version-info $(LIBGLUSTERFS_LT_VERSION) $(GF_LDFLAGS) \
+ -export-symbols $(top_srcdir)/libglusterd/src/libglusterd.sym
+
+lib_LTLIBRARIES = libglusterd.la
+
+libglusterd_la_SOURCES = gd-common-utils.c
+
+libglusterd_la_HEADERS = gd-common-utils.h
+
+libglusterd_ladir = $(includedir)/glusterfs
+
+noinst_HEADERS = gd-common-utils.h
+
+EXTRA_DIST = libglusterd.sym
+
+CLEANFILES =
diff --git a/libglusterd/src/gd-common-utils.c b/libglusterd/src/gd-common-utils.c
new file mode 100644
index 00000000000..243fab215e6
--- /dev/null
+++ b/libglusterd/src/gd-common-utils.c
@@ -0,0 +1,78 @@
+/*
+ Copyright (c) 2019 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "gd-common-utils.h"
+#include "cli1-xdr.h"
+
+int
+get_vol_type(int type, int dist_count, int brick_count)
+{
+ if ((type != GF_CLUSTER_TYPE_TIER) && (type > 0) &&
+ (dist_count < brick_count))
+ type = type + GF_CLUSTER_TYPE_MAX - 1;
+
+ return type;
+}
+
+char *
+get_struct_variable(int mem_num, gf_gsync_status_t *sts_val)
+{
+ switch (mem_num) {
+ case 0:
+ return (sts_val->node);
+ case 1:
+ return (sts_val->master);
+ case 2:
+ return (sts_val->brick);
+ case 3:
+ return (sts_val->slave_user);
+ case 4:
+ return (sts_val->slave);
+ case 5:
+ return (sts_val->slave_node);
+ case 6:
+ return (sts_val->worker_status);
+ case 7:
+ return (sts_val->crawl_status);
+ case 8:
+ return (sts_val->last_synced);
+ case 9:
+ return (sts_val->entry);
+ case 10:
+ return (sts_val->data);
+ case 11:
+ return (sts_val->meta);
+ case 12:
+ return (sts_val->failures);
+ case 13:
+ return (sts_val->checkpoint_time);
+ case 14:
+ return (sts_val->checkpoint_completed);
+ case 15:
+ return (sts_val->checkpoint_completion_time);
+ case 16:
+ return (sts_val->brick_host_uuid);
+ case 17:
+ return (sts_val->last_synced_utc);
+ case 18:
+ return (sts_val->checkpoint_time_utc);
+ case 19:
+ return (sts_val->checkpoint_completion_time_utc);
+ case 20:
+ return (sts_val->slavekey);
+ case 21:
+ return (sts_val->session_slave);
+ default:
+ goto out;
+ }
+
+out:
+ return NULL;
+}
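
get_struct_variable() above maps a field index to the matching member of gf_gsync_status_t and returns NULL once the index runs past the last known member. A caller would typically loop until that NULL sentinel; the sketch below uses a made-up status struct and accessor, since only part of the real structure is visible in this diff.

/* Sketch only: demo caller for an index->field accessor that returns
 * NULL past the known members (demo_* names are invented). */
#include <stdio.h>

struct demo_status {
    const char *node;
    const char *brick;
    const char *worker_status;
};

static const char *
demo_get_field(int idx, const struct demo_status *st)
{
    switch (idx) {
        case 0:
            return st->node;
        case 1:
            return st->brick;
        case 2:
            return st->worker_status;
        default:
            return NULL;             /* same sentinel convention as above */
    }
}

int
main(void)
{
    struct demo_status st = {"node1", "/bricks/b1", "Active"};
    const char *val;
    int i;

    for (i = 0; (val = demo_get_field(i, &st)) != NULL; i++)
        printf("field[%d] = %s\n", i, val);
    return 0;
}
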
diff --git a/libglusterd/src/gd-common-utils.h b/libglusterd/src/gd-common-utils.h
new file mode 100644
index 00000000000..b9bb4f956fe
--- /dev/null
+++ b/libglusterd/src/gd-common-utils.h
@@ -0,0 +1,28 @@
+/*
+ Copyright (c) 2019 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GD_COMMON_UTILS_H
+#define _GD_COMMON_UTILS_H
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <stddef.h>
+
+#include "protocol-common.h"
+#include "rpcsvc.h"
+
+int
+get_vol_type(int type, int dist_count, int brick_count);
+
+char *
+get_struct_variable(int mem_num, gf_gsync_status_t *sts_val);
+
+#endif /* _GD_COMMON_UTILS_H */
diff --git a/libglusterd/src/libglusterd.sym b/libglusterd/src/libglusterd.sym
new file mode 100644
index 00000000000..45969a87c12
--- /dev/null
+++ b/libglusterd/src/libglusterd.sym
@@ -0,0 +1,2 @@
+get_vol_type
+get_struct_variable
diff --git a/libglusterfs/src/Makefile.am b/libglusterfs/src/Makefile.am
index 79ab7ee93f2..385e8ef4600 100644
--- a/libglusterfs/src/Makefile.am
+++ b/libglusterfs/src/Makefile.am
@@ -7,13 +7,13 @@ libglusterfs_la_CPPFLAGS = $(GF_CPPFLAGS) -D__USE_FILE_OFFSET64 \
-DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" \
-DXLATORPARENTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)\" \
-DXXH_NAMESPACE=GF_ -D__USE_LARGEFILE64 \
- -I$(top_srcdir)/rpc/xdr/src/ -I$(top_builddir)/rpc/xdr/src/ \
- -I$(top_srcdir)/rpc/rpc-lib/src/ -I$(CONTRIBDIR)/rbtree \
+ -I$(CONTRIBDIR)/rbtree \
-I$(CONTRIBDIR)/libexecinfo ${ARGP_STANDALONE_CPPFLAGS} \
-DSBIN_DIR=\"$(sbindir)\" -I$(CONTRIBDIR)/timer-wheel \
-I$(CONTRIBDIR)/xxhash
-libglusterfs_la_LIBADD = $(ZLIB_LIBS) $(MATH_LIB) $(UUID_LIBS)
+libglusterfs_la_LIBADD = $(ZLIB_LIBS) $(MATH_LIB) $(UUID_LIBS) $(LIB_DL) \
+ $(URCU_LIBS) $(URCU_CDS_LIBS)
libglusterfs_la_LDFLAGS = -version-info $(LIBGLUSTERFS_LT_VERSION) $(GF_LDFLAGS) \
-export-symbols $(top_srcdir)/libglusterfs/src/libglusterfs.sym
@@ -40,12 +40,9 @@ libglusterfs_la_SOURCES = dict.c xlator.c logging.c \
throttle-tbf.c monitoring.c async.c
nodist_libglusterfs_la_SOURCES = y.tab.c graph.lex.c defaults.c
-nodist_libglusterfs_la_HEADERS = y.tab.h protocol-common.h
+nodist_libglusterfs_la_HEADERS = y.tab.h
-BUILT_SOURCES = graph.lex.c defaults.c eventtypes.h protocol-common.h
-
-protocol-common.h: $(top_srcdir)/rpc/rpc-lib/src/protocol-common.h
- cp $(top_srcdir)/rpc/rpc-lib/src/protocol-common.h .
+BUILT_SOURCES = graph.lex.c defaults.c eventtypes.h
libglusterfs_la_HEADERS = glusterfs/common-utils.h glusterfs/defaults.h \
glusterfs/default-args.h glusterfs/dict.h glusterfs/glusterfs.h \
@@ -69,7 +66,7 @@ libglusterfs_la_HEADERS = glusterfs/common-utils.h glusterfs/defaults.h \
glusterfs/quota-common-utils.h glusterfs/rot-buffs.h \
glusterfs/compat-uuid.h glusterfs/upcall-utils.h glusterfs/throttle-tbf.h \
glusterfs/events.h glusterfs/atomic.h glusterfs/monitoring.h \
- glusterfs/async.h
+ glusterfs/async.h glusterfs/glusterfs-fops.h
libglusterfs_ladir = $(includedir)/glusterfs
@@ -82,8 +79,7 @@ noinst_HEADERS = unittest/unittest.h \
$(CONTRIBDIR)/userspace-rcu/wfcqueue.h \
$(CONTRIBDIR)/userspace-rcu/wfstack.h \
$(CONTRIBDIR)/userspace-rcu/static-wfcqueue.h \
- $(CONTRIBDIR)/userspace-rcu/static-wfstack.h \
- tier-ctr-interface.h
+ $(CONTRIBDIR)/userspace-rcu/static-wfstack.h
eventtypes.h: $(top_srcdir)/events/eventskeygen.py
$(PYTHON) $(top_srcdir)/events/eventskeygen.py C_HEADER
diff --git a/libglusterfs/src/call-stub.c b/libglusterfs/src/call-stub.c
index 3c7c7748c99..ee84f08acd4 100644
--- a/libglusterfs/src/call-stub.c
+++ b/libglusterfs/src/call-stub.c
@@ -41,7 +41,6 @@ fop_lookup_stub(call_frame_t *frame, fop_lookup_t fn, loc_t *loc, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_LOOKUP);
@@ -60,8 +59,6 @@ fop_lookup_cbk_stub(call_frame_t *frame, fop_lookup_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_LOOKUP);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -77,7 +74,6 @@ fop_stat_stub(call_frame_t *frame, fop_stat_t fn, loc_t *loc, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_STAT);
@@ -95,8 +91,6 @@ fop_stat_cbk_stub(call_frame_t *frame, fop_stat_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_STAT);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -111,8 +105,6 @@ fop_fstat_stub(call_frame_t *frame, fop_fstat_t fn, fd_t *fd, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FSTAT);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -128,8 +120,6 @@ fop_fstat_cbk_stub(call_frame_t *frame, fop_fstat_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSTAT);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -145,7 +135,6 @@ fop_truncate_stub(call_frame_t *frame, fop_truncate_t fn, loc_t *loc, off_t off,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_TRUNCATE);
@@ -164,8 +153,6 @@ fop_truncate_cbk_stub(call_frame_t *frame, fop_truncate_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_TRUNCATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -182,8 +169,6 @@ fop_ftruncate_stub(call_frame_t *frame, fop_ftruncate_t fn, fd_t *fd, off_t off,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FTRUNCATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -201,8 +186,6 @@ fop_ftruncate_cbk_stub(call_frame_t *frame, fop_ftruncate_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FTRUNCATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -220,7 +203,6 @@ fop_access_stub(call_frame_t *frame, fop_access_t fn, loc_t *loc, int32_t mask,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_ACCESS);
@@ -238,8 +220,6 @@ fop_access_cbk_stub(call_frame_t *frame, fop_access_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_ACCESS);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -255,7 +235,6 @@ fop_readlink_stub(call_frame_t *frame, fop_readlink_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_READLINK);
@@ -274,8 +253,6 @@ fop_readlink_cbk_stub(call_frame_t *frame, fop_readlink_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_READLINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -292,7 +269,6 @@ fop_mknod_stub(call_frame_t *frame, fop_mknod_t fn, loc_t *loc, mode_t mode,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_MKNOD);
@@ -312,8 +288,6 @@ fop_mknod_cbk_stub(call_frame_t *frame, fop_mknod_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_MKNOD);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -330,7 +304,6 @@ fop_mkdir_stub(call_frame_t *frame, fop_mkdir_t fn, loc_t *loc, mode_t mode,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_MKDIR);
@@ -350,8 +323,6 @@ fop_mkdir_cbk_stub(call_frame_t *frame, fop_mkdir_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_MKDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -368,7 +339,6 @@ fop_unlink_stub(call_frame_t *frame, fop_unlink_t fn, loc_t *loc, int xflag,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_UNLINK);
@@ -388,8 +358,6 @@ fop_unlink_cbk_stub(call_frame_t *frame, fop_unlink_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_UNLINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -406,7 +374,6 @@ fop_rmdir_stub(call_frame_t *frame, fop_rmdir_t fn, loc_t *loc, int flags,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_RMDIR);
@@ -426,8 +393,6 @@ fop_rmdir_cbk_stub(call_frame_t *frame, fop_rmdir_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_RMDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -444,7 +409,6 @@ fop_symlink_stub(call_frame_t *frame, fop_symlink_t fn, const char *linkname,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
GF_VALIDATE_OR_GOTO("call-stub", linkname, out);
@@ -465,8 +429,6 @@ fop_symlink_cbk_stub(call_frame_t *frame, fop_symlink_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SYMLINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -483,7 +445,6 @@ fop_rename_stub(call_frame_t *frame, fop_rename_t fn, loc_t *oldloc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", oldloc, out);
GF_VALIDATE_OR_GOTO("call-stub", newloc, out);
@@ -505,8 +466,6 @@ fop_rename_cbk_stub(call_frame_t *frame, fop_rename_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_RENAME);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -523,7 +482,6 @@ fop_link_stub(call_frame_t *frame, fop_link_t fn, loc_t *oldloc, loc_t *newloc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", oldloc, out);
GF_VALIDATE_OR_GOTO("call-stub", newloc, out);
@@ -544,8 +502,6 @@ fop_link_cbk_stub(call_frame_t *frame, fop_link_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_LINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -562,7 +518,6 @@ fop_create_stub(call_frame_t *frame, fop_create_t fn, loc_t *loc, int32_t flags,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_CREATE);
@@ -582,8 +537,6 @@ fop_create_cbk_stub(call_frame_t *frame, fop_create_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_CREATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -600,7 +553,6 @@ fop_open_stub(call_frame_t *frame, fop_open_t fn, loc_t *loc, int32_t flags,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_OPEN);
@@ -618,8 +570,6 @@ fop_open_cbk_stub(call_frame_t *frame, fop_open_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_OPEN);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -635,8 +585,6 @@ fop_readv_stub(call_frame_t *frame, fop_readv_t fn, fd_t *fd, size_t size,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_READ);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -653,8 +601,6 @@ fop_readv_cbk_stub(call_frame_t *frame, fop_readv_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_READ);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -672,7 +618,6 @@ fop_writev_stub(call_frame_t *frame, fop_writev_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", vector, out);
stub = stub_new(frame, 1, GF_FOP_WRITE);
@@ -692,8 +637,6 @@ fop_writev_cbk_stub(call_frame_t *frame, fop_writev_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_WRITE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -709,8 +652,6 @@ fop_flush_stub(call_frame_t *frame, fop_flush_t fn, fd_t *fd, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FLUSH);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -726,8 +667,6 @@ fop_flush_cbk_stub(call_frame_t *frame, fop_flush_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FLUSH);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -743,8 +682,6 @@ fop_fsync_stub(call_frame_t *frame, fop_fsync_t fn, fd_t *fd, int32_t datasync,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FSYNC);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -761,8 +698,6 @@ fop_fsync_cbk_stub(call_frame_t *frame, fop_fsync_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSYNC);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -779,7 +714,6 @@ fop_opendir_stub(call_frame_t *frame, fop_opendir_t fn, loc_t *loc, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_OPENDIR);
@@ -797,8 +731,6 @@ fop_opendir_cbk_stub(call_frame_t *frame, fop_opendir_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_OPENDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -814,8 +746,6 @@ fop_fsyncdir_stub(call_frame_t *frame, fop_fsyncdir_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FSYNCDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -831,8 +761,6 @@ fop_fsyncdir_cbk_stub(call_frame_t *frame, fop_fsyncdir_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSYNCDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -847,7 +775,6 @@ fop_statfs_stub(call_frame_t *frame, fop_statfs_t fn, loc_t *loc, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_STATFS);
@@ -865,8 +792,6 @@ fop_statfs_cbk_stub(call_frame_t *frame, fop_statfs_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_STATFS);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -882,7 +807,6 @@ fop_setxattr_stub(call_frame_t *frame, fop_setxattr_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_SETXATTR);
@@ -900,8 +824,6 @@ fop_setxattr_cbk_stub(call_frame_t *frame, fop_setxattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SETXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -917,7 +839,6 @@ fop_getxattr_stub(call_frame_t *frame, fop_getxattr_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_GETXATTR);
@@ -936,8 +857,6 @@ fop_getxattr_cbk_stub(call_frame_t *frame, fop_getxattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_GETXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -953,7 +872,6 @@ fop_fsetxattr_stub(call_frame_t *frame, fop_fsetxattr_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fd, out);
stub = stub_new(frame, 1, GF_FOP_FSETXATTR);
@@ -971,8 +889,6 @@ fop_fsetxattr_cbk_stub(call_frame_t *frame, fop_fsetxattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSETXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -988,7 +904,6 @@ fop_fgetxattr_stub(call_frame_t *frame, fop_fgetxattr_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fd, out);
stub = stub_new(frame, 1, GF_FOP_FGETXATTR);
@@ -1007,8 +922,6 @@ fop_fgetxattr_cbk_stub(call_frame_t *frame, fop_fgetxattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_GETXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1024,7 +937,6 @@ fop_removexattr_stub(call_frame_t *frame, fop_removexattr_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
GF_VALIDATE_OR_GOTO("call-stub", name, out);
@@ -1043,8 +955,6 @@ fop_removexattr_cbk_stub(call_frame_t *frame, fop_removexattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_REMOVEXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1060,7 +970,6 @@ fop_fremovexattr_stub(call_frame_t *frame, fop_fremovexattr_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fd, out);
GF_VALIDATE_OR_GOTO("call-stub", name, out);
@@ -1079,8 +988,6 @@ fop_fremovexattr_cbk_stub(call_frame_t *frame, fop_fremovexattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FREMOVEXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1096,7 +1003,6 @@ fop_lk_stub(call_frame_t *frame, fop_lk_t fn, fd_t *fd, int32_t cmd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", lock, out);
stub = stub_new(frame, 1, GF_FOP_LK);
@@ -1114,8 +1020,6 @@ fop_lk_cbk_stub(call_frame_t *frame, fop_lk_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_LK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1131,7 +1035,6 @@ fop_inodelk_stub(call_frame_t *frame, fop_inodelk_t fn, const char *volume,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", lock, out);
stub = stub_new(frame, 1, GF_FOP_INODELK);
@@ -1149,8 +1052,6 @@ fop_inodelk_cbk_stub(call_frame_t *frame, fop_inodelk_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_INODELK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1166,7 +1067,6 @@ fop_finodelk_stub(call_frame_t *frame, fop_finodelk_t fn, const char *volume,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", lock, out);
stub = stub_new(frame, 1, GF_FOP_FINODELK);
@@ -1185,8 +1085,6 @@ fop_finodelk_cbk_stub(call_frame_t *frame, fop_inodelk_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FINODELK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1203,8 +1101,6 @@ fop_entrylk_stub(call_frame_t *frame, fop_entrylk_t fn, const char *volume,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_ENTRYLK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1221,8 +1117,6 @@ fop_entrylk_cbk_stub(call_frame_t *frame, fop_entrylk_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_ENTRYLK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1239,8 +1133,6 @@ fop_fentrylk_stub(call_frame_t *frame, fop_fentrylk_t fn, const char *volume,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FENTRYLK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1256,8 +1148,6 @@ fop_fentrylk_cbk_stub(call_frame_t *frame, fop_fentrylk_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FENTRYLK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1274,8 +1164,6 @@ fop_readdirp_cbk_stub(call_frame_t *frame, fop_readdirp_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_READDIRP);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1291,8 +1179,6 @@ fop_readdir_cbk_stub(call_frame_t *frame, fop_readdir_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_READDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1338,7 +1224,6 @@ fop_rchecksum_stub(call_frame_t *frame, fop_rchecksum_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fd, out);
stub = stub_new(frame, 1, GF_FOP_RCHECKSUM);
@@ -1357,8 +1242,6 @@ fop_rchecksum_cbk_stub(call_frame_t *frame, fop_rchecksum_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_RCHECKSUM);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1375,8 +1258,6 @@ fop_xattrop_cbk_stub(call_frame_t *frame, fop_xattrop_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_XATTROP);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1392,7 +1273,6 @@ fop_fxattrop_cbk_stub(call_frame_t *frame, fop_fxattrop_cbk_t fn,
dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
stub = stub_new(frame, 0, GF_FOP_FXATTROP);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1409,7 +1289,6 @@ fop_xattrop_stub(call_frame_t *frame, fop_xattrop_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", xattr, out);
stub = stub_new(frame, 1, GF_FOP_XATTROP);
@@ -1427,7 +1306,6 @@ fop_fxattrop_stub(call_frame_t *frame, fop_fxattrop_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", xattr, out);
stub = stub_new(frame, 1, GF_FOP_FXATTROP);
@@ -1446,8 +1324,6 @@ fop_setattr_cbk_stub(call_frame_t *frame, fop_setattr_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SETATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1465,8 +1341,6 @@ fop_fsetattr_cbk_stub(call_frame_t *frame, fop_setattr_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSETATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1483,7 +1357,6 @@ fop_setattr_stub(call_frame_t *frame, fop_setattr_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_SETATTR);
@@ -1501,7 +1374,6 @@ fop_fsetattr_stub(call_frame_t *frame, fop_fsetattr_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_FSETATTR);
@@ -1520,8 +1392,6 @@ fop_fallocate_cbk_stub(call_frame_t *frame, fop_fallocate_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FALLOCATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1539,7 +1409,6 @@ fop_fallocate_stub(call_frame_t *frame, fop_fallocate_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_FALLOCATE);
@@ -1558,8 +1427,6 @@ fop_discard_cbk_stub(call_frame_t *frame, fop_discard_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_DISCARD);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1577,7 +1444,6 @@ fop_discard_stub(call_frame_t *frame, fop_discard_t fn, fd_t *fd, off_t offset,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_DISCARD);
@@ -1596,8 +1462,6 @@ fop_zerofill_cbk_stub(call_frame_t *frame, fop_zerofill_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_ZEROFILL);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1615,7 +1479,6 @@ fop_zerofill_stub(call_frame_t *frame, fop_zerofill_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_ZEROFILL);
@@ -1633,8 +1496,6 @@ fop_ipc_cbk_stub(call_frame_t *frame, fop_ipc_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_IPC);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1650,7 +1511,6 @@ fop_ipc_stub(call_frame_t *frame, fop_ipc_t fn, int32_t op, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_IPC);
@@ -1668,8 +1528,6 @@ fop_lease_cbk_stub(call_frame_t *frame, fop_lease_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_LEASE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1685,7 +1543,6 @@ fop_lease_stub(call_frame_t *frame, fop_lease_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
GF_VALIDATE_OR_GOTO("call-stub", lease, out);
@@ -1704,8 +1561,6 @@ fop_seek_cbk_stub(call_frame_t *frame, fop_seek_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SEEK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1722,7 +1577,6 @@ fop_seek_stub(call_frame_t *frame, fop_seek_t fn, fd_t *fd, off_t offset,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_SEEK);
@@ -1741,8 +1595,6 @@ fop_getactivelk_cbk_stub(call_frame_t *frame, fop_getactivelk_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_GETACTIVELK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1759,7 +1611,6 @@ fop_getactivelk_stub(call_frame_t *frame, fop_getactivelk_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_GETACTIVELK);
@@ -1781,8 +1632,6 @@ fop_setactivelk_cbk_stub(call_frame_t *frame, fop_setactivelk_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SETACTIVELK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1803,7 +1652,6 @@ fop_setactivelk_stub(call_frame_t *frame, fop_setactivelk_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_SETACTIVELK);
@@ -1825,7 +1673,6 @@ fop_copy_file_range_stub(call_frame_t *frame, fop_copy_file_range_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_COPY_FILE_RANGE);
@@ -1848,7 +1695,6 @@ fop_copy_file_range_cbk_stub(call_frame_t *frame, fop_copy_file_range_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 0, GF_FOP_COPY_FILE_RANGE);
@@ -1869,7 +1715,6 @@ fop_put_stub(call_frame_t *frame, fop_put_t fn, loc_t *loc, mode_t mode,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", vector, out);
stub = stub_new(frame, 1, GF_FOP_PUT);
@@ -1889,8 +1734,6 @@ fop_put_cbk_stub(call_frame_t *frame, fop_put_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_PUT);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1907,7 +1750,6 @@ fop_icreate_stub(call_frame_t *frame, fop_icreate_t fn, loc_t *loc, mode_t mode,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_ICREATE);
@@ -1947,8 +1789,6 @@ fop_icreate_cbk_stub(call_frame_t *frame, fop_icreate_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_ICREATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1966,7 +1806,6 @@ fop_namelink_stub(call_frame_t *frame, fop_namelink_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_NAMELINK);
@@ -2006,8 +1845,6 @@ fop_namelink_cbk_stub(call_frame_t *frame, fop_namelink_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_NAMELINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c
index e875c8b6b69..9d377c3c2e1 100644
--- a/libglusterfs/src/client_t.c
+++ b/libglusterfs/src/client_t.c
@@ -13,7 +13,6 @@
#include "glusterfs/statedump.h"
#include "glusterfs/client_t.h"
#include "glusterfs/list.h"
-#include "rpcsvc.h"
#include "glusterfs/libglusterfs-messages.h"
static int
@@ -110,50 +109,13 @@ gf_clienttable_alloc(void)
return clienttable;
}
-void
-gf_client_clienttable_destroy(clienttable_t *clienttable)
-{
- client_t *client = NULL;
- cliententry_t *cliententries = NULL;
- uint32_t client_count = 0;
- int32_t i = 0;
-
- if (!clienttable) {
- gf_msg_callingfn("client_t", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "!clienttable");
- return;
- }
-
- LOCK(&clienttable->lock);
- {
- client_count = clienttable->max_clients;
- clienttable->max_clients = 0;
- cliententries = clienttable->cliententries;
- clienttable->cliententries = NULL;
- }
- UNLOCK(&clienttable->lock);
-
- if (cliententries != NULL) {
- for (i = 0; i < client_count; i++) {
- client = cliententries[i].client;
- if (client != NULL) {
- gf_client_unref(client);
- }
- }
-
- GF_FREE(cliententries);
- LOCK_DESTROY(&clienttable->lock);
- GF_FREE(clienttable);
- }
-}
-
/*
* Increments ref.bind if the client is already present or creates a new
* client with ref.bind = 1,ref.count = 1 it signifies that
* as long as ref.bind is > 0 client should be alive.
*/
client_t *
-gf_client_get(xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid,
+gf_client_get(xlator_t *this, client_auth_data_t *cred, char *client_uid,
char *subdir_mount)
{
client_t *client = NULL;
@@ -181,11 +143,10 @@ gf_client_get(xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid,
* if auth was used, matching auth flavour and data
*/
if (strcmp(client_uid, client->client_uid) == 0 &&
- (cred->flavour != AUTH_NONE &&
- (cred->flavour == client->auth.flavour &&
- (size_t)cred->datalen == client->auth.len &&
- memcmp(cred->authdata, client->auth.data, client->auth.len) ==
- 0))) {
+ (cred->flavour && (cred->flavour == client->auth.flavour &&
+ (size_t)cred->datalen == client->auth.len &&
+ memcmp(cred->authdata, client->auth.data,
+ client->auth.len) == 0))) {
GF_ATOMIC_INC(client->bind);
goto unlock;
}
@@ -227,7 +188,7 @@ gf_client_get(xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid,
GF_ATOMIC_INIT(client->fd_cnt, 0);
client->auth.flavour = cred->flavour;
- if (cred->flavour != AUTH_NONE) {
+ if (cred->flavour) {
client->auth.data = GF_MALLOC(cred->datalen, gf_common_mt_client_t);
if (client->auth.data == NULL) {
GF_FREE(client->scratch_ctx.ctx);
@@ -353,8 +314,6 @@ client_destroy(client_t *client)
clienttable = client->this->ctx->clienttable;
- LOCK_DESTROY(&client->scratch_ctx.lock);
-
LOCK(&clienttable->lock);
{
clienttable->cliententries[client->tbl_index].client = NULL;
@@ -372,6 +331,8 @@ client_destroy(client_t *client)
if (client->subdir_inode)
inode_unref(client->subdir_inode);
+ LOCK_DESTROY(&client->scratch_ctx.lock);
+
GF_FREE(client->auth.data);
GF_FREE(client->auth.username);
GF_FREE(client->auth.passwd);
@@ -581,62 +542,6 @@ client_ctx_del(client_t *client, void *key, void **value)
}
void
-client_dump(client_t *client, char *prefix)
-{
- if (!client)
- return;
-
- gf_proc_dump_write("refcount", "%" GF_PRI_ATOMIC,
- GF_ATOMIC_GET(client->count));
-}
-
-void
-cliententry_dump(cliententry_t *cliententry, char *prefix)
-{
- if (!cliententry)
- return;
-
- if (GF_CLIENTENTRY_ALLOCATED != cliententry->next_free)
- return;
-
- if (cliententry->client)
- client_dump(cliententry->client, prefix);
-}
-
-void
-clienttable_dump(clienttable_t *clienttable, char *prefix)
-{
- int i = 0;
- int ret = -1;
- char key[GF_DUMP_MAX_BUF_LEN] = {0};
-
- if (!clienttable)
- return;
-
- ret = TRY_LOCK(&clienttable->lock);
- {
- if (ret) {
- gf_msg("client_t", GF_LOG_WARNING, 0, LG_MSG_LOCK_FAILED,
- "Unable to acquire lock");
- return;
- }
- gf_proc_dump_build_key(key, prefix, "maxclients");
- gf_proc_dump_write(key, "%d", clienttable->max_clients);
- gf_proc_dump_build_key(key, prefix, "first_free");
- gf_proc_dump_write(key, "%d", clienttable->first_free);
- for (i = 0; i < clienttable->max_clients; i++) {
- if (GF_CLIENTENTRY_ALLOCATED ==
- clienttable->cliententries[i].next_free) {
- gf_proc_dump_build_key(key, prefix, "cliententry[%d]", i);
- gf_proc_dump_add_section("%s", key);
- cliententry_dump(&clienttable->cliententries[i], key);
- }
- }
- }
- UNLOCK(&clienttable->lock);
-}
-
-void
client_ctx_dump(client_t *client, char *prefix)
{
#if 0 /* TBD, FIXME */
diff --git a/libglusterfs/src/common-utils.c b/libglusterfs/src/common-utils.c
index 8e2ffa3accd..682cbf28055 100644
--- a/libglusterfs/src/common-utils.c
+++ b/libglusterfs/src/common-utils.c
@@ -37,6 +37,9 @@
#ifndef GF_LINUX_HOST_OS
#include <sys/resource.h>
#endif
+#ifdef HAVE_SYNCFS_SYS
+#include <sys/syscall.h>
+#endif
#include "glusterfs/compat-errno.h"
#include "glusterfs/common-utils.h"
@@ -45,13 +48,12 @@
#include "glusterfs/stack.h"
#include "glusterfs/lkowner.h"
#include "glusterfs/syscall.h"
-#include "cli1-xdr.h"
#include "glusterfs/globals.h"
#define XXH_INLINE_ALL
#include "xxhash.h"
#include <ifaddrs.h>
#include "glusterfs/libglusterfs-messages.h"
-#include "protocol-common.h"
+#include "glusterfs/glusterfs-acl.h"
#ifdef __FreeBSD__
#include <pthread_np.h>
#undef BIT_SET
@@ -77,6 +79,15 @@ char *vol_type_str[] = {
typedef int32_t (*rw_op_t)(int32_t fd, char *buf, int32_t size);
typedef int32_t (*rwv_op_t)(int32_t fd, const struct iovec *buf, int32_t size);
+char *xattrs_to_heal[] = {"user.",
+ POSIX_ACL_ACCESS_XATTR,
+ POSIX_ACL_DEFAULT_XATTR,
+ QUOTA_LIMIT_KEY,
+ QUOTA_LIMIT_OBJECTS_KEY,
+ GF_SELINUX_XATTR_KEY,
+ GF_XATTR_MDATA_KEY,
+ NULL};
+
void
gf_xxh64_wrapper(const unsigned char *data, size_t const len,
unsigned long long const seed, char *xxh64)
@@ -304,8 +315,7 @@ mkdir_p(char *path, mode_t mode, gf_boolean_t allow_symlinks)
dir[i] = '\0';
ret = sys_mkdir(dir, mode);
if (ret && errno != EEXIST) {
- gf_msg("", GF_LOG_ERROR, errno, LG_MSG_DIR_OP_FAILED,
- "Failed due to reason");
+ gf_smsg("", GF_LOG_ERROR, errno, LG_MSG_DIR_OP_FAILED, NULL);
goto out;
}
@@ -316,10 +326,8 @@ mkdir_p(char *path, mode_t mode, gf_boolean_t allow_symlinks)
if (S_ISLNK(stbuf.st_mode)) {
ret = -1;
- gf_msg("", GF_LOG_ERROR, 0, LG_MSG_DIR_IS_SYMLINK,
- "%s is a "
- "symlink",
- dir);
+ gf_smsg("", GF_LOG_ERROR, 0, LG_MSG_DIR_IS_SYMLINK, "dir=%s",
+ dir, NULL);
goto out;
}
}
@@ -332,10 +340,10 @@ mkdir_p(char *path, mode_t mode, gf_boolean_t allow_symlinks)
if (ret == 0)
errno = 0;
ret = -1;
- gf_msg("", GF_LOG_ERROR, errno, LG_MSG_DIR_OP_FAILED,
- "Failed"
- " to create directory, possibly some of the components"
- " were not directories");
+ gf_smsg("", GF_LOG_ERROR, errno, LG_MSG_DIR_OP_FAILED,
+ "possibly some of the components"
+ " were not directories",
+ NULL);
goto out;
}
@@ -408,10 +416,8 @@ gf_rev_dns_lookup(const char *ip)
/* Get the FQDN */
ret = gf_get_hostname_from_ip((char *)ip, &fqdn);
if (ret != 0) {
- gf_msg("resolver", GF_LOG_INFO, errno, LG_MSG_RESOLVE_HOSTNAME_FAILED,
- "could not resolve "
- "hostname for %s",
- ip);
+ gf_smsg("resolver", GF_LOG_INFO, errno, LG_MSG_RESOLVE_HOSTNAME_FAILED,
+ "hostname=%s", ip, NULL);
}
out:
return fqdn;
@@ -432,7 +438,7 @@ gf_resolve_path_parent(const char *path)
GF_VALIDATE_OR_GOTO(THIS->name, path, out);
- if (strlen(path) <= 0) {
+ if (0 == strlen(path)) {
gf_msg_callingfn(THIS->name, GF_LOG_DEBUG, 0, LG_MSG_INVALID_STRING,
"invalid string for 'path'");
goto out;
@@ -500,9 +506,8 @@ gf_resolve_ip6(const char *hostname, uint16_t port, int family, void **dnscache,
}
if ((ret = getaddrinfo(hostname, port_str, &hints, &cache->first)) !=
0) {
- gf_msg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETADDRINFO_FAILED,
- "getaddrinfo failed (family:%d) (%s)", family,
- gai_strerror(ret));
+ gf_smsg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETADDRINFO_FAILED,
+ "family=%d", family, "ret=%s", gai_strerror(ret), NULL);
GF_FREE(*dnscache);
*dnscache = NULL;
@@ -519,10 +524,8 @@ gf_resolve_ip6(const char *hostname, uint16_t port, int family, void **dnscache,
cache->next->ai_addrlen, host, sizeof(host), service,
sizeof(service), NI_NUMERICHOST);
if (ret != 0) {
- gf_msg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
- "getnameinfo failed"
- " (%s)",
- gai_strerror(ret));
+ gf_smsg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
+ "ret=%s", gai_strerror(ret), NULL);
goto err;
}
@@ -541,10 +544,8 @@ gf_resolve_ip6(const char *hostname, uint16_t port, int family, void **dnscache,
cache->next->ai_addrlen, host, sizeof(host), service,
sizeof(service), NI_NUMERICHOST);
if (ret != 0) {
- gf_msg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
- "getnameinfo failed"
- " (%s)",
- gai_strerror(ret));
+ gf_smsg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
+ "ret=%s", gai_strerror(ret), NULL);
goto err;
}
@@ -576,8 +577,14 @@ struct dnscache *
gf_dnscache_init(time_t ttl)
{
struct dnscache *cache = GF_MALLOC(sizeof(*cache), gf_common_mt_dnscache);
- if (cache) {
- cache->cache_dict = NULL;
+ if (!cache)
+ return NULL;
+
+ cache->cache_dict = dict_new();
+ if (!cache->cache_dict) {
+ GF_FREE(cache);
+ cache = NULL;
+ } else {
cache->ttl = ttl;
}
@@ -585,6 +592,20 @@ gf_dnscache_init(time_t ttl)
}
/**
+ * gf_dnscache_deinit -- cleanup resources used by struct dnscache
+ */
+void
+gf_dnscache_deinit(struct dnscache *cache)
+{
+ if (!cache) {
+ gf_msg_plain(GF_LOG_WARNING, "dnscache is NULL");
+ return;
+ }
+ dict_unref(cache->cache_dict);
+ GF_FREE(cache);
+}
+
+/**
* gf_dnscache_entry_init -- Initialize a dnscache entry
*
* @return: SUCCESS: Pointer to an allocated dnscache entry struct
@@ -632,12 +653,6 @@ gf_rev_dns_lookup_cached(const char *ip, struct dnscache *dnscache)
if (!dnscache)
goto out;
- if (!dnscache->cache_dict) {
- dnscache->cache_dict = dict_new();
- if (!dnscache->cache_dict) {
- goto out;
- }
- }
cache = dnscache->cache_dict;
/* Quick cache lookup to see if we already hold it */
@@ -645,7 +660,7 @@ gf_rev_dns_lookup_cached(const char *ip, struct dnscache *dnscache)
if (entrydata) {
dnsentry = (struct dnscache_entry *)entrydata->data;
/* First check the TTL & timestamp */
- if (time(NULL) - dnsentry->timestamp > dnscache->ttl) {
+ if (gf_time() - dnsentry->timestamp > dnscache->ttl) {
gf_dnscache_entry_deinit(dnsentry);
entrydata->data = NULL; /* Mark this as 'null' so
* dict_del () doesn't try free
@@ -676,23 +691,16 @@ gf_rev_dns_lookup_cached(const char *ip, struct dnscache *dnscache)
from_cache = _gf_false;
out:
/* Insert into the cache */
- if (fqdn && !from_cache) {
+ if (fqdn && !from_cache && ip) {
struct dnscache_entry *entry = gf_dnscache_entry_init();
- if (!entry) {
- goto out;
+ if (entry) {
+ entry->fqdn = fqdn;
+ entry->ip = gf_strdup(ip);
+ entry->timestamp = gf_time();
+ entrydata = bin_to_data(entry, sizeof(*entry));
+ dict_set(cache, (char *)ip, entrydata);
}
- entry->fqdn = fqdn;
- if (!ip) {
- gf_dnscache_entry_deinit(entry);
- goto out;
- }
-
- entry->ip = gf_strdup(ip);
- entry->timestamp = time(NULL);
-
- entrydata = bin_to_data(entry, sizeof(*entry));
- dict_set(cache, (char *)ip, entrydata);
}
return fqdn;
}
@@ -901,7 +909,7 @@ gf_print_trace(int32_t signum, glusterfs_ctx_t *ctx)
char msg[1024] = {
0,
};
- char timestr[64] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
call_stack_t *stack = NULL;
@@ -941,7 +949,7 @@ gf_print_trace(int32_t signum, glusterfs_ctx_t *ctx)
{
/* Dump the timestamp of the crash too, so the previous logs
can be related */
- gf_time_fmt(timestr, sizeof timestr, time(NULL), gf_timefmt_FT);
+ gf_time_fmt(timestr, sizeof timestr, gf_time(), gf_timefmt_FT);
gf_msg_plain_nomem(GF_LOG_ALERT, "time of crash: ");
gf_msg_plain_nomem(GF_LOG_ALERT, timestr);
}
@@ -1943,6 +1951,74 @@ gf_string2boolean(const char *str, gf_boolean_t *b)
}
int
+gf_strn2boolean(const char *str, const int len, gf_boolean_t *b)
+{
+ if (str == NULL) {
+ gf_msg_callingfn(THIS->name, GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
+ "argument invalid");
+ return -1;
+ }
+
+ switch (len) {
+ case 1:
+ if (strcasecmp(str, "1") == 0) {
+ *b = _gf_true;
+ return 0;
+ } else if (strcasecmp(str, "0") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ case 2:
+ if (strcasecmp(str, "on") == 0) {
+ *b = _gf_true;
+ return 0;
+ } else if (strcasecmp(str, "no") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ case 3:
+ if (strcasecmp(str, "yes") == 0) {
+ *b = _gf_true;
+ return 0;
+ } else if (strcasecmp(str, "off") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ case 4:
+ if (strcasecmp(str, "true") == 0) {
+ *b = _gf_true;
+ return 0;
+ }
+ break;
+ case 5:
+ if (strcasecmp(str, "false") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ case 6:
+ if (strcasecmp(str, "enable") == 0) {
+ *b = _gf_true;
+ return 0;
+ }
+ break;
+ case 7:
+ if (strcasecmp(str, "disable") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ default:
+ return -1;
+ break;
+ }
+ return -1;
+}
+
+int
gf_lockfd(int fd)
{
struct gf_flock fl;
@@ -2037,8 +2113,8 @@ get_checksum_for_path(char *path, uint32_t *checksum, int op_version)
fd = open(path, O_RDWR);
if (fd == -1) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_PATH_ERROR,
- "Unable to open %s", path);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_PATH_OPEN_FAILED,
+ "path=%s", path, NULL);
goto out;
}
@@ -2071,8 +2147,8 @@ get_file_mtime(const char *path, time_t *stamp)
ret = sys_stat(path, &f_stat);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_FILE_STAT_FAILED,
- "failed to stat %s", path);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_FILE_STAT_FAILED,
+ "path=%s", path, NULL);
goto out;
}
@@ -2131,14 +2207,14 @@ gf_is_ip_in_net(const char *network, const char *ip_str)
/* Convert IP address to a long */
ret = inet_pton(family, ip_str, &ip_buf);
if (ret < 0)
- gf_msg("common-utils", GF_LOG_ERROR, errno, LG_MSG_INET_PTON_FAILED,
- "inet_pton() failed");
+ gf_smsg("common-utils", GF_LOG_ERROR, errno, LG_MSG_INET_PTON_FAILED,
+ NULL);
/* Convert network IP address to a long */
ret = inet_pton(family, net_ip, &net_ip_buf);
if (ret < 0) {
- gf_msg("common-utils", GF_LOG_ERROR, errno, LG_MSG_INET_PTON_FAILED,
- "inet_pton() failed");
+ gf_smsg("common-utils", GF_LOG_ERROR, errno, LG_MSG_INET_PTON_FAILED,
+ NULL);
goto out;
}
@@ -2718,8 +2794,8 @@ gf_boolean_t
gf_sock_union_equal_addr(union gf_sock_union *a, union gf_sock_union *b)
{
if (!a || !b) {
- gf_msg("common-utils", GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
- "Invalid arguments to gf_sock_union_equal_addr");
+ gf_smsg("common-utils", GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
+ "gf_sock_union_equal_addr", NULL);
return _gf_false;
}
@@ -2947,8 +3023,8 @@ gf_roundup_power_of_two(int32_t nr)
int32_t result = 1;
if (nr < 0) {
- gf_msg("common-utils", GF_LOG_WARNING, 0, LG_MSG_NEGATIVE_NUM_PASSED,
- "negative number passed");
+ gf_smsg("common-utils", GF_LOG_WARNING, 0, LG_MSG_NEGATIVE_NUM_PASSED,
+ NULL);
result = -1;
goto out;
}
@@ -2971,8 +3047,8 @@ gf_roundup_next_power_of_two(int32_t nr)
int32_t result = 1;
if (nr < 0) {
- gf_msg("common-utils", GF_LOG_WARNING, 0, LG_MSG_NEGATIVE_NUM_PASSED,
- "negative number passed");
+ gf_smsg("common-utils", GF_LOG_WARNING, 0, LG_MSG_NEGATIVE_NUM_PASSED,
+ NULL);
result = -1;
goto out;
}
@@ -2985,16 +3061,6 @@ out:
}
int
-get_vol_type(int type, int dist_count, int brick_count)
-{
- if ((type != GF_CLUSTER_TYPE_TIER) && (type > 0) &&
- (dist_count < brick_count))
- type = type + GF_CLUSTER_TYPE_MAX - 1;
-
- return type;
-}
-
-int
validate_brick_name(char *brick)
{
char *delimiter = NULL;
@@ -3067,7 +3133,7 @@ get_mem_size()
memsize = page_size * num_pages;
#endif
-#if defined GF_DARWIN_HOST_OS
+#if defined GF_DARWIN_HOST_OS || defined __FreeBSD__
size_t len = sizeof(memsize);
int name[] = {CTL_HW, HW_PHYSMEM};
@@ -3156,8 +3222,7 @@ gf_canonicalize_path(char *path)
out:
if (ret)
- gf_msg("common-utils", GF_LOG_ERROR, 0, LG_MSG_PATH_ERROR,
- "Path manipulation failed");
+ gf_smsg("common-utils", GF_LOG_ERROR, 0, LG_MSG_PATH_ERROR, NULL);
GF_FREE(tmppath);
@@ -3211,19 +3276,15 @@ gf_get_reserved_ports()
* continue with older method of using any of the available
* port? For now 2nd option is considered.
*/
- gf_msg("glusterfs", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
- "could not open the file "
- "/proc/sys/net/ipv4/ip_local_reserved_ports for "
- "getting reserved ports info");
+ gf_smsg("glusterfs", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
+ " /proc/sys/net/ipv4/ip_local_reserved_ports", NULL);
goto out;
}
ret = sys_read(proc_fd, buffer, sizeof(buffer) - 1);
if (ret < 0) {
- gf_msg("glusterfs", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
- "could not read the file %s for"
- " getting reserved ports info",
- proc_file);
+ gf_smsg("glusterfs", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
+ "file=%s", proc_file, NULL);
goto out;
}
@@ -3251,10 +3312,8 @@ gf_process_reserved_ports(unsigned char *ports, uint32_t ceiling)
ports_info = gf_get_reserved_ports();
if (!ports_info) {
- gf_msg("glusterfs", GF_LOG_WARNING, 0, LG_MSG_RESERVED_PORTS_ERROR,
- "Not able to get reserved"
- " ports, hence there is a possibility that glusterfs "
- "may consume reserved port");
+ gf_smsg("glusterfs", GF_LOG_WARNING, 0, LG_MSG_RESERVED_PORTS_ERROR,
+ NULL);
goto out;
}
@@ -3291,8 +3350,8 @@ gf_ports_reserved(char *blocked_port, unsigned char *ports, uint32_t ceiling)
blocked_port[strlen(blocked_port) - 1] = '\0';
if (gf_string2int32(blocked_port, &tmp_port1) == 0) {
if (tmp_port1 > GF_PORT_MAX || tmp_port1 < 0) {
- gf_msg("glusterfs-socket", GF_LOG_WARNING, 0,
- LG_MSG_INVALID_PORT, "invalid port %d", tmp_port1);
+ gf_smsg("glusterfs-socket", GF_LOG_WARNING, 0,
+ LG_MSG_INVALID_PORT, "port=%d", tmp_port1, NULL);
result = _gf_true;
goto out;
} else {
@@ -3303,10 +3362,8 @@ gf_ports_reserved(char *blocked_port, unsigned char *ports, uint32_t ceiling)
BIT_SET(ports, tmp_port1);
}
} else {
- gf_msg("glusterfs-socket", GF_LOG_WARNING, 0, LG_MSG_INVALID_PORT,
- "%s is not a valid port "
- "identifier",
- blocked_port);
+ gf_smsg("glusterfs-socket", GF_LOG_WARNING, 0, LG_MSG_INVALID_PORT,
+ "port=%s", blocked_port, NULL);
result = _gf_true;
goto out;
}
@@ -3408,10 +3465,8 @@ gf_get_hostname_from_ip(char *client_ip, char **hostname)
ret = getnameinfo(client_sockaddr, addr_sz, client_hostname,
sizeof(client_hostname), NULL, 0, 0);
if (ret) {
- gf_msg("common-utils", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
- "Could not lookup hostname "
- "of %s : %s",
- client_ip, gai_strerror(ret));
+ gf_smsg("common-utils", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
+ "ip=%s", client_ip, "ret=%s", gai_strerror(ret), NULL);
ret = -1;
goto out;
}
@@ -3440,8 +3495,8 @@ gf_interface_search(char *ip)
ret = getifaddrs(&ifaddr);
if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETIFADDRS_FAILED,
- "getifaddrs() failed: %s\n", gai_strerror(ret));
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETIFADDRS_FAILED, "ret=%s",
+ gai_strerror(ret), NULL);
goto out;
}
@@ -3465,10 +3520,8 @@ gf_interface_search(char *ip)
host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
- "getnameinfo() "
- "failed: %s\n",
- gai_strerror(ret));
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
+ "ret=%s", gai_strerror(ret), NULL);
goto out;
}
@@ -3518,14 +3571,12 @@ get_ip_from_addrinfo(struct addrinfo *addr, char **ip)
break;
default:
- gf_msg("glusterd", GF_LOG_ERROR, 0, LG_MSG_INVALID_FAMILY,
- "Invalid family");
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, LG_MSG_INVALID_FAMILY, NULL);
return NULL;
}
if (!inet_ntop(addr->ai_family, in_addr, buf, sizeof(buf))) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, LG_MSG_CONVERSION_FAILED,
- "String conversion failed");
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, LG_MSG_CONVERSION_FAILED, NULL);
return NULL;
}
@@ -3560,10 +3611,9 @@ gf_is_loopback_localhost(const struct sockaddr *sa, char *hostname)
default:
if (hostname)
- gf_msg("glusterd", GF_LOG_ERROR, 0, LG_MSG_INVALID_FAMILY,
- "unknown "
- "address family %d for %s",
- sa->sa_family, hostname);
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, LG_MSG_INVALID_FAMILY,
+ "family=%d", sa->sa_family, "hostname=%s", hostname,
+ NULL);
break;
}
@@ -3593,8 +3643,8 @@ gf_is_local_addr(char *hostname)
ret = getaddrinfo(hostname, NULL, &hints, &result);
if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETADDRINFO_FAILED,
- "error in getaddrinfo: %s\n", gai_strerror(ret));
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETADDRINFO_FAILED,
+ "ret=%s", gai_strerror(ret), NULL);
goto out;
}
@@ -3642,15 +3692,15 @@ gf_is_same_address(char *name1, char *name2)
gai_err = getaddrinfo(name1, NULL, &hints, &addr1);
if (gai_err != 0) {
- gf_msg(name1, GF_LOG_WARNING, 0, LG_MSG_GETADDRINFO_FAILED,
- "error in getaddrinfo: %s\n", gai_strerror(gai_err));
+ gf_smsg(name1, GF_LOG_WARNING, 0, LG_MSG_GETADDRINFO_FAILED, "error=%s",
+ gai_strerror(gai_err), NULL);
goto out;
}
gai_err = getaddrinfo(name2, NULL, &hints, &addr2);
if (gai_err != 0) {
- gf_msg(name2, GF_LOG_WARNING, 0, LG_MSG_GETADDRINFO_FAILED,
- "error in getaddrinfo: %s\n", gai_strerror(gai_err));
+ gf_smsg(name2, GF_LOG_WARNING, 0, LG_MSG_GETADDRINFO_FAILED, "error=%s",
+ gai_strerror(gai_err), NULL);
goto out;
}
@@ -3787,8 +3837,10 @@ gf_set_volfile_server_common(cmd_args_t *cmd_args, const char *host,
if ((!strcmp(tmp->volfile_server, server->volfile_server) &&
!strcmp(tmp->transport, server->transport) &&
(tmp->port == server->port))) {
- errno = EEXIST;
- ret = -1;
+ /* Duplicate option given, log and ignore */
+ gf_smsg("gluster", GF_LOG_INFO, EEXIST, LG_MSG_DUPLICATE_ENTRY,
+ NULL);
+ ret = 0;
goto out;
}
}
@@ -3965,15 +4017,14 @@ gf_thread_set_vname(pthread_t thread, const char *name, va_list args)
sizeof(thread_name) - sizeof(GF_THREAD_NAME_PREFIX) + 1,
name, args);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PTHREAD_NAMING_FAILED,
- "Failed to compose thread name ('%s')", name);
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PTHREAD_NAMING_FAILED,
+ "name=%s", name, NULL);
return;
}
if (ret >= sizeof(thread_name)) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PTHREAD_NAMING_FAILED,
- "Thread name is too long. It has been truncated ('%s')",
- thread_name);
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_THREAD_NAME_TOO_LONG,
+ "name=%s", thread_name, NULL);
}
#ifdef GF_LINUX_HOST_OS
@@ -3987,8 +4038,8 @@ gf_thread_set_vname(pthread_t thread, const char *name, va_list args)
ret = ENOSYS;
#endif
if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_WARNING, ret, LG_MSG_PTHREAD_NAMING_FAILED,
- "Could not set thread name: %s", thread_name);
+ gf_smsg(THIS->name, GF_LOG_WARNING, ret, LG_MSG_SET_THREAD_FAILED,
+ "name=%s", thread_name, NULL);
}
}
@@ -4023,8 +4074,8 @@ gf_thread_vcreate(pthread_t *thread, const pthread_attr_t *attr,
ret = pthread_create(thread, attr, start_routine, arg);
if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_ERROR, ret, LG_MSG_PTHREAD_FAILED,
- "Thread creation failed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ret, LG_MSG_THREAD_CREATE_FAILED,
+ NULL);
ret = -1;
} else if (name != NULL) {
gf_thread_set_vname(*thread, name, args);
@@ -4060,8 +4111,8 @@ gf_thread_create_detached(pthread_t *thread, void *(*start_routine)(void *),
ret = pthread_attr_init(&attr);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, ret, LG_MSG_PTHREAD_ATTR_INIT_FAILED,
- "Thread attribute initialization failed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ret, LG_MSG_PTHREAD_ATTR_INIT_FAILED,
+ NULL);
return -1;
}
@@ -4083,8 +4134,7 @@ gf_skip_header_section(int fd, int header_len)
ret = sys_lseek(fd, header_len, SEEK_SET);
if (ret == (off_t)-1) {
- gf_msg("", GF_LOG_ERROR, 0, LG_MSG_SKIP_HEADER_FAILED,
- "Failed to skip header section");
+ gf_smsg("", GF_LOG_ERROR, 0, LG_MSG_SKIP_HEADER_FAILED, NULL);
} else {
ret = 0;
}
@@ -4097,6 +4147,14 @@ gf_skip_header_section(int fd, int header_len)
gf_boolean_t
gf_is_pid_running(int pid)
{
+#ifdef __FreeBSD__
+ int ret = -1;
+
+ ret = sys_kill(pid, 0);
+ if (ret < 0) {
+ return _gf_false;
+ }
+#else
char fname[32] = {
0,
};
@@ -4110,6 +4168,7 @@ gf_is_pid_running(int pid)
}
sys_close(fd);
+#endif
return _gf_true;
}
@@ -4134,8 +4193,8 @@ gf_is_service_running(char *pidfile, int *pid)
ret = fscanf(file, "%d", pid);
if (ret <= 0) {
- gf_msg("", GF_LOG_ERROR, errno, LG_MSG_FILE_OP_FAILED,
- "Unable to read pidfile: %s", pidfile);
+ gf_smsg("", GF_LOG_ERROR, errno, LG_MSG_FILE_OP_FAILED, "pidfile=%s",
+ pidfile, NULL);
*pid = -1;
running = _gf_false;
goto out;
@@ -4209,10 +4268,10 @@ gf_check_log_format(const char *value)
log_format = gf_logformat_withmsgid;
if (log_format == -1)
- gf_msg(
- THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_LOG,
- "Invalid log-format. possible values are " GF_LOG_FORMAT_NO_MSG_ID
- "|" GF_LOG_FORMAT_WITH_MSG_ID);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_LOG,
+ "possible_values=" GF_LOG_FORMAT_NO_MSG_ID
+ "|" GF_LOG_FORMAT_WITH_MSG_ID,
+ NULL);
return log_format;
}
@@ -4228,9 +4287,9 @@ gf_check_logger(const char *value)
logger = gf_logger_syslog;
if (logger == -1)
- gf_msg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_LOG,
- "Invalid logger. possible values are " GF_LOGGER_GLUSTER_LOG
- "|" GF_LOGGER_SYSLOG);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_LOG,
+ "possible_values=" GF_LOGGER_GLUSTER_LOG "|" GF_LOGGER_SYSLOG,
+ NULL);
return logger;
}
@@ -4302,8 +4361,8 @@ gf_set_timestamp(const char *src, const char *dest)
ret = sys_stat(src, &sb);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, LG_MSG_FILE_STAT_FAILED,
- "stat on %s", src);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, LG_MSG_FILE_STAT_FAILED,
+ "stat=%s", src, NULL);
goto out;
}
/* The granularity is nano seconds if `utimensat()` is available,
@@ -4319,8 +4378,8 @@ gf_set_timestamp(const char *src, const char *dest)
/* dirfd = 0 is ignored because `dest` is an absolute path. */
ret = sys_utimensat(AT_FDCWD, dest, new_time, AT_SYMLINK_NOFOLLOW);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, LG_MSG_UTIMENSAT_FAILED,
- "utimensat on %s", dest);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, LG_MSG_UTIMENSAT_FAILED,
+ "dest=%s", dest, NULL);
}
#else
new_time[0].tv_sec = sb.st_atime;
@@ -4331,8 +4390,8 @@ gf_set_timestamp(const char *src, const char *dest)
ret = sys_utimes(dest, new_time);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, LG_MSG_UTIMES_FAILED,
- "utimes on %s", dest);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, LG_MSG_UTIMES_FAILED,
+ "dest=%s", dest, NULL);
}
#endif
out:
@@ -4351,7 +4410,7 @@ gf_backtrace_end(char *buf, size_t frames)
frames = min(frames, GF_BACKTRACE_LEN - pos - 1);
- if (frames <= 0)
+ if (0 == frames)
return;
memset(buf + pos, ')', frames);
@@ -4397,8 +4456,8 @@ gf_backtrace_fillframes(char *buf)
*/
ret = sys_unlink(tmpl);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_INFO, 0, LG_MSG_FILE_OP_FAILED,
- "Unable to delete temporary file: %s", tmpl);
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, LG_MSG_FILE_DELETE_FAILED,
+ "temporary_file=%s", tmpl, NULL);
}
/*The most recent two frames are the calling function and
@@ -4458,8 +4517,7 @@ gf_backtrace_save(char *buf)
if ((0 == gf_backtrace_fillframes(bt)))
return bt;
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_BACKTRACE_SAVE_FAILED,
- "Failed to save the backtrace.");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_BACKTRACE_SAVE_FAILED, NULL);
return NULL;
}
@@ -4550,16 +4608,16 @@ gf_build_absolute_path(char *current_path, char *relative_path, char **path)
*/
currentpath_len = strlen(current_path);
if (current_path[0] != '/' || (currentpath_len > PATH_MAX)) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
- "Wrong value for current path %s", current_path);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_WRONG_VALUE,
+ "current-path=%s", current_path, NULL);
ret = -EINVAL;
goto err;
}
relativepath_len = strlen(relative_path);
if (relative_path[0] == '/' || (relativepath_len > PATH_MAX)) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
- "Wrong value for relative path %s", relative_path);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_WRONG_VALUE,
+ "relative-path=%s", relative_path, NULL);
ret = -EINVAL;
goto err;
}
@@ -4675,8 +4733,9 @@ recursive_rmdir(const char *delete_path)
goto out;
}
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
- while (entry) {
+ while ((entry = sys_readdir(dir, scratch))) {
+ if (gf_irrelevant_entry(entry))
+ continue;
snprintf(path, PATH_MAX, "%s/%s", delete_path, entry->d_name);
ret = sys_lstat(path, &st);
if (ret == -1) {
@@ -4702,8 +4761,6 @@ recursive_rmdir(const char *delete_path)
gf_msg_debug(this->name, 0, "%s %s",
ret ? "Failed to remove" : "Removed", entry->d_name);
-
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
}
ret = sys_closedir(dir);
@@ -5027,8 +5084,8 @@ gf_zero_fill_stat(struct iatt *buf)
gf_boolean_t
gf_is_valid_xattr_namespace(char *key)
{
- static char *xattr_namespaces[] = {"trusted.", "security.", "system.",
- "user.", NULL};
+ static char *xattr_namespaces[] = {"trusted.", "system.", "user.",
+ "security.", NULL};
int i = 0;
for (i = 0; xattr_namespaces[i]; i++) {
@@ -5251,62 +5308,6 @@ glusterfs_compute_sha256(const unsigned char *content, size_t size,
return 0;
}
-char *
-get_struct_variable(int mem_num, gf_gsync_status_t *sts_val)
-{
- switch (mem_num) {
- case 0:
- return (sts_val->node);
- case 1:
- return (sts_val->master);
- case 2:
- return (sts_val->brick);
- case 3:
- return (sts_val->slave_user);
- case 4:
- return (sts_val->slave);
- case 5:
- return (sts_val->slave_node);
- case 6:
- return (sts_val->worker_status);
- case 7:
- return (sts_val->crawl_status);
- case 8:
- return (sts_val->last_synced);
- case 9:
- return (sts_val->entry);
- case 10:
- return (sts_val->data);
- case 11:
- return (sts_val->meta);
- case 12:
- return (sts_val->failures);
- case 13:
- return (sts_val->checkpoint_time);
- case 14:
- return (sts_val->checkpoint_completed);
- case 15:
- return (sts_val->checkpoint_completion_time);
- case 16:
- return (sts_val->brick_host_uuid);
- case 17:
- return (sts_val->last_synced_utc);
- case 18:
- return (sts_val->checkpoint_time_utc);
- case 19:
- return (sts_val->checkpoint_completion_time_utc);
- case 20:
- return (sts_val->slavekey);
- case 21:
- return (sts_val->session_slave);
- default:
- goto out;
- }
-
-out:
- return NULL;
-}
-
/* * Safe wrapper function for strncpy.
* This wrapper makes sure that when there is no null byte among the first n in
* source srting for strncpy function call, the string placed in dest will be
@@ -5421,3 +5422,44 @@ gf_d_type_from_ia_type(ia_type_t type)
return DT_UNKNOWN;
}
}
+
+int
+gf_nanosleep(uint64_t nsec)
+{
+ struct timespec req;
+ struct timespec rem;
+ int ret = -1;
+
+ req.tv_sec = nsec / GF_SEC_IN_NS;
+ req.tv_nsec = nsec % GF_SEC_IN_NS;
+
+ do {
+ ret = nanosleep(&req, &rem);
+ req = rem;
+ } while (ret == -1 && errno == EINTR);
+
+ return ret;
+}
+
+int
+gf_syncfs(int fd)
+{
+ int ret = 0;
+#if defined(HAVE_SYNCFS)
+ /* Linux with glibc recent enough. */
+ ret = syncfs(fd);
+#elif defined(HAVE_SYNCFS_SYS)
+ /* Linux with no library function. */
+ ret = syscall(SYS_syncfs, fd);
+#else
+ /* Fallback to generic UNIX stuff. */
+ sync();
+#endif
+ return ret;
+}
+
+char **
+get_xattrs_to_heal()
+{
+ return xattrs_to_heal;
+}
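The common-utils.c hunks above add several small helpers: gf_strn2boolean() for length-aware boolean parsing, gf_nanosleep() for an EINTR-safe sleep, gf_syncfs() with syscall()/sync() fallbacks, and get_xattrs_to_heal() exposing the new xattrs_to_heal list. A minimal usage sketch, assuming only the signatures shown in the hunks above; the caller, its name, and the log message are illustrative and not part of the patch:

    /* Hypothetical caller exercising the new helpers; everything except the
     * helper calls themselves is illustrative. */
    static void
    example_flush_if_enabled(int fd, const char *opt)
    {
        gf_boolean_t on = _gf_false;

        /* length-aware parse: accepts "1"/"on"/"yes"/"true"/"enable", etc. */
        if (gf_strn2boolean(opt, strlen(opt), &on) != 0 || !on)
            return;

        /* ~250ms back-off; gf_nanosleep() retries across EINTR internally
         * (GF_SEC_IN_NS is the ns-per-second constant used inside it) */
        gf_nanosleep(GF_SEC_IN_NS / 4);

        /* flush the filesystem backing fd; falls back to syscall(SYS_syncfs)
         * or plain sync() depending on what configure detected */
        if (gf_syncfs(fd) != 0)
            gf_msg_debug("example", 0, "syncfs failed on fd %d", fd);
    }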
diff --git a/libglusterfs/src/compat.c b/libglusterfs/src/compat.c
index 877cda282de..8a05a30a8fe 100644
--- a/libglusterfs/src/compat.c
+++ b/libglusterfs/src/compat.c
@@ -176,7 +176,7 @@ solaris_xattr_resolve_path(const char *real_path, char **path)
if (!ret && export_path) {
strcat(export_path, "/" GF_SOLARIS_XATTR_DIR);
if (lstat(export_path, &statbuf)) {
- ret = mkdir(export_path, 0777);
+ ret = mkdir(export_path, 0755);
if (ret && (errno != EEXIST)) {
gf_msg_debug(THIS->name, 0,
"mkdir failed,"
diff --git a/libglusterfs/src/ctx.c b/libglusterfs/src/ctx.c
index 4a001c29209..3d890b04ec9 100644
--- a/libglusterfs/src/ctx.c
+++ b/libglusterfs/src/ctx.c
@@ -37,8 +37,12 @@ glusterfs_ctx_new()
ctx->log.loglevel = DEFAULT_LOG_LEVEL;
-#ifdef RUN_WITH_VALGRIND
- ctx->cmd_args.valgrind = _gf_true;
+#if defined(RUN_WITH_MEMCHECK)
+ ctx->cmd_args.vgtool = _gf_memcheck;
+#elif defined(RUN_WITH_DRD)
+ ctx->cmd_args.vgtool = _gf_drd;
+#else
+ ctx->cmd_args.vgtool = _gf_none;
#endif
/* lock is never destroyed! */
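The ctx.c hunk above replaces the single RUN_WITH_VALGRIND boolean with a vgtool selector so a build can request either memcheck or drd. A small sketch of how such a selector might be consumed; only the three enumerators (_gf_none, _gf_memcheck, _gf_drd) and the cmd_args.vgtool field come from the hunk, the helper itself is hypothetical:

    /* Hypothetical helper mapping the new vgtool selector to a tool name;
     * the enumerators and the vgtool field are taken from the patch above. */
    static const char *
    example_vgtool_name(const cmd_args_t *args)
    {
        switch (args->vgtool) {
            case _gf_memcheck:
                return "memcheck";
            case _gf_drd:
                return "drd";
            case _gf_none:
            default:
                return "none";
        }
    }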
diff --git a/libglusterfs/src/defaults-tmpl.c b/libglusterfs/src/defaults-tmpl.c
index 82e7f78d7f3..3cf707f42aa 100644
--- a/libglusterfs/src/defaults-tmpl.c
+++ b/libglusterfs/src/defaults-tmpl.c
@@ -171,8 +171,11 @@ default_notify(xlator_t *this, int32_t event, void *data, ...)
/* Make sure this is not a daemon with master xlator */
pthread_mutex_lock(&graph->mutex);
{
- graph->used = 0;
- pthread_cond_broadcast(&graph->child_down_cond);
+ if (graph->parent_down ==
+ graph_total_client_xlator(graph)) {
+ graph->used = 0;
+ pthread_cond_broadcast(&graph->child_down_cond);
+ }
}
pthread_mutex_unlock(&graph->mutex);
}
diff --git a/libglusterfs/src/dict.c b/libglusterfs/src/dict.c
index b44dda33f00..1d9be9217a6 100644
--- a/libglusterfs/src/dict.c
+++ b/libglusterfs/src/dict.c
@@ -57,7 +57,6 @@ get_new_data()
GF_ATOMIC_INIT(data->refcount, 0);
data->is_static = _gf_false;
- LOCK_INIT(&data->lock);
return data;
}
@@ -98,6 +97,8 @@ get_new_dict_full(int size_hint)
}
}
+ dict->free_pair.key = NULL;
+ dict->totkvlen = 0;
LOCK_INIT(&dict->lock);
return dict;
@@ -293,8 +294,6 @@ void
data_destroy(data_t *data)
{
if (data) {
- LOCK_DESTROY(&data->lock);
-
if (!data->is_static)
GF_FREE(data->data);
@@ -325,7 +324,6 @@ data_copy(data_t *old)
}
newdata->data_type = old->data_type;
- LOCK_INIT(&newdata->lock);
return newdata;
err_out:
@@ -339,7 +337,7 @@ err_out:
* checked by callers.
*/
static data_pair_t *
-dict_lookup_common(dict_t *this, char *key, uint32_t hash)
+dict_lookup_common(const dict_t *this, const char *key, const uint32_t hash)
{
int hashval = 0;
data_pair_t *pair;
@@ -386,8 +384,8 @@ dict_lookup(dict_t *this, char *key, data_t **data)
}
static int32_t
-dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
- gf_boolean_t replace)
+dict_set_lk(dict_t *this, char *key, const int key_len, data_t *value,
+ const uint32_t hash, gf_boolean_t replace)
{
int hashval = 0;
data_pair_t *pair;
@@ -403,7 +401,7 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
key_free = 1;
key_hash = (uint32_t)XXH64(key, keylen, 0);
} else {
- keylen = strlen(key);
+ keylen = key_len;
key_hash = hash;
}
@@ -413,6 +411,7 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
if (pair) {
data_t *unref_data = pair->value;
pair->value = data_ref(value);
+ this->totkvlen += (value->len - unref_data->len);
data_unref(unref_data);
if (key_free)
GF_FREE(key);
@@ -421,16 +420,15 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
}
}
- if (this->free_pair_in_use) {
+    if (this->free_pair.key) { /* the free_pair is already in use */
pair = mem_get(THIS->ctx->dict_pair_pool);
if (!pair) {
if (key_free)
GF_FREE(key);
return -1;
}
- } else {
+    } else { /* use the embedded free_pair */
pair = &this->free_pair;
- this->free_pair_in_use = _gf_true;
}
if (key_free) {
@@ -440,9 +438,7 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
} else {
pair->key = (char *)GF_MALLOC(keylen + 1, gf_common_mt_char);
if (!pair->key) {
- if (pair == &this->free_pair) {
- this->free_pair_in_use = _gf_false;
- } else {
+ if (pair != &this->free_pair) {
mem_put(pair);
}
return -1;
@@ -451,6 +447,7 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
}
pair->key_hash = key_hash;
pair->value = data_ref(value);
+ this->totkvlen += (keylen + 1 + value->len);
/* If the divisor is 1, the modulo is always 0,
* in such case avoid hash calculation.
@@ -500,12 +497,12 @@ dict_setn(dict_t *this, char *key, const int keylen, data_t *value)
}
if (key) {
- key_hash = (int32_t)XXH64(key, keylen, 0);
+ key_hash = (uint32_t)XXH64(key, keylen, 0);
}
LOCK(&this->lock);
- ret = dict_set_lk(this, key, value, key_hash, 1);
+ ret = dict_set_lk(this, key, keylen, value, key_hash, 1);
UNLOCK(&this->lock);
@@ -539,7 +536,7 @@ dict_addn(dict_t *this, char *key, const int keylen, data_t *value)
LOCK(&this->lock);
- ret = dict_set_lk(this, key, value, key_hash, 0);
+ ret = dict_set_lk(this, key, keylen, value, key_hash, 0);
UNLOCK(&this->lock);
@@ -648,6 +645,7 @@ dict_deln(dict_t *this, char *key, const int keylen)
else
this->members[hashval] = pair->hash_next;
+ this->totkvlen -= pair->value->len;
data_unref(pair->value);
if (pair->prev)
@@ -658,9 +656,10 @@ dict_deln(dict_t *this, char *key, const int keylen)
if (pair->next)
pair->next->prev = pair->prev;
+ this->totkvlen -= (strlen(pair->key) + 1);
GF_FREE(pair->key);
if (pair == &this->free_pair) {
- this->free_pair_in_use = _gf_false;
+ this->free_pair.key = NULL;
} else {
mem_put(pair);
}
@@ -700,16 +699,18 @@ dict_destroy(dict_t *this)
GF_FREE(prev->key);
if (prev != &this->free_pair) {
mem_put(prev);
+ } else {
+ this->free_pair.key = NULL;
}
total_pairs++;
prev = pair;
}
+ this->totkvlen = 0;
if (this->members != &this->members_internal) {
mem_put(this->members);
}
- GF_FREE(this->extra_free);
free(this->extra_stdfree);
/* update 'ctx->stats.dict.details' using max_count */
@@ -808,6 +809,7 @@ int_to_data(int64_t value)
data->len = gf_asprintf(&data->data, "%" PRId64, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
data->len++; /* account for terminating NULL */
@@ -827,6 +829,7 @@ data_from_int64(int64_t value)
data->len = gf_asprintf(&data->data, "%" PRId64, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
data->len++; /* account for terminating NULL */
@@ -846,6 +849,7 @@ data_from_int32(int32_t value)
data->len = gf_asprintf(&data->data, "%" PRId32, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -866,6 +870,7 @@ data_from_int16(int16_t value)
data->len = gf_asprintf(&data->data, "%" PRId16, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -886,6 +891,7 @@ data_from_int8(int8_t value)
data->len = gf_asprintf(&data->data, "%d", value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -906,6 +912,7 @@ data_from_uint64(uint64_t value)
data->len = gf_asprintf(&data->data, "%" PRIu64, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -926,6 +933,8 @@ data_from_double(double value)
data->len = gf_asprintf(&data->data, "%f", value);
if (data->len == -1) {
+ gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
data->len++; /* account for terminating NULL */
@@ -945,6 +954,7 @@ data_from_uint32(uint32_t value)
data->len = gf_asprintf(&data->data, "%" PRIu32, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -964,6 +974,8 @@ data_from_uint16(uint16_t value)
}
data->len = gf_asprintf(&data->data, "%" PRIu16, value);
if (-1 == data->len) {
+ gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -1099,117 +1111,146 @@ data_to_int64(data_t *data)
{
VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
- return (int64_t)strtoull(data->data, NULL, 0);
+ char *endptr = NULL;
+ int64_t value = 0;
+
+ errno = 0;
+ value = strtoll(data->data, &endptr, 0);
+
+ if (endptr && *endptr != '\0')
+ /* Unrecognized characters at the end of string. */
+ errno = EINVAL;
+ if (errno) {
+ gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
+ LG_MSG_DATA_CONVERSION_ERROR,
+ "Error in data conversion: '%s' can't "
+ "be represented as int64_t",
+ data->data);
+ return -1;
+ }
+ return value;
}
+/* Like above, but with a signed range check. */
+
+#define DATA_TO_RANGED_SIGNED(endptr, value, data, type, min, max) \
+ do { \
+ errno = 0; \
+ value = strtoll(data->data, &endptr, 0); \
+ if (endptr && *endptr != '\0') \
+ errno = EINVAL; \
+ if (errno || value > max || value < min) { \
+ gf_msg_callingfn("dict", GF_LOG_WARNING, errno, \
+ LG_MSG_DATA_CONVERSION_ERROR, \
+ "Error in data conversion: '%s' can't " \
+ "be represented as " #type, \
+ data->data); \
+ return -1; \
+ } \
+ return (type)value; \
+ } while (0)
+
int32_t
data_to_int32(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
+ char *endptr = NULL;
+ int64_t value = 0;
- return strtoul(data->data, NULL, 0);
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
+ DATA_TO_RANGED_SIGNED(endptr, value, data, int32_t, INT_MIN, INT_MAX);
}
int16_t
data_to_int16(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
-
- int16_t value = 0;
-
- errno = 0;
- value = strtol(data->data, NULL, 0);
+ char *endptr = NULL;
+ int64_t value = 0;
- if ((value > SHRT_MAX) || (value < SHRT_MIN)) {
- errno = ERANGE;
- gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
- LG_MSG_DATA_CONVERSION_ERROR,
- "Error in data"
- " conversion: detected overflow");
- return -1;
- }
-
- return (int16_t)value;
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
+ DATA_TO_RANGED_SIGNED(endptr, value, data, int16_t, SHRT_MIN, SHRT_MAX);
}
int8_t
data_to_int8(data_t *data)
{
+ char *endptr = NULL;
+ int64_t value = 0;
+
VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
+ DATA_TO_RANGED_SIGNED(endptr, value, data, int8_t, CHAR_MIN, CHAR_MAX);
+}
+
+uint64_t
+data_to_uint64(data_t *data)
+{
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
- int8_t value = 0;
+ char *endptr = NULL;
+ uint64_t value = 0;
errno = 0;
- value = strtol(data->data, NULL, 0);
+ value = strtoull(data->data, &endptr, 0);
- if ((value > SCHAR_MAX) || (value < SCHAR_MIN)) {
- errno = ERANGE;
+ if (endptr && *endptr != '\0')
+ errno = EINVAL;
+ if (errno) {
gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
LG_MSG_DATA_CONVERSION_ERROR,
- "Error in data"
- " conversion: detected overflow");
+ "Error in data conversion: '%s' can't "
+ "be represented as uint64_t",
+ data->data);
return -1;
}
-
- return (int8_t)value;
+ return value;
}
-uint64_t
-data_to_uint64(data_t *data)
-{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+/* Like above, but with an unsigned range check. */
- return strtoll(data->data, NULL, 0);
-}
+#define DATA_TO_RANGED_UNSIGNED(endptr, value, data, type, max) \
+ do { \
+ errno = 0; \
+ value = strtoull(data->data, &endptr, 0); \
+ if (endptr && *endptr != '\0') \
+ errno = EINVAL; \
+ if (errno || value > max) { \
+ gf_msg_callingfn("dict", GF_LOG_WARNING, errno, \
+ LG_MSG_DATA_CONVERSION_ERROR, \
+ "Error in data conversion: '%s' can't " \
+ "be represented as " #type, \
+ data->data); \
+ return -1; \
+ } \
+ return (type)value; \
+ } while (0)
uint32_t
data_to_uint32(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+ char *endptr = NULL;
+ uint64_t value = 0;
- return strtol(data->data, NULL, 0);
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+ DATA_TO_RANGED_UNSIGNED(endptr, value, data, uint32_t, UINT_MAX);
}
uint16_t
data_to_uint16(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
-
- uint16_t value = 0;
-
- errno = 0;
- value = strtol(data->data, NULL, 0);
-
- if ((USHRT_MAX - value) < 0) {
- errno = ERANGE;
- gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
- LG_MSG_DATA_CONVERSION_ERROR,
- "Error in data conversion: "
- "overflow detected");
- return -1;
- }
+ char *endptr = NULL;
+ uint64_t value = 0;
- return (uint16_t)value;
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+ DATA_TO_RANGED_UNSIGNED(endptr, value, data, uint16_t, USHRT_MAX);
}
uint8_t
data_to_uint8(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
-
- errno = 0;
- uint32_t value = strtol(data->data, NULL, 0);
-
- if ((UCHAR_MAX - (uint8_t)value) < 0) {
- errno = ERANGE;
- gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
- LG_MSG_DATA_CONVERSION_ERROR,
- "data "
- "conversion overflow detected");
- return -1;
- }
+ char *endptr = NULL;
+ uint64_t value = 0;
- return (uint8_t)value;
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+ DATA_TO_RANGED_UNSIGNED(endptr, value, data, uint8_t, UCHAR_MAX);
}
char *
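
Reviewer note: a small standalone illustration of the strtoll()-with-endptr pattern used by the new DATA_TO_RANGED_SIGNED/UNSIGNED macros. The helper below is hypothetical and only shows why both the errno check and the *endptr check are needed to reject strings such as "123abc" and out-of-range values:

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Convert 's' to an int32_t, rejecting trailing garbage and overflow,
 * in the same way the new dict conversion macros do. Returns 0 on success. */
static int
str_to_int32(const char *s, int32_t *out)
{
    char *endptr = NULL;
    long long value;

    errno = 0;
    value = strtoll(s, &endptr, 0);
    if (endptr && *endptr != '\0')
        errno = EINVAL;            /* unrecognized trailing characters */
    if (errno || value > INT_MAX || value < INT_MIN)
        return -1;                 /* conversion or range error */

    *out = (int32_t)value;
    return 0;
}

int
main(void)
{
    const char *samples[] = { "42", "0x10", "123abc", "99999999999" };
    int32_t v;

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%-12s -> %s\n", samples[i],
               str_to_int32(samples[i], &v) == 0 ? "ok" : "rejected");
    return 0;
}
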
@@ -1243,8 +1284,8 @@ data_to_iatt(data_t *data, char *key)
* pass more data but are backward compatible (if the initial contents
* of the struct are maintained, of course). */
if (data->len < sizeof(struct iatt)) {
- gf_msg("glusterfs", GF_LOG_ERROR, ENOBUFS, LG_MSG_UNDERSIZED_BUF,
- "data value for '%s' is smaller than expected", key);
+ gf_smsg("glusterfs", GF_LOG_ERROR, ENOBUFS, LG_MSG_UNDERSIZED_BUF,
+ "key=%s", key, NULL);
return NULL;
}
@@ -1261,8 +1302,8 @@ int
dict_remove_foreach_fn(dict_t *d, char *k, data_t *v, void *_tmp)
{
if (!d || !k) {
- gf_msg("glusterfs", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ENTRY,
- "%s is NULL", d ? "key" : "dictionary");
+ gf_smsg("glusterfs", GF_LOG_WARNING, EINVAL, LG_MSG_KEY_OR_VALUE_NULL,
+ "d=%s", d ? "key" : "dictionary", NULL);
return -1;
}
@@ -1454,32 +1495,13 @@ fail:
* -val error, val = errno
*/
-int
-dict_get_with_ref(dict_t *this, char *key, data_t **data)
-{
- if (!this || !key || !data) {
- gf_msg_callingfn("dict", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "dict OR key (%s) is NULL", key);
- return -EINVAL;
- }
-
- return dict_get_with_refn(this, key, strlen(key), data);
-}
-
-int
+static int
dict_get_with_refn(dict_t *this, char *key, const int keylen, data_t **data)
{
data_pair_t *pair = NULL;
int ret = -ENOENT;
uint32_t hash;
- if (!this || !key || !data) {
- gf_msg_callingfn("dict", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "dict OR key (%s) is NULL", key);
- ret = -EINVAL;
- goto err;
- }
-
hash = (uint32_t)XXH64(key, keylen, 0);
LOCK(&this->lock);
@@ -1492,10 +1514,22 @@ dict_get_with_refn(dict_t *this, char *key, const int keylen, data_t **data)
}
}
UNLOCK(&this->lock);
-err:
+
return ret;
}
+int
+dict_get_with_ref(dict_t *this, char *key, data_t **data)
+{
+ if (!this || !key || !data) {
+ gf_msg_callingfn("dict", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
+ "dict OR key (%s) is NULL", key);
+ return -EINVAL;
+ }
+
+ return dict_get_with_refn(this, key, strlen(key), data);
+}
+
static int
data_to_ptr_common(data_t *data, void **val)
{
@@ -1669,7 +1703,7 @@ dict_get_int8(dict_t *this, char *key, int8_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1715,7 +1749,7 @@ dict_get_int16(dict_t *this, char *key, int16_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1787,7 +1821,7 @@ dict_get_int32(dict_t *this, char *key, int32_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1852,7 +1886,7 @@ dict_get_int64(dict_t *this, char *key, int64_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1897,7 +1931,7 @@ dict_get_uint16(dict_t *this, char *key, uint16_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1942,7 +1976,7 @@ dict_get_uint32(dict_t *this, char *key, uint32_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1987,7 +2021,7 @@ dict_get_uint64(dict_t *this, char *key, uint64_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -2083,7 +2117,7 @@ _dict_modify_flag(dict_t *this, char *key, int flag, int op)
*/
GF_ASSERT(flag >= 0 && flag < DICT_MAX_FLAGS);
- hash = (int32_t)XXH64(key, strlen(key), 0);
+ hash = (uint32_t)XXH64(key, strlen(key), 0);
LOCK(&this->lock);
{
pair = dict_lookup_common(this, key, hash);
@@ -2097,8 +2131,8 @@ _dict_modify_flag(dict_t *this, char *key, int flag, int op)
} else {
ptr = GF_CALLOC(1, DICT_MAX_FLAGS / 8, gf_common_mt_char);
if (!ptr) {
- gf_msg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "unable to allocate flag bit array");
+ gf_smsg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
+ "flag bit array", NULL);
ret = -ENOMEM;
goto err;
}
@@ -2106,8 +2140,8 @@ _dict_modify_flag(dict_t *this, char *key, int flag, int op)
data = data_from_dynptr(ptr, DICT_MAX_FLAGS / 8);
if (!data) {
- gf_msg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "unable to allocate data");
+ gf_smsg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY, "data",
+ NULL);
GF_FREE(ptr);
ret = -ENOMEM;
goto err;
@@ -2118,30 +2152,29 @@ _dict_modify_flag(dict_t *this, char *key, int flag, int op)
else
BIT_CLEAR((unsigned char *)(data->data), flag);
- if (this->free_pair_in_use) {
+ if (this->free_pair.key) { /* the free pair is in use */
pair = mem_get0(THIS->ctx->dict_pair_pool);
if (!pair) {
- gf_msg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "unable to allocate dict pair");
+ gf_smsg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
+ "dict pair", NULL);
ret = -ENOMEM;
goto err;
}
- } else {
+ } else { /* use the free pair */
pair = &this->free_pair;
- this->free_pair_in_use = _gf_true;
}
pair->key = (char *)GF_MALLOC(strlen(key) + 1, gf_common_mt_char);
if (!pair->key) {
- gf_msg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "unable to allocate dict pair");
+ gf_smsg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
+ "dict pair", NULL);
ret = -ENOMEM;
goto err;
}
strcpy(pair->key, key);
pair->key_hash = hash;
pair->value = data_ref(data);
-
+ this->totkvlen += (strlen(key) + 1 + data->len);
hashval = hash % this->hash_size;
pair->hash_next = this->members[hashval];
this->members[hashval] = pair;
@@ -2166,12 +2199,11 @@ err:
UNLOCK(&this->lock);
if (pair) {
- if (pair->key)
- free(pair->key);
-
- if (pair == &this->free_pair) {
- this->free_pair_in_use = _gf_false;
- } else {
+ if (pair->key) {
+ GF_FREE(pair->key);
+ pair->key = NULL;
+ }
+ if (pair != &this->free_pair) {
mem_put(pair);
}
}
@@ -2179,8 +2211,8 @@ err:
if (data)
data_destroy(data);
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_DICT_SET_FAILED,
- "unable to set key (%s) in dict ", key);
+ gf_smsg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_DICT_SET_FAILED, "key=%s", key,
+ NULL);
return ret;
}
@@ -2212,7 +2244,7 @@ dict_get_double(dict_t *this, char *key, double *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -2295,7 +2327,7 @@ dict_get_ptr(dict_t *this, char *key, void **ptr)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !ptr) {
+ if (!ptr) {
ret = -EINVAL;
goto err;
}
@@ -2325,7 +2357,7 @@ dict_get_ptr_and_len(dict_t *this, char *key, void **ptr, int *len)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !ptr) {
+ if (!ptr) {
ret = -EINVAL;
goto err;
}
@@ -2383,7 +2415,7 @@ dict_get_str(dict_t *this, char *key, char **str)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !str) {
+ if (!str) {
goto err;
}
ret = dict_get_with_ref(this, key, &data);
@@ -2557,7 +2589,7 @@ dict_get_bin(dict_t *this, char *key, void **bin)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !bin) {
+ if (!bin) {
goto err;
}
@@ -2660,7 +2692,7 @@ dict_get_gfuuid(dict_t *this, char *key, uuid_t *gfid)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !gfid) {
+ if (!gfid) {
goto err;
}
ret = dict_get_with_ref(this, key, &data);
@@ -2693,7 +2725,7 @@ dict_get_mdata(dict_t *this, char *key, struct mdata_iatt *mdata)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !mdata) {
+ if (!mdata) {
goto err;
}
ret = dict_get_with_ref(this, key, &data);
@@ -2703,8 +2735,8 @@ dict_get_mdata(dict_t *this, char *key, struct mdata_iatt *mdata)
VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_MDATA, key, -EINVAL);
if (data->len < sizeof(struct mdata_iatt)) {
- gf_msg("glusterfs", GF_LOG_ERROR, ENOBUFS, LG_MSG_UNDERSIZED_BUF,
- "data value for '%s' is smaller than expected", key);
+ gf_smsg("glusterfs", GF_LOG_ERROR, ENOBUFS, LG_MSG_UNDERSIZED_BUF,
+ "key=%s", key, NULL);
ret = -ENOBUFS;
goto err;
}
@@ -2731,7 +2763,7 @@ dict_get_iatt(dict_t *this, char *key, struct iatt *iatt)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !iatt) {
+ if (!iatt) {
goto err;
}
ret = dict_get_with_ref(this, key, &data);
@@ -2791,7 +2823,7 @@ dict_get_str_boolean(dict_t *this, char *key, int default_val)
VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, key, -EINVAL);
- ret = gf_string2boolean(data->data, &boo);
+ ret = gf_strn2boolean(data->data, data->len - 1, &boo);
if (ret == -1)
goto err;
@@ -2811,6 +2843,7 @@ dict_rename_key(dict_t *this, char *key, char *replace_key)
int ret = -EINVAL;
uint32_t hash;
uint32_t replacekey_hash;
+ int replacekey_len;
/* replacing a key by itself is a NO-OP */
if (strcmp(key, replace_key) == 0)
@@ -2823,7 +2856,8 @@ dict_rename_key(dict_t *this, char *key, char *replace_key)
}
hash = (uint32_t)XXH64(key, strlen(key), 0);
- replacekey_hash = (uint32_t)XXH64(replace_key, strlen(replace_key), 0);
+ replacekey_len = strlen(replace_key);
+ replacekey_hash = (uint32_t)XXH64(replace_key, replacekey_len, 0);
LOCK(&this->lock);
{
@@ -2832,8 +2866,8 @@ dict_rename_key(dict_t *this, char *key, char *replace_key)
if (!pair)
ret = -ENODATA;
else
- ret = dict_set_lk(this, replace_key, pair->value, replacekey_hash,
- 1);
+ ret = dict_set_lk(this, replace_key, replacekey_len, pair->value,
+ replacekey_hash, 1);
}
UNLOCK(&this->lock);
@@ -2866,53 +2900,15 @@ dict_serialized_length_lk(dict_t *this)
{
int ret = -EINVAL;
int count = this->count;
- int len = DICT_HDR_LEN;
- data_pair_t *pair = this->members_list;
+ const int keyhdrlen = DICT_DATA_HDR_KEY_LEN + DICT_DATA_HDR_VAL_LEN;
if (count < 0) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_COUNT_LESS_THAN_ZERO,
- "count (%d) < 0!", count);
+ gf_smsg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_COUNT_LESS_THAN_ZERO,
+ "count=%d", count, NULL);
goto out;
}
- while (count) {
- if (!pair) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL,
- LG_MSG_COUNT_LESS_THAN_DATA_PAIRS,
- "less than count data pairs found!");
- goto out;
- }
-
- len += DICT_DATA_HDR_KEY_LEN + DICT_DATA_HDR_VAL_LEN;
-
- if (!pair->key) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_NULL_PTR,
- "pair->key is null!");
- goto out;
- }
-
- len += strlen(pair->key) + 1 /* for '\0' */;
-
- if (!pair->value) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_NULL_PTR,
- "pair->value is null!");
- goto out;
- }
-
- if (pair->value->len < 0) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL,
- LG_MSG_VALUE_LENGTH_LESS_THAN_ZERO, "value->len (%d) < 0",
- pair->value->len);
- goto out;
- }
-
- len += pair->value->len;
-
- pair = pair->next;
- count--;
- }
-
- ret = len;
+ ret = DICT_HDR_LEN + this->totkvlen + (count * keyhdrlen);
out:
return ret;
}
@@ -2939,14 +2935,13 @@ dict_serialize_lk(dict_t *this, char *buf)
int32_t netword = 0;
if (!buf) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
- "buf is null!");
+ gf_smsg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, NULL);
goto out;
}
if (count < 0) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_COUNT_LESS_THAN_ZERO,
- "count (%d) < 0!", count);
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_COUNT_LESS_THAN_ZERO,
+ "count=%d", count, NULL);
goto out;
}
@@ -2956,14 +2951,13 @@ dict_serialize_lk(dict_t *this, char *buf)
while (count) {
if (!pair) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_PAIRS_LESS_THAN_COUNT,
- "less than count data pairs found!");
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_PAIRS_LESS_THAN_COUNT,
+ NULL);
goto out;
}
if (!pair->key) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_NULL_PTR,
- "pair->key is null!");
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_NULL_PTR, NULL);
goto out;
}
@@ -2973,8 +2967,7 @@ dict_serialize_lk(dict_t *this, char *buf)
buf += DICT_DATA_HDR_KEY_LEN;
if (!pair->value) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_NULL_PTR,
- "pair->value is null!");
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_NULL_PTR, NULL);
goto out;
}
@@ -3122,8 +3115,8 @@ dict_unserialize(char *orig_buf, int32_t size, dict_t **fill)
buf += DICT_HDR_LEN;
if (count < 0) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_COUNT_LESS_THAN_ZERO,
- "count (%d) <= 0", count);
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_COUNT_LESS_THAN_ZERO,
+ "count=%d", count, NULL);
goto out;
}
@@ -3193,7 +3186,7 @@ dict_unserialize(char *orig_buf, int32_t size, dict_t **fill)
value->is_static = _gf_false;
buf += vallen;
- ret = dict_add(*fill, key, value);
+ ret = dict_addn(*fill, key, keylen, value);
if (ret < 0)
goto out;
}
@@ -3279,32 +3272,30 @@ dict_serialize_value_with_delim_lk(dict_t *this, char *buf, int32_t *serz_len,
data_pair_t *pair = this->members_list;
if (!buf) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, "buf is null");
+ gf_smsg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, NULL);
goto out;
}
if (count < 0) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
- "count (%d) < 0", count);
+ gf_smsg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, "count=%d",
+ count, NULL);
goto out;
}
while (count) {
if (!pair) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_PAIRS_LESS_THAN_COUNT,
- "less than count data pairs found");
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_PAIRS_LESS_THAN_COUNT,
+ NULL);
goto out;
}
if (!pair->key || !pair->value) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_KEY_OR_VALUE_NULL,
- "key or value is null");
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_KEY_OR_VALUE_NULL, NULL);
goto out;
}
if (!pair->value->data) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_NULL_VALUE_IN_DICT,
- "null value found in dict");
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_NULL_VALUE_IN_DICT, NULL);
goto out;
}
@@ -3396,12 +3387,11 @@ dict_dump_to_log(dict_t *dict)
ret = dict_dump_to_str(dict, dump, dump_size, format);
if (ret) {
- gf_msg("dict", GF_LOG_WARNING, 0, LG_MSG_FAILED_TO_LOG_DICT,
- "Failed to log dictionary");
+ gf_smsg("dict", GF_LOG_WARNING, 0, LG_MSG_FAILED_TO_LOG_DICT, NULL);
goto out;
}
- gf_msg("dict", GF_LOG_INFO, 0, LG_MSG_DICT_ERROR, "dict=%p (%s)", dict,
- dump);
+ gf_smsg("dict", GF_LOG_INFO, 0, LG_MSG_DICT_ERROR, "dict=%p", dict,
+ "dump=%s", dump, NULL);
out:
GF_FREE(dump);
@@ -3434,8 +3424,8 @@ dict_dump_to_statedump(dict_t *dict, char *dict_name, char *domain)
ret = dict_dump_to_str(dict, dump, dump_size, format);
if (ret) {
- gf_msg(domain, GF_LOG_WARNING, 0, LG_MSG_FAILED_TO_LOG_DICT,
- "Failed to log dictionary %s", dict_name);
+ gf_smsg(domain, GF_LOG_WARNING, 0, LG_MSG_FAILED_TO_LOG_DICT, "name=%s",
+ dict_name, NULL);
goto out;
}
gf_proc_dump_build_key(key, domain, "%s", dict_name);
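
Reviewer note: with the new totkvlen counter, dict_serialized_length_lk() becomes simple arithmetic instead of a walk over every pair. A toy model of the invariant, with made-up header sizes standing in for DICT_HDR_LEN and the per-pair key/value header lengths:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Toy sizes standing in for DICT_HDR_LEN and the per-pair headers. */
#define HDR_LEN 9
#define PAIR_HDR_LEN 8

struct toy_dict {
    int count;
    int totkvlen; /* sum of (strlen(key) + 1 + value_len) over all pairs */
};

/* Mirror of what dict_set_lk()/dict_deln() now maintain incrementally. */
static void
toy_add(struct toy_dict *d, const char *key, int value_len)
{
    d->totkvlen += (int)strlen(key) + 1 + value_len;
    d->count++;
}

static void
toy_del(struct toy_dict *d, const char *key, int value_len)
{
    d->totkvlen -= (int)strlen(key) + 1 + value_len;
    d->count--;
}

/* O(1) serialized length, as in the rewritten dict_serialized_length_lk(). */
static int
toy_serialized_length(const struct toy_dict *d)
{
    return HDR_LEN + d->totkvlen + d->count * PAIR_HDR_LEN;
}

int
main(void)
{
    struct toy_dict d = { 0, 0 };

    toy_add(&d, "volname", 4);
    toy_add(&d, "brick-count", 2);
    toy_del(&d, "volname", 4);

    /* One pair left: key "brick-count" (11 + 1) + value (2) = 14 payload bytes. */
    assert(toy_serialized_length(&d) == HDR_LEN + PAIR_HDR_LEN + 14);
    printf("serialized length = %d\n", toy_serialized_length(&d));
    return 0;
}
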
diff --git a/libglusterfs/src/event-epoll.c b/libglusterfs/src/event-epoll.c
index bfe2648a920..fb4fb845b40 100644
--- a/libglusterfs/src/event-epoll.c
+++ b/libglusterfs/src/event-epoll.c
@@ -296,9 +296,8 @@ event_pool_new_epoll(int count, int eventthreadcount)
epfd = epoll_create(count);
if (epfd == -1) {
- gf_msg("epoll", GF_LOG_ERROR, errno, LG_MSG_EPOLL_FD_CREATE_FAILED,
- "epoll fd creation "
- "failed");
+ gf_smsg("epoll", GF_LOG_ERROR, errno, LG_MSG_EPOLL_FD_CREATE_FAILED,
+ NULL);
GF_FREE(event_pool->reg);
GF_FREE(event_pool);
event_pool = NULL;
@@ -332,8 +331,8 @@ __slot_update_events(struct event_slot_epoll *slot, int poll_in, int poll_out)
/* do nothing */
break;
default:
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_INVALID_POLL_IN,
- "invalid poll_in value %d", poll_in);
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_INVALID_POLL_IN,
+ "value=%d", poll_in, NULL);
break;
}
@@ -348,8 +347,8 @@ __slot_update_events(struct event_slot_epoll *slot, int poll_in, int poll_out)
/* do nothing */
break;
default:
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_INVALID_POLL_OUT,
- "invalid poll_out value %d", poll_out);
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_INVALID_POLL_OUT,
+ "value=%d", poll_out, NULL);
break;
}
}
@@ -390,8 +389,8 @@ event_register_epoll(struct event_pool *event_pool, int fd,
idx = event_slot_alloc(event_pool, fd, notify_poller_death, &slot);
if (idx == -1) {
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND,
- "could not find slot for fd=%d", fd);
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND, "fd=%d", fd,
+ NULL);
return -1;
}
@@ -426,10 +425,8 @@ event_register_epoll(struct event_pool *event_pool, int fd,
UNLOCK(&slot->lock);
if (ret == -1) {
- gf_msg("epoll", GF_LOG_ERROR, errno, LG_MSG_EPOLL_FD_ADD_FAILED,
- "failed to add fd(=%d) to "
- "epoll fd(=%d)",
- fd, event_pool->fd);
+ gf_smsg("epoll", GF_LOG_ERROR, errno, LG_MSG_EPOLL_FD_ADD_FAILED,
+ "fd=%d", fd, "epoll_fd=%d", event_pool->fd, NULL);
event_slot_unref(event_pool, slot, idx);
idx = -1;
}
@@ -458,8 +455,8 @@ event_unregister_epoll_common(struct event_pool *event_pool, int fd, int idx,
slot = event_slot_get(event_pool, idx);
if (!slot) {
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND,
- "could not find slot for fd=%d idx=%d", fd, idx);
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND, "fd=%d", fd,
+ "idx=%d", idx, NULL);
return -1;
}
@@ -470,10 +467,8 @@ event_unregister_epoll_common(struct event_pool *event_pool, int fd, int idx,
ret = epoll_ctl(event_pool->fd, EPOLL_CTL_DEL, fd, NULL);
if (ret == -1) {
- gf_msg("epoll", GF_LOG_ERROR, errno, LG_MSG_EPOLL_FD_DEL_FAILED,
- "fail to del "
- "fd(=%d) from epoll fd(=%d)",
- fd, event_pool->fd);
+ gf_smsg("epoll", GF_LOG_ERROR, errno, LG_MSG_EPOLL_FD_DEL_FAILED,
+ "fd=%d", fd, "epoll_fd=%d", event_pool->fd, NULL);
goto unlock;
}
@@ -525,8 +520,8 @@ event_select_on_epoll(struct event_pool *event_pool, int fd, int idx,
slot = event_slot_get(event_pool, idx);
if (!slot) {
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND,
- "could not find slot for fd=%d idx=%d", fd, idx);
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND, "fd=%d", fd,
+ "idx=%d", idx, NULL);
return -1;
}
@@ -557,10 +552,8 @@ event_select_on_epoll(struct event_pool *event_pool, int fd, int idx,
ret = epoll_ctl(event_pool->fd, EPOLL_CTL_MOD, fd, &epoll_event);
if (ret == -1) {
- gf_msg("epoll", GF_LOG_ERROR, errno, LG_MSG_EPOLL_FD_MODIFY_FAILED,
- "failed to "
- "modify fd(=%d) events to %d",
- fd, epoll_event.events);
+ gf_smsg("epoll", GF_LOG_ERROR, errno, LG_MSG_EPOLL_FD_MODIFY_FAILED,
+ "fd=%d", fd, "events=%d", epoll_event.events, NULL);
}
}
unlock:
@@ -595,8 +588,8 @@ event_dispatch_epoll_handler(struct event_pool *event_pool,
slot = event_slot_get(event_pool, idx);
if (!slot) {
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND,
- "could not find slot for idx=%d", idx);
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND, "idx=%d", idx,
+ NULL);
return -1;
}
@@ -604,20 +597,17 @@ event_dispatch_epoll_handler(struct event_pool *event_pool,
{
fd = slot->fd;
if (fd == -1) {
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_STALE_FD_FOUND,
- "stale fd found on "
- "idx=%d, gen=%d, events=%d, slot->gen=%d",
- idx, gen, event->events, slot->gen);
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_STALE_FD_FOUND, "idx=%d",
+ idx, "gen=%d", gen, "events=%d", event->events,
+ "slot->gen=%d", slot->gen, NULL);
/* fd got unregistered in another thread */
goto pre_unlock;
}
if (gen != slot->gen) {
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_GENERATION_MISMATCH,
- "generation "
- "mismatch on idx=%d, gen=%d, slot->gen=%d, "
- "slot->fd=%d",
- idx, gen, slot->gen, slot->fd);
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_GENERATION_MISMATCH,
+ "idx=%d", idx, "gen=%d", gen, "slot->gen=%d", slot->gen,
+ "slot->fd=%d", slot->fd, NULL);
/* slot was re-used and therefore is another fd! */
goto pre_unlock;
}
@@ -676,10 +666,8 @@ event_dispatch_epoll_worker(void *data)
GF_VALIDATE_OR_GOTO("event", event_pool, out);
- gf_msg("epoll", GF_LOG_INFO, 0, LG_MSG_STARTED_EPOLL_THREAD,
- "Started"
- " thread with index %d",
- myindex - 1);
+ gf_smsg("epoll", GF_LOG_INFO, 0, LG_MSG_STARTED_EPOLL_THREAD, "index=%d",
+ myindex - 1, NULL);
pthread_mutex_lock(&event_pool->mutex);
{
@@ -743,8 +731,8 @@ event_dispatch_epoll_worker(void *data)
}
pthread_mutex_unlock(&event_pool->mutex);
- gf_msg("epoll", GF_LOG_INFO, 0, LG_MSG_EXITED_EPOLL_THREAD,
- "Exited thread with index %d", myindex);
+ gf_smsg("epoll", GF_LOG_INFO, 0, LG_MSG_EXITED_EPOLL_THREAD,
+ "index=%d", myindex, NULL);
goto out;
}
@@ -762,8 +750,8 @@ event_dispatch_epoll_worker(void *data)
ret = event_dispatch_epoll_handler(event_pool, &event);
if (ret) {
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_EXITED_EPOLL_THREAD,
- "Failed to dispatch handler");
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_DISPATCH_HANDLER_FAILED,
+ NULL);
}
}
out:
@@ -827,9 +815,8 @@ event_dispatch_epoll(struct event_pool *event_pool)
if (i != 0)
pthread_detach(event_pool->pollers[i]);
} else {
- gf_msg("epoll", GF_LOG_WARNING, 0,
- LG_MSG_START_EPOLL_THREAD_FAILED,
- "Failed to start thread for index %d", i);
+ gf_smsg("epoll", GF_LOG_WARNING, 0,
+ LG_MSG_START_EPOLL_THREAD_FAILED, "index=%d", i, NULL);
if (i == 0) {
GF_FREE(ev_data);
break;
@@ -922,11 +909,9 @@ event_reconfigure_threads_epoll(struct event_pool *event_pool, int value)
event_dispatch_epoll_worker, ev_data,
"epoll%03hx", i & 0x3ff);
if (ret) {
- gf_msg("epoll", GF_LOG_WARNING, 0,
- LG_MSG_START_EPOLL_THREAD_FAILED,
- "Failed to start thread"
- " for index %d",
- i);
+ gf_smsg("epoll", GF_LOG_WARNING, 0,
+ LG_MSG_START_EPOLL_THREAD_FAILED, "index=%d", i,
+ NULL);
GF_FREE(ev_data);
} else {
pthread_detach(t_id);
@@ -989,8 +974,8 @@ event_handled_epoll(struct event_pool *event_pool, int fd, int idx, int gen)
slot = event_slot_get(event_pool, idx);
if (!slot) {
- gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND,
- "could not find slot for fd=%d idx=%d", fd, idx);
+ gf_smsg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND, "fd=%d", fd,
+ "idx=%d", idx, NULL);
return -1;
}
diff --git a/libglusterfs/src/event-poll.c b/libglusterfs/src/event-poll.c
index 14dc5e3180c..2cba963f096 100644
--- a/libglusterfs/src/event-poll.c
+++ b/libglusterfs/src/event-poll.c
@@ -48,10 +48,8 @@ __flush_fd(int fd, int idx, int gen, void *data, int poll_in, int poll_out,
do {
ret = sys_read(fd, buf, 64);
if (ret == -1 && errno != EAGAIN) {
- gf_msg("poll", GF_LOG_ERROR, errno, LG_MSG_FILE_OP_FAILED,
- "read on %d returned "
- "error",
- fd);
+ gf_smsg("poll", GF_LOG_ERROR, errno, LG_MSG_READ_FILE_FAILED,
+ "fd=%d", fd, NULL);
}
} while (ret == 64);
@@ -111,8 +109,7 @@ event_pool_new_poll(int count, int eventthreadcount)
ret = pipe(event_pool->breaker);
if (ret == -1) {
- gf_msg("poll", GF_LOG_ERROR, errno, LG_MSG_PIPE_CREATE_FAILED,
- "pipe creation failed");
+ gf_smsg("poll", GF_LOG_ERROR, errno, LG_MSG_PIPE_CREATE_FAILED, NULL);
GF_FREE(event_pool->reg);
GF_FREE(event_pool);
return NULL;
@@ -120,8 +117,7 @@ event_pool_new_poll(int count, int eventthreadcount)
ret = fcntl(event_pool->breaker[0], F_SETFL, O_NONBLOCK);
if (ret == -1) {
- gf_msg("poll", GF_LOG_ERROR, errno, LG_MSG_SET_PIPE_FAILED,
- "could not set pipe to non blocking mode");
+ gf_smsg("poll", GF_LOG_ERROR, errno, LG_MSG_SET_PIPE_FAILED, NULL);
sys_close(event_pool->breaker[0]);
sys_close(event_pool->breaker[1]);
event_pool->breaker[0] = event_pool->breaker[1] = -1;
@@ -133,8 +129,7 @@ event_pool_new_poll(int count, int eventthreadcount)
ret = fcntl(event_pool->breaker[1], F_SETFL, O_NONBLOCK);
if (ret == -1) {
- gf_msg("poll", GF_LOG_ERROR, errno, LG_MSG_SET_PIPE_FAILED,
- "could not set pipe to non blocking mode");
+ gf_smsg("poll", GF_LOG_ERROR, errno, LG_MSG_SET_PIPE_FAILED, NULL);
sys_close(event_pool->breaker[0]);
sys_close(event_pool->breaker[1]);
@@ -148,8 +143,7 @@ event_pool_new_poll(int count, int eventthreadcount)
ret = event_register_poll(event_pool, event_pool->breaker[0], __flush_fd,
NULL, 1, 0, 0);
if (ret == -1) {
- gf_msg("poll", GF_LOG_ERROR, 0, LG_MSG_REGISTER_PIPE_FAILED,
- "could not register pipe fd with poll event loop");
+ gf_smsg("poll", GF_LOG_ERROR, 0, LG_MSG_REGISTER_PIPE_FAILED, NULL);
sys_close(event_pool->breaker[0]);
sys_close(event_pool->breaker[1]);
event_pool->breaker[0] = event_pool->breaker[1] = -1;
@@ -160,11 +154,8 @@ event_pool_new_poll(int count, int eventthreadcount)
}
if (eventthreadcount > 1) {
- gf_msg("poll", GF_LOG_INFO, 0, LG_MSG_POLL_IGNORE_MULTIPLE_THREADS,
- "Currently poll "
- "does not use multiple event processing threads, "
- "thread count (%d) ignored",
- eventthreadcount);
+ gf_smsg("poll", GF_LOG_INFO, 0, LG_MSG_POLL_IGNORE_MULTIPLE_THREADS,
+ "count=%d", eventthreadcount, NULL);
}
    /* although, eventthreadcount for poll implementation is always
@@ -215,8 +206,8 @@ event_register_poll(struct event_pool *event_pool, int fd,
/* do nothing */
break;
default:
- gf_msg("poll", GF_LOG_ERROR, 0, LG_MSG_INVALID_POLL_IN,
- "invalid poll_in value %d", poll_in);
+ gf_smsg("poll", GF_LOG_ERROR, 0, LG_MSG_INVALID_POLL_IN,
+ "value=%d", poll_in, NULL);
break;
}
@@ -231,8 +222,8 @@ event_register_poll(struct event_pool *event_pool, int fd,
/* do nothing */
break;
default:
- gf_msg("poll", GF_LOG_ERROR, 0, LG_MSG_INVALID_POLL_OUT,
- "invalid poll_out value %d", poll_out);
+ gf_smsg("poll", GF_LOG_ERROR, 0, LG_MSG_INVALID_POLL_OUT,
+ "value=%d", poll_out, NULL);
break;
}
@@ -257,8 +248,8 @@ event_unregister_poll(struct event_pool *event_pool, int fd, int idx_hint)
idx = __event_getindex(event_pool, fd, idx_hint);
if (idx == -1) {
- gf_msg("poll", GF_LOG_ERROR, 0, LG_MSG_INDEX_NOT_FOUND,
- "index not found for fd=%d (idx_hint=%d)", fd, idx_hint);
+ gf_smsg("poll", GF_LOG_ERROR, 0, LG_MSG_INDEX_NOT_FOUND, "fd=%d",
+ fd, "idx_hint=%d", idx_hint, NULL);
errno = ENOENT;
goto unlock;
}
@@ -298,8 +289,8 @@ event_select_on_poll(struct event_pool *event_pool, int fd, int idx_hint,
idx = __event_getindex(event_pool, fd, idx_hint);
if (idx == -1) {
- gf_msg("poll", GF_LOG_ERROR, 0, LG_MSG_INDEX_NOT_FOUND,
- "index not found for fd=%d (idx_hint=%d)", fd, idx_hint);
+ gf_smsg("poll", GF_LOG_ERROR, 0, LG_MSG_INDEX_NOT_FOUND, "fd=%d",
+ fd, "idx_hint=%d", idx_hint, NULL);
errno = ENOENT;
goto unlock;
}
@@ -361,10 +352,8 @@ event_dispatch_poll_handler(struct event_pool *event_pool, struct pollfd *ufds,
idx = __event_getindex(event_pool, ufds[i].fd, i);
if (idx == -1) {
- gf_msg("poll", GF_LOG_ERROR, 0, LG_MSG_INDEX_NOT_FOUND,
- "index not found for "
- "fd=%d (idx_hint=%d)",
- ufds[i].fd, i);
+ gf_smsg("poll", GF_LOG_ERROR, 0, LG_MSG_INDEX_NOT_FOUND, "fd=%d",
+ ufds[i].fd, "idx_hint=%d", i, NULL);
goto unlock;
}
diff --git a/libglusterfs/src/event.c b/libglusterfs/src/event.c
index 235128b6044..402c253ca25 100644
--- a/libglusterfs/src/event.c
+++ b/libglusterfs/src/event.c
@@ -17,6 +17,7 @@
#include <string.h>
#include "glusterfs/gf-event.h"
+#include "glusterfs/timespec.h"
#include "glusterfs/common-utils.h"
#include "glusterfs/libglusterfs-messages.h"
#include "glusterfs/syscall.h"
@@ -266,7 +267,7 @@ gf_event_dispatch_destroy(struct event_pool *event_pool)
if (sys_write(fd[1], "dummy", 6) == -1) {
break;
}
- clock_gettime(CLOCK_REALTIME, &sleep_till);
+ timespec_now_realtime(&sleep_till);
sleep_till.tv_sec += 1;
ret = pthread_cond_timedwait(&event_pool->cond, &event_pool->mutex,
&sleep_till);
diff --git a/libglusterfs/src/events.c b/libglusterfs/src/events.c
index 25097675a9e..33157549897 100644
--- a/libglusterfs/src/events.c
+++ b/libglusterfs/src/events.c
@@ -34,62 +34,66 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
int ret = 0;
int sock = -1;
char *eventstr = NULL;
- struct sockaddr_in server;
va_list arguments;
char *msg = NULL;
glusterfs_ctx_t *ctx = NULL;
char *host = NULL;
struct addrinfo hints;
struct addrinfo *result = NULL;
+ struct addrinfo *iter_result_ptr = NULL;
xlator_t *this = THIS;
+ char *volfile_server_transport = NULL;
/* Global context */
- ctx = THIS->ctx;
+ ctx = this->ctx;
if (event < 0 || event >= EVENT_LAST) {
ret = EVENT_ERROR_INVALID_INPUTS;
goto out;
}
- /* Initialize UDP socket */
- sock = socket(AF_INET, SOCK_DGRAM, 0);
- if (sock < 0) {
- ret = EVENT_ERROR_SOCKET;
- goto out;
+ if (ctx) {
+ volfile_server_transport = ctx->cmd_args.volfile_server_transport;
+ }
+ if (!volfile_server_transport) {
+ volfile_server_transport = "tcp";
+ }
+
+    /* host == NULL makes getaddrinfo() resolve to the loopback address */
+ if (ctx && ctx->cmd_args.volfile_server &&
+ (strcmp(volfile_server_transport, "unix"))) {
+    /* If this is client code, volfile_server is set;
+       use that information to push the events. */
+ host = ctx->cmd_args.volfile_server;
}
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_DGRAM;
+ hints.ai_flags = AI_ADDRCONFIG;
- /* Get Host name to send message */
- if (ctx && ctx->cmd_args.volfile_server) {
- /* If it is client code then volfile_server is set
- use that information to push the events. */
- if ((getaddrinfo(ctx->cmd_args.volfile_server, NULL, &hints,
- &result)) != 0) {
- ret = EVENT_ERROR_RESOLVE;
- goto out;
- }
+ if ((getaddrinfo(host, TOSTRING(EVENT_PORT), &hints, &result)) != 0) {
+ ret = EVENT_ERROR_RESOLVE;
+ goto out;
+ }
- if (get_ip_from_addrinfo(result, &host) == NULL) {
- ret = EVENT_ERROR_RESOLVE;
- goto out;
+    // iterate over the results and break when socket creation succeeds.
+ for (iter_result_ptr = result; iter_result_ptr != NULL;
+ iter_result_ptr = iter_result_ptr->ai_next) {
+ sock = socket(iter_result_ptr->ai_family, iter_result_ptr->ai_socktype,
+ iter_result_ptr->ai_protocol);
+ if (sock != -1) {
+ break;
}
- } else {
- /* Localhost, Use the defined IP for localhost */
- host = gf_strdup(EVENT_HOST);
}
-
- /* Socket Configurations */
- server.sin_family = AF_INET;
- server.sin_port = htons(EVENT_PORT);
- ret = inet_pton(server.sin_family, host, &server.sin_addr);
- if (ret <= 0) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
- "inet_pton failed with return code %d", ret);
+ /*
+     * If none of the addrinfo results yielded a usable socket,
+     * socket creation has failed.
+ */
+ if (sock < 0) {
+ ret = EVENT_ERROR_SOCKET;
goto out;
}
- memset(&server.sin_zero, '\0', sizeof(server.sin_zero));
va_start(arguments, fmt);
ret = gf_vasprintf(&msg, fmt, arguments);
@@ -100,16 +104,16 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
goto out;
}
- ret = gf_asprintf(&eventstr, "%u %d %s", (unsigned)time(NULL), event, msg);
-
+ ret = gf_asprintf(&eventstr, "%u %d %s", (unsigned)gf_time(), event, msg);
+ GF_FREE(msg);
if (ret <= 0) {
ret = EVENT_ERROR_MSG_FORMAT;
goto out;
}
/* Send Message */
- if (sendto(sock, eventstr, strlen(eventstr), 0, (struct sockaddr *)&server,
- sizeof(server)) <= 0) {
+ if (sendto(sock, eventstr, strlen(eventstr), 0, result->ai_addr,
+ result->ai_addrlen) <= 0) {
ret = EVENT_ERROR_SEND;
goto out;
}
@@ -121,17 +125,10 @@ out:
sys_close(sock);
}
- /* Allocated by gf_vasprintf */
- if (msg)
- GF_FREE(msg);
-
/* Allocated by gf_asprintf */
if (eventstr)
GF_FREE(eventstr);
- if (host)
- GF_FREE(host);
-
if (result)
freeaddrinfo(result);
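
Reviewer note: the events.c change replaces the hand-rolled AF_INET/inet_pton path with a standard getaddrinfo() iteration. A minimal standalone sketch of that pattern; the host (NULL, i.e. loopback) and port "24009" are placeholders here, not the EVENT_PORT/volfile_server values used above:

#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

int
main(void)
{
    struct addrinfo hints, *result = NULL, *rp = NULL;
    int sock = -1;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;      /* IPv4 or IPv6 */
    hints.ai_socktype = SOCK_DGRAM;   /* UDP, as for the events socket */
    hints.ai_flags = AI_ADDRCONFIG;

    /* A NULL host resolves to the loopback addresses. */
    if (getaddrinfo(NULL, "24009", &hints, &result) != 0) {
        fprintf(stderr, "resolve failed\n");
        return 1;
    }

    /* Try each returned address until a socket can be created. */
    for (rp = result; rp != NULL; rp = rp->ai_next) {
        sock = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol);
        if (sock != -1)
            break;
    }

    if (sock == -1) {
        fprintf(stderr, "socket creation failed\n");
    } else {
        /* sendto(sock, buf, len, 0, rp->ai_addr, rp->ai_addrlen) would go here. */
        close(sock);
    }

    freeaddrinfo(result);
    return sock == -1 ? 1 : 0;
}
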
diff --git a/libglusterfs/src/fd.c b/libglusterfs/src/fd.c
index e0767a7e61f..62606e91164 100644
--- a/libglusterfs/src/fd.c
+++ b/libglusterfs/src/fd.c
@@ -502,6 +502,32 @@ out:
}
void
+fd_close(fd_t *fd)
+{
+ xlator_t *xl, *old_THIS;
+
+ old_THIS = THIS;
+
+ for (xl = fd->inode->table->xl->graph->first; xl != NULL; xl = xl->next) {
+ if (!xl->call_cleanup) {
+ THIS = xl;
+
+ if (IA_ISDIR(fd->inode->ia_type)) {
+ if (xl->cbks->fdclosedir != NULL) {
+ xl->cbks->fdclosedir(xl, fd);
+ }
+ } else {
+ if (xl->cbks->fdclose != NULL) {
+ xl->cbks->fdclose(xl, fd);
+ }
+ }
+ }
+ }
+
+ THIS = old_THIS;
+}
+
+void
fd_unref(fd_t *fd)
{
int32_t refcount = 0;
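
Reviewer note: the new fd_close() walks every xlator in the graph, saves and restores the THIS pointer, and invokes the optional fdclose/fdclosedir callback where one is defined. A toy version of that traversal with stand-in types (toy_xl instead of xlator_t, a plain global instead of the THIS thread-local):

#include <stdio.h>

struct toy_xl {
    const char *name;
    void (*on_close)(struct toy_xl *xl); /* optional, like cbks->fdclose */
    struct toy_xl *next;
};

static struct toy_xl *current; /* stand-in for the THIS thread-local */

static void
log_close(struct toy_xl *xl)
{
    printf("%s: fd closed\n", xl->name);
}

/* Walk the chain the way fd_close() walks graph->first..->next, saving and
 * restoring the "current" xlator around each callback. */
static void
toy_fd_close(struct toy_xl *first)
{
    struct toy_xl *old = current;

    for (struct toy_xl *xl = first; xl != NULL; xl = xl->next) {
        current = xl;
        if (xl->on_close != NULL)
            xl->on_close(xl);
    }

    current = old;
}

int
main(void)
{
    struct toy_xl io = { "io-cache", log_close, NULL };
    struct toy_xl md = { "md-cache", NULL, &io };   /* no fdclose callback */
    struct toy_xl top = { "fuse", log_close, &md };

    toy_fd_close(&top);
    return 0;
}
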
diff --git a/libglusterfs/src/gf-dirent.c b/libglusterfs/src/gf-dirent.c
index 9c8c74beb54..a809efc97ef 100644
--- a/libglusterfs/src/gf-dirent.c
+++ b/libglusterfs/src/gf-dirent.c
@@ -276,7 +276,7 @@ gf_fill_iatt_for_dirent(gf_dirent_t *entry, inode_t *parent, xlator_t *subvol)
gf_uuid_copy(loc.pargfid, parent->gfid);
loc.name = entry->d_name;
loc.parent = inode_ref(parent);
- ret = inode_path(loc.inode, entry->d_name, &path);
+ ret = inode_path(loc.parent, entry->d_name, &path);
loc.path = path;
if (ret < 0)
goto out;
diff --git a/libglusterfs/src/gfdb/Makefile.am b/libglusterfs/src/gfdb/Makefile.am
deleted file mode 100644
index 3931e694c24..00000000000
--- a/libglusterfs/src/gfdb/Makefile.am
+++ /dev/null
@@ -1,37 +0,0 @@
-libgfdb_la_CFLAGS = -Wall $(GF_CFLAGS) $(GF_DARWIN_LIBGLUSTERFS_CFLAGS) \
- $(SQLITE_CFLAGS) -DDATADIR=\"$(localstatedir)\"
-
-libgfdb_la_CPPFLAGS = $(GF_CPPFLAGS) -D__USE_FILE_OFFSET64 -fpic \
- -I$(top_srcdir)/libglusterfs/src \
- -I$(top_srcdir)/rpc/xdr/src \
- -I$(top_builddir)/rpc/xdr/src \
- -DDATADIR=\"$(localstatedir)\"
-
-libgfdb_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
- $(SQLITE_LIBS) $(UUID_LIBS)
-
-libgfdb_la_LDFLAGS = $(GF_LDFLAGS) -version-info $(LIBGLUSTERFS_LT_VERSION)
-
-libgfdbdir = $(includedir)/glusterfs/gfdb
-
-if BUILD_GFDB
- lib_LTLIBRARIES = libgfdb.la
-endif
-
-CONTRIB_BUILDDIR = $(top_builddir)/contrib
-
-libgfdb_la_SOURCES = gfdb_data_store.c gfdb_data_store_helper.c \
- gfdb_sqlite3_helper.c gfdb_sqlite3.c
-
-noinst_HEADERS = gfdb_data_store.h gfdb_data_store_types.h \
- gfdb_sqlite3_helper.h gfdb_sqlite3.h gfdb_mem-types.h \
- gfdb_data_store_helper.h
-
-libgfdb_HEADERS = gfdb_data_store.h gfdb_data_store_types.h \
- gfdb_data_store_helper.h gfdb_sqlite3.h gfdb_mem-types.h \
- gfdb_sqlite3_helper.h
-
-CLEANFILES =
-
-$(top_builddir)/libglusterfs/src/libglusterfs.la:
- $(MAKE) -C $(top_builddir)/libglusterfs/src/ all
diff --git a/libglusterfs/src/gfdb/gfdb_data_store.c b/libglusterfs/src/gfdb/gfdb_data_store.c
deleted file mode 100644
index 02894cf7785..00000000000
--- a/libglusterfs/src/gfdb/gfdb_data_store.c
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#include "gfdb_sqlite3.h"
-#include "gfdb_data_store.h"
-#include "glusterfs/list.h"
-#include "glusterfs/libglusterfs-messages.h"
-
-/******************************************************************************
- *
- * Database Connection utils/internals
- *
- * ****************************************************************************/
-
-/* GFDB Connection Node:
- * ~~~~~~~~~~~~~~~~~~~~
- * Represents the connection to the database while using libgfdb
- * The connection node is not thread safe as far as fini_db is concerned.
- * You can use a single connection node
- * to do multithreaded db operations like insert/delete/find of records.
- * But you need to wait for all the operating threads to complete i.e
- * pthread_join() and then do fini_db() to kill the connection node.
- * gfdb_conn_node_t is an opaque structure.
- * */
-struct gfdb_conn_node_t {
- gfdb_connection_t gfdb_connection;
- struct list_head conn_list;
-};
-
-/*
- * db_conn_list is the circular linked list which
- * will have all the database connections for the process
- *
- * */
-static gfdb_conn_node_t *db_conn_list;
-
-/*
- * db_conn_mutex is the mutex for db_conn_list
- *
- * */
-static pthread_mutex_t db_conn_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*Checks the sanity of the connection node*/
-#define CHECK_CONN_NODE(_conn_node) \
- do { \
- GF_ASSERT(_conn_node); \
- GF_ASSERT(_conn_node->gfdb_connection.gf_db_connection); \
- } while (0)
-
-/* Checks the sanity of the connection node and goto */
-#define CHECK_CONN_NODE_GOTO(_conn_node, label) \
- do { \
- if (!_conn_node) { \
- goto label; \
- }; \
- if (!_conn_node->gfdb_connection.gf_db_connection) { \
- goto label; \
- }; \
- } while (0)
-
-/*Check if the conn node is first in the list*/
-#define IS_FIRST_NODE(db_conn_list, _conn_node) \
- ((_conn_node == db_conn_list) ? _gf_true : _gf_false)
-
-/*Check if the conn node is the only node in the list*/
-#define IS_THE_ONLY_NODE(_conn_node) \
- ((_conn_node->conn_list.next == _conn_node->conn_list.prev) ? _gf_true \
- : _gf_false)
-
-/*Internal Function: Adds connection node to the end of
- * the db connection list.*/
-static int
-add_connection_node(gfdb_conn_node_t *_conn_node)
-{
- int ret = -1;
-
- GF_ASSERT(_conn_node);
-
- /*Lock the list*/
- ret = pthread_mutex_lock(&db_conn_mutex);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, ret, LG_MSG_LOCK_LIST_FAILED,
- "Failed lock db connection "
- "list %s",
- strerror(ret));
- ret = -1;
- goto out;
- }
-
- if (db_conn_list == NULL) {
- db_conn_list = _conn_node;
- } else {
- list_add_tail(&_conn_node->conn_list, &db_conn_list->conn_list);
- }
-
- /*unlock the list*/
- ret = pthread_mutex_unlock(&db_conn_mutex);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, ret, LG_MSG_UNLOCK_LIST_FAILED,
- "Failed unlock db "
- "connection list %s",
- strerror(ret));
- ret = -1;
- goto out;
- /*TODO What if the unlock fails.
- * Will it lead to deadlock?
- * Most of the gluster code
- * no check for unlock or destroy of mutex!*/
- }
- ret = 0;
-out:
- return ret;
-}
-
-/*Internal Function:
- * Delete connection node from the list*/
-static int
-delete_conn_node(gfdb_conn_node_t *_conn_node)
-{
- int ret = -1;
-
- GF_ASSERT(_conn_node);
-
- /*Lock of the list*/
- ret = pthread_mutex_lock(&db_conn_mutex);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, ret, LG_MSG_LOCK_LIST_FAILED,
- "Failed lock on db connection"
- " list %s",
- strerror(ret));
- goto out;
- }
-
- /*Remove the connection object from list*/
- if (IS_THE_ONLY_NODE(_conn_node)) {
- db_conn_list = NULL;
- GF_FREE(_conn_node);
- } else {
- if (IS_FIRST_NODE(db_conn_list, _conn_node)) {
- db_conn_list = list_entry(db_conn_list->conn_list.next,
- gfdb_conn_node_t, conn_list);
- }
- list_del(&_conn_node->conn_list);
- GF_FREE(_conn_node);
- }
-
- /*Release the list lock*/
- ret = pthread_mutex_unlock(&db_conn_mutex);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_WARNING, ret, LG_MSG_UNLOCK_LIST_FAILED,
- "Failed unlock on db "
- "connection list %s",
- strerror(ret));
- /*TODO What if the unlock fails.
- * Will it lead to deadlock?
- * Most of the gluster code
- * no check for unlock or destroy of mutex!*/
- ret = -1;
- goto out;
- }
- ret = 0;
-out:
- return ret;
-}
-
-/*Internal function: Used initialize/map db operation of
- * specified type of db plugin*/
-static int
-init_db_operations(gfdb_db_type_t gfdb_db_type,
- gfdb_db_operations_t *gfdb_db_operations)
-{
- int ret = -1;
-
- GF_ASSERT(gfdb_db_operations);
-
- /*Clear the gfdb_db_operations*/
- gfdb_db_operations = memset(gfdb_db_operations, 0,
- sizeof(*gfdb_db_operations));
- switch (gfdb_db_type) {
- case GFDB_SQLITE3:
- gf_sqlite3_fill_db_operations(gfdb_db_operations);
- ret = 0;
- break;
- case GFDB_HYPERDEX:
- case GFDB_HASH_FILE_STORE:
- case GFDB_ROCKS_DB:
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_UNSUPPORTED_PLUGIN,
- "Plugin not supported");
- break;
- case GFDB_INVALID_DB:
- case GFDB_DB_END:
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_INVALID_DB_TYPE,
- "Invalid DB Type");
- break;
- }
- return ret;
-}
-
-/******************************************************************************
- *
- * LIBGFDB API Functions
- *
- * ****************************************************************************/
-
-/*Libgfdb API Function: Used to initialize a db connection
- * (Constructor function for db connection object)
- * Arguments:
- * args : Dictionary containing database specific parameters
- * eg: For sqlite3, pagesize, cachesize, db name, db path
- etc
- * gfdb_db_type : Type of data base used i.e sqlite or hyperdex etc
- * Returns : if successful return the GFDB Connection node to the caller or
- * NULL in case of failure*/
-gfdb_conn_node_t *
-init_db(dict_t *args, gfdb_db_type_t gfdb_db_type)
-{
- int ret = -1;
- gfdb_conn_node_t *_conn_node = NULL;
- gfdb_db_operations_t *db_operations_t = NULL;
-
- /*Create data base connection object*/
- _conn_node = GF_CALLOC(1, sizeof(gfdb_conn_node_t), gf_mt_db_conn_node_t);
- if (!_conn_node) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "Failed mem alloc for "
- "gfdb_conn_node_t");
- goto alloc_failed;
- }
-
- /*Init the list component of db connection object*/
- INIT_LIST_HEAD(&_conn_node->conn_list);
-
- /*Add created connection node to the list*/
- ret = add_connection_node(_conn_node);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_ADD_TO_LIST_FAILED,
- "Failed to add connection "
- "node to list");
- goto _conn_failed;
- }
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
-
- /*init the db ops object of db connection object*/
- ret = init_db_operations(gfdb_db_type, db_operations_t);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_INIT_DB_FAILED,
- "Failed initializing database "
- "operation failed.");
- goto init_db_failed;
- }
-
- /*Calling the init_db_op of the respected db type*/
- GF_ASSERT(db_operations_t->init_db_op);
- ret = db_operations_t->init_db_op(
- args, &_conn_node->gfdb_connection.gf_db_connection);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_INIT_DB_FAILED,
- "Failed initializing database");
- goto init_db_failed;
- }
- _conn_node->gfdb_connection.gfdb_db_type = gfdb_db_type;
-
- return _conn_node;
-
- /*****Error Handling********/
- /* If init_db_operations or init_db of plugin failed delete
- * conn node from the list.
- * connection node will be free by delete_conn_node*/
-init_db_failed:
- ret = delete_conn_node(_conn_node);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DELETE_FROM_LIST_FAILED,
- "Failed deleting "
- "connection node from list");
- }
- return NULL;
- /*if adding to the list failed free connection node*/
-_conn_failed:
- GF_FREE(_conn_node);
- /*if allocation failed*/
-alloc_failed:
- return NULL;
- /*****Error Handling********/
-}
-
-/*Libgfdb API Function: Used to terminate/de-initialize db connection
- * (Destructor function for db connection object)
- * Arguments:
- * _conn_node : GFDB Connection node
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-fini_db(gfdb_conn_node_t *_conn_node)
-{
- int ret = -1;
- gfdb_db_operations_t *db_operations_t = NULL;
-
- CHECK_CONN_NODE_GOTO(_conn_node, empty);
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
-
- GF_ASSERT(db_operations_t->fini_db_op);
-
- ret = db_operations_t->fini_db_op(
- &_conn_node->gfdb_connection.gf_db_connection);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_CLOSE_CONNECTION_FAILED,
- "Failed close the db "
- "connection");
- goto out;
- }
-
- ret = delete_conn_node(_conn_node);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DELETE_FROM_LIST_FAILED,
- "Failed deleting "
- "connection node from list");
- }
-empty:
- ret = 0;
-out:
- return ret;
-}
-
-/*Libgfdb API Function: Used to insert/update records in the database
- * NOTE: In current gfdb_sqlite plugin we use that
- * same function to delete the record. Set the
- * gfdb_fop_path to GFDB_FOP_UNDEL to delete the
- * link of inode from GF_FLINK_TB and
- * GFDB_FOP_UNDEL_ALL to delete all the records from
- * GF_FLINK_TB and GF_FILE_TB.
- * TODO: Should separate this function into the
- * delete_record function
- * Refer CTR Xlator features/changetimerecorder for usage
- * Arguments:
- * _conn_node : GFDB Connection node
- * gfdb_db_record : Record to be inserted/updated
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-insert_record(gfdb_conn_node_t *_conn_node, gfdb_db_record_t *gfdb_db_record)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations_t = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(_conn_node);
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = _conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations_t->insert_record_op) {
- ret = db_operations_t->insert_record_op(gf_db_connection,
- gfdb_db_record);
- if (ret) {
- gf_msg(GFDB_DATA_STORE,
- _gfdb_log_level(GF_LOG_ERROR, gfdb_db_record->ignore_errors),
- 0, LG_MSG_INSERT_OR_UPDATE_FAILED,
- "Insert/Update"
- " operation failed");
- }
- }
-
- return ret;
-}
-
-/*Libgfdb API Function: Used to delete record from the database
- * NOTE: In the current gfdb_sqlite3 plugin
- * implementation this function is dummy.
- * Use the insert_record function.
- * Refer CTR Xlator features/changetimerecorder for usage
- * Arguments:
- * _conn_node : GFDB Connection node
- * gfdb_db_record : Record to be deleted
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-delete_record(gfdb_conn_node_t *_conn_node, gfdb_db_record_t *gfdb_db_record)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations_t = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(_conn_node);
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = _conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations_t->delete_record_op) {
- ret = db_operations_t->delete_record_op(gf_db_connection,
- gfdb_db_record);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DELETE_FAILED,
- "Delete operation "
- "failed");
- }
- }
-
- return ret;
-}
-
-/*Libgfdb API Function: Compact the database.
- *
- * Arguments:
- * _conn_node : GFDB Connection node
- * _compact_active : Is compaction currently on?
- * _compact_mode_switched : Was the compaction switch flipped?
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-compact_db(gfdb_conn_node_t *_conn_node, gf_boolean_t _compact_active,
- gf_boolean_t _compact_mode_switched)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations_t = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(_conn_node);
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = _conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations_t->compact_db_op) {
- ret = db_operations_t->compact_db_op(gf_db_connection, _compact_active,
- _compact_mode_switched);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_COMPACT_FAILED,
- "Compaction operation "
- "failed");
- }
- }
-
- return ret;
-}
-
-/*Libgfdb API Function: Query all the records from the database
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * query_limit : limits the number of rows returned by the query
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_all(gfdb_conn_node_t *_conn_node, gf_query_callback_t query_callback,
- void *_query_cbk_args, int query_limit)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations_t = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(_conn_node);
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = _conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations_t->find_all_op) {
- ret = db_operations_t->find_all_op(gf_db_connection, query_callback,
- _query_cbk_args, query_limit);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_FIND_OP_FAILED,
- "Find all operation "
- "failed");
- }
- }
-
- return ret;
-}
-
-/*Libgfdb API Function: Query records/files that have not changed/accessed
- * from a time in past to current time
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * for_time : Time from where the file/s are not
- * changed/accessed
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_unchanged_for_time(gfdb_conn_node_t *_conn_node,
- gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *for_time)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations_t = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(_conn_node);
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = _conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations_t->find_unchanged_for_time_op) {
- ret = db_operations_t->find_unchanged_for_time_op(
- gf_db_connection, query_callback, _query_cbk_args, for_time);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_FIND_OP_FAILED,
- "Find unchanged "
- "operation failed");
- }
- }
-
- return ret;
-}
-
-/*Libgfdb API Function: Query records/files that have changed/accessed from a
- * time in past to current time
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * for_time : Time from where the file/s are
- * changed/accessed
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_recently_changed_files(gfdb_conn_node_t *_conn_node,
- gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *from_time)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations_t = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(_conn_node);
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = _conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations_t->find_recently_changed_files_op) {
- ret = db_operations_t->find_recently_changed_files_op(
- gf_db_connection, query_callback, _query_cbk_args, from_time);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_FIND_OP_FAILED,
- "Find changed operation failed");
- }
- }
-
- return ret;
-}
-
-/*Libgfdb API Function: Query records/files that have not changed/accessed
- * from a time in past to current time, with
- * a desired frequency
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * for_time : Time from where the file/s are not
- * changed/accessed
- * write_freq_thresold : Desired Write Frequency lower limit
- * read_freq_thresold : Desired Read Frequency lower limit
- * _clear_counters : If true, Clears all the frequency counters of
- * all files.
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_unchanged_for_time_freq(gfdb_conn_node_t *_conn_node,
- gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *for_time,
- int write_freq_thresold, int read_freq_thresold,
- gf_boolean_t _clear_counters)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations_t = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(_conn_node);
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = _conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations_t->find_unchanged_for_time_freq_op) {
- ret = db_operations_t->find_unchanged_for_time_freq_op(
- gf_db_connection, query_callback, _query_cbk_args, for_time,
- write_freq_thresold, read_freq_thresold, _clear_counters);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_FIND_OP_FAILED,
- "Find unchanged with freq operation failed");
- }
- }
-
- return ret;
-}
-
-/*Libgfdb API Function: Query records/files that have changed/accessed from a
- * time in past to current time, with
- * a desired frequency
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * for_time : Time from where the file/s are
- * changed/accessed
- * write_freq_thresold : Desired Write Frequency lower limit
- * read_freq_thresold : Desired Read Frequency lower limit
- * _clear_counters : If true, Clears all the frequency counters of
- * all files.
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_recently_changed_files_freq(gfdb_conn_node_t *_conn_node,
- gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *from_time,
- int write_freq_thresold,
- int read_freq_thresold,
- gf_boolean_t _clear_counters)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations_t = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(_conn_node);
-
- db_operations_t = &_conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = _conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations_t->find_recently_changed_files_freq_op) {
- ret = db_operations_t->find_recently_changed_files_freq_op(
- gf_db_connection, query_callback, _query_cbk_args, from_time,
- write_freq_thresold, read_freq_thresold, _clear_counters);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_FIND_OP_FAILED,
- "Find changed with freq operation failed");
- }
- }
-
- return ret;
-}
-
-/*Libgfdb API Function: Clear the heat for all the files
- *
- * Arguments:
- * conn_node : GFDB Connection node
- *
- * Returns : if successful return 0 or
- * -ve value in case of failure
- **/
-
-int
-clear_files_heat(gfdb_conn_node_t *conn_node)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(conn_node);
-
- db_operations = &conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations->clear_files_heat_op) {
- ret = db_operations->clear_files_heat_op(gf_db_connection);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0,
- LG_MSG_INSERT_OR_UPDATE_FAILED,
- "Clear files heat operation failed");
- }
- }
-
- return ret;
-}
-
-/* Libgfdb API Function: Function to extract version of the db
- * Input:
- * gfdb_conn_node_t *conn_node : GFDB Connection node
- * char **version : the version is extracted as a string and will be stored in
- * this variable. The freeing of the memory should be done by
- * the caller.
- * Return:
- * On success return the length of the version string that is
- * extracted.
- * On failure return -1
- * */
-int
-get_db_version(gfdb_conn_node_t *conn_node, char **version)
-{
- int ret = 0;
- gfdb_db_operations_t *db_operations = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(conn_node);
-
- db_operations = &conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations->get_db_version) {
- ret = db_operations->get_db_version(gf_db_connection, version);
- if (ret < 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_FIND_OP_FAILED,
- "Get version failed");
- }
- }
-
- return ret;
-}
-
-int
-get_db_params(gfdb_conn_node_t *conn_node, char *param_key, char **param_value)
-{
- int ret = -1;
- gfdb_db_operations_t *db_operations = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(conn_node);
-
- db_operations = &conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations->get_db_params) {
- ret = db_operations->get_db_params(gf_db_connection, param_key,
- param_value);
- if (ret < 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_FIND_OP_FAILED,
- "Get setting failed");
- }
- }
-
- return ret;
-}
-
-int
-set_db_params(gfdb_conn_node_t *conn_node, char *param_key, char *param_value)
-{
- int ret = -1;
- gfdb_db_operations_t *db_operations = NULL;
- void *gf_db_connection = NULL;
-
- CHECK_CONN_NODE(conn_node);
-
- db_operations = &conn_node->gfdb_connection.gfdb_db_operations;
- gf_db_connection = conn_node->gfdb_connection.gf_db_connection;
-
- if (db_operations->set_db_params) {
- ret = db_operations->set_db_params(gf_db_connection, param_key,
- param_value);
- if (ret < 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0,
- LG_MSG_INSERT_OR_UPDATE_FAILED,
- "Failed to set database setting");
- }
- }
-
- return ret;
-}
-
-static const char *
-get_db_path_key()
-{
- return GFDB_SQL_PARAM_DBPATH;
-}
-
-void
-get_gfdb_methods(gfdb_methods_t *methods)
-{
- methods->init_db = init_db;
- methods->fini_db = fini_db;
- methods->find_all = find_all;
- methods->find_unchanged_for_time = find_unchanged_for_time;
- methods->find_recently_changed_files = find_recently_changed_files;
- methods->find_unchanged_for_time_freq = find_unchanged_for_time_freq;
- methods
- ->find_recently_changed_files_freq = find_recently_changed_files_freq;
- methods->clear_files_heat = clear_files_heat;
- methods->get_db_version = get_db_version;
- methods->get_db_params = get_db_params;
- methods->set_db_params = set_db_params;
- methods->get_db_path_key = get_db_path_key;
-
- /* Query Record related functions */
- methods->gfdb_query_record_new = gfdb_query_record_new;
- methods->gfdb_query_record_free = gfdb_query_record_free;
- methods->gfdb_add_link_to_query_record = gfdb_add_link_to_query_record;
- methods->gfdb_write_query_record = gfdb_write_query_record;
- methods->gfdb_read_query_record = gfdb_read_query_record;
-
- /* Link info related functions */
- methods->gfdb_link_info_new = gfdb_link_info_new;
- methods->gfdb_link_info_free = gfdb_link_info_free;
-
- /* Compaction related functions */
- methods->compact_db = compact_db;
-}
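[Editorial sketch, not part of the deleted sources.] get_gfdb_methods() is the single entry point through which higher-level consumers obtain the libgfdb API as a table of function pointers. A hypothetical caller (example_use_methods() is invented for illustration) might use it like this:

static int
example_use_methods(dict_t *params)
{
    gfdb_methods_t methods = {
        0,
    };
    gfdb_conn_node_t *conn = NULL;
    int ret = -1;

    get_gfdb_methods(&methods);

    conn = methods.init_db(params, GFDB_SQLITE3);
    if (!conn)
        goto out;

    /* ... methods.find_all(), methods.compact_db(), etc. ... */

    ret = methods.fini_db(conn);
out:
    return ret;
}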
diff --git a/libglusterfs/src/gfdb/gfdb_data_store.h b/libglusterfs/src/gfdb/gfdb_data_store.h
deleted file mode 100644
index 55b2fb1bc55..00000000000
--- a/libglusterfs/src/gfdb/gfdb_data_store.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-#ifndef __GFDB_DATA_STORE_H
-#define __GFDB_DATA_STORE_H
-
-#include "glusterfs/glusterfs.h"
-#include "glusterfs/xlator.h"
-#include "glusterfs/logging.h"
-#include "glusterfs/common-utils.h"
-#include <time.h>
-#include <sys/time.h>
-
-#include "gfdb_data_store_types.h"
-
-/* GFDB Connection Node:
- * ~~~~~~~~~~~~~~~~~~~~
- * Represents the connection to the database while using libgfdb
- * The connection node is not thread safe as far as fini_db is concerned.
- * You can use a single connection node
- * to do multithreaded db operations like insert/delete/find of records.
- * But you need to wait for all the operating threads to complete, i.e.
- * pthread_join(), and then call fini_db() to destroy the connection node.
- * gfdb_conn_node_t is an opaque structure.
- * */
-typedef struct gfdb_conn_node_t gfdb_conn_node_t;
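[Editorial sketch, not part of the deleted sources.] The thread-safety note above boils down to: a single gfdb_conn_node_t may be shared freely across worker threads, but they must all be joined before fini_db(). A minimal lifecycle, assuming <pthread.h> is available and with every example_* name invented for illustration:

static void *
example_worker(void *arg)
{
    gfdb_conn_node_t *conn = arg;

    /* insert_record()/find_all() calls on the shared connection go here */
    (void)conn;
    return NULL;
}

static int
example_lifecycle(dict_t *params)
{
    pthread_t workers[2];
    gfdb_conn_node_t *conn = NULL;
    int i = 0;

    conn = init_db(params, GFDB_SQLITE3);
    if (!conn)
        return -1;

    for (i = 0; i < 2; i++)
        pthread_create(&workers[i], NULL, example_worker, conn);

    /* fini_db() must not race with the workers, so join them first */
    for (i = 0; i < 2; i++)
        pthread_join(workers[i], NULL);

    return fini_db(conn);
}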
-
-/*Libgfdb API Function: Used to initialize db connection
- * Arguments:
- * args : Dictionary containing database specific parameters
- * eg: For sqlite3, pagesize, cachesize, db name, db path
- * etc
- * gfdb_db_type : Type of data base used i.e sqlite or hyperdex etc
- * Returns : if successful return the GFDB Connection Node to the caller or
- * NULL value in case of failure*/
-gfdb_conn_node_t *
-init_db(dict_t *arg, gfdb_db_type_t db_type);
-
-typedef gfdb_conn_node_t *(*init_db_t)(dict_t *args,
- gfdb_db_type_t gfdb_db_type);
-
-/*Libgfdb API Function: Used to terminate/de-initialize db connection
- * (Destructor function for db connection object)
- * Arguments:
- * _conn_node : DB Connection Index of the DB Connection
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-fini_db(gfdb_conn_node_t *);
-
-typedef int (*fini_db_t)(gfdb_conn_node_t *_conn_node);
-
-/*Libgfdb API Function: Used to insert/update records in the database
- * NOTE: In the current gfdb_sqlite3 plugin we use the
- * same function to delete the record. Set the
- * gfdb_fop_path to GFDB_FOP_UNDEL to delete the
- * link of inode from GF_FLINK_TB and
- * GFDB_FOP_UNDEL_ALL to delete all the records from
- * GF_FLINK_TB and GF_FILE_TB.
- * TODO: Should separate this function into the
- * delete_record function
- * Refer CTR Xlator features/changetimerecorder for usage
- * Arguments:
- * _conn_node : GFDB Connection node
- * gfdb_db_record : Record to be inserted/updated
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-insert_record(gfdb_conn_node_t *, gfdb_db_record_t *gfdb_db_record);
-
-/*Libgfdb API Function: Used to delete record from the database
- * NOTE: In the current gfdb_sqlite3 plugin
- * implementation this function is a dummy.
- * Use the insert_record function.
- * Refer CTR Xlator features/changetimerecorder for usage
- * Arguments:
- * _conn_node : GFDB Connection node
- * gfdb_db_record : Record to be deleted
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-delete_record(gfdb_conn_node_t *, gfdb_db_record_t *gfdb_db_record);
-
-/*Libgfdb API Function: Query all the records from the database
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * query_limit : 0 - unlimited,
- * any positive value - adds the LIMIT clause
- * to the SQL query
- *
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_all(gfdb_conn_node_t *, gf_query_callback_t query_callback,
- void *_query_cbk_args, int query_limit);
-
-typedef int (*find_all_t)(gfdb_conn_node_t *,
- gf_query_callback_t query_callback,
- void *_query_cbk_args, int query_limit);
-
-/*Libgfdb API Function: Query records/files that have not changed/accessed
- * from a time in past to current time
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * for_time : Time from where the file/s are not
- * changed/accessed
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_unchanged_for_time(gfdb_conn_node_t *, gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *for_time);
-
-typedef int (*find_unchanged_for_time_t)(gfdb_conn_node_t *_conn_node,
- gf_query_callback_t query_callback,
- void *_query_cbk_args,
- gfdb_time_t *for_time);
-
-/*Libgfdb API Function: Query records/files that have changed/accessed from a
- * time in past to current time
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * for_time : Time from where the file/s are
- * changed/accessed
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_recently_changed_files(gfdb_conn_node_t *_conn,
- gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *from_time);
-
-typedef int (*find_recently_changed_files_t)(gfdb_conn_node_t *_conn_node,
- gf_query_callback_t query_callback,
- void *_query_cbk_args,
- gfdb_time_t *from_time);
-
-/*Libgfdb API Function: Query records/files that have not changed/accessed
- * from a time in past to current time, with
- * a desired frequency
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * for_time : Time from where the file/s are not
- * changed/accessed
- * write_freq_thresold : Desired Write Frequency lower limit
- * read_freq_thresold : Desired Read Frequency lower limit
- * _clear_counters : If true, Clears all the frequency counters of
- * all files.
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_unchanged_for_time_freq(gfdb_conn_node_t *_conn,
- gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *for_time,
- int write_freq_thresold, int read_freq_thresold,
- gf_boolean_t _clear_counters);
-
-typedef int (*find_unchanged_for_time_freq_t)(
- gfdb_conn_node_t *_conn_node, gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *for_time, int write_freq_thresold,
- int read_freq_thresold, gf_boolean_t _clear_counters);
-
-/*Libgfdb API Function: Query records/files that have changed/accessed from a
- * time in past to current time, with
- * a desired frequency
- * Arguments:
- * _conn_node : GFDB Connection node
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * for_time : Time from where the file/s are
- * changed/accessed
- * write_freq_thresold : Desired Write Frequency lower limit
- * read_freq_thresold : Desired Read Frequency lower limit
- * _clear_counters : If true, Clears all the frequency counters of
- * all files.
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-find_recently_changed_files_freq(gfdb_conn_node_t *_conn,
- gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *from_time,
- int write_freq_thresold,
- int read_freq_thresold,
- gf_boolean_t _clear_counters);
-
-typedef int (*find_recently_changed_files_freq_t)(
- gfdb_conn_node_t *_conn_node, gf_query_callback_t query_callback,
- void *_query_cbk_args, gfdb_time_t *from_time, int write_freq_thresold,
- int read_freq_thresold, gf_boolean_t _clear_counters);
-
-typedef const char *(*get_db_path_key_t)();
-
-/*Libgfdb API Function: Clear the heat for all the files
- *
- * Arguments:
- * _conn_node : GFDB Connection node
- *
- * Returns : if successful return 0 or
- * -ve value in case of failure
- **/
-int
-clear_files_heat(gfdb_conn_node_t *_conn_node);
-
-typedef int (*clear_files_heat_t)(gfdb_conn_node_t *_conn_node);
-
-/* Libgfdb API Function: Function to extract version of the db
- * Arguments:
- * gfdb_conn_node_t *_conn_node : GFDB Connection node
- * char **version : the version is extracted as a string
- * and will be stored in this variable.
- * The freeing of the memory should be done by the caller.
- * Return:
- * On success return the length of the version string that is
- * extracted.
- * On failure return -1
- * */
-int
-get_db_version(gfdb_conn_node_t *_conn_node, char **version);
-
-typedef int (*get_db_version_t)(gfdb_conn_node_t *_conn_node, char **version);
-
-/* Libgfdb API Function: Function to extract param from the db
- * Arguments:
- * gfdb_conn_node_t *_conn_node : GFDB Connection node
- * char *param_key : param to be extracted
- * char **param_value : the value of the param that is
- * extracted. This function will allocate memory
- * to pragma_value. The caller should free the memory.
- * Return:
- * On success return the length of the param value that is
- * extracted.
- * On failure return -1
- * */
-int
-get_db_params(gfdb_conn_node_t *_conn_node, char *param_key,
- char **param_value);
-
-typedef int (*get_db_params_t)(gfdb_conn_node_t *db_conn, char *param_key,
- char **param_value);
-
-/* Libgfdb API Function: Function to set db params
- * Arguments:
- * gfdb_conn_node_t *_conn_node : GFDB Connection node
- * char *param_key : param to be set
- * char *param_value : param value
- * Return:
- * On success return 0
- * On failure return -1
- * */
-int
-set_db_params(gfdb_conn_node_t *_conn_node, char *param_key, char *param_value);
-
-typedef int (*set_db_params_t)(gfdb_conn_node_t *db_conn, char *param_key,
- char *param_value);
-
-/*Libgfdb API Function: Compact the database.
- *
- * Arguments:
- * _conn_node : GFDB Connection node
- * _compact_active : Is compaction currently on?
- * _compact_mode_switched : Was the compaction switch flipped?
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-int
-compact_db(gfdb_conn_node_t *_conn_node, gf_boolean_t _compact_active,
- gf_boolean_t _compact_mode_switched);
-
-typedef int (*compact_db_t)(gfdb_conn_node_t *db_conn,
- gf_boolean_t compact_active,
- gf_boolean_t compact_mode_switched);
-
-typedef struct gfdb_methods_s {
- init_db_t init_db;
- fini_db_t fini_db;
- find_all_t find_all;
- find_unchanged_for_time_t find_unchanged_for_time;
- find_recently_changed_files_t find_recently_changed_files;
- find_unchanged_for_time_freq_t find_unchanged_for_time_freq;
- find_recently_changed_files_freq_t find_recently_changed_files_freq;
- clear_files_heat_t clear_files_heat;
- get_db_version_t get_db_version;
- get_db_params_t get_db_params;
- set_db_params_t set_db_params;
- /* Do not expose dbpath directly. Expose it via an */
- /* access function: get_db_path_key(). */
- char *dbpath;
- get_db_path_key_t get_db_path_key;
-
- /* Query Record related functions */
- gfdb_query_record_new_t gfdb_query_record_new;
- gfdb_query_record_free_t gfdb_query_record_free;
- gfdb_add_link_to_query_record_t gfdb_add_link_to_query_record;
- gfdb_write_query_record_t gfdb_write_query_record;
- gfdb_read_query_record_t gfdb_read_query_record;
-
- /* Link info related functions */
- gfdb_link_info_new_t gfdb_link_info_new;
- gfdb_link_info_free_t gfdb_link_info_free;
-
- /* Compaction related functions */
- compact_db_t compact_db;
-} gfdb_methods_t;
-
-void
-get_gfdb_methods(gfdb_methods_t *methods);
-
-typedef void (*get_gfdb_methods_t)(gfdb_methods_t *methods);
-
-#endif
diff --git a/libglusterfs/src/gfdb/gfdb_data_store_helper.c b/libglusterfs/src/gfdb/gfdb_data_store_helper.c
deleted file mode 100644
index 5f33312ad9b..00000000000
--- a/libglusterfs/src/gfdb/gfdb_data_store_helper.c
+++ /dev/null
@@ -1,588 +0,0 @@
-
-#include "gfdb_data_store_helper.h"
-#include "glusterfs/syscall.h"
-
-/******************************************************************************
- *
- * Query record related functions
- *
- * ****************************************************************************/
-
-/*Create a single link info structure*/
-gfdb_link_info_t *
-gfdb_link_info_new()
-{
- gfdb_link_info_t *link_info = NULL;
-
- link_info = GF_CALLOC(1, sizeof(gfdb_link_info_t), gf_mt_gfdb_link_info_t);
- if (!link_info) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "Memory allocation failed for "
- "link_info ");
- goto out;
- }
-
- INIT_LIST_HEAD(&link_info->list);
-
-out:
-
- return link_info;
-}
-
-/*Destroy a link info structure*/
-void
-gfdb_link_info_free(gfdb_link_info_t *link_info)
-{
- GF_FREE(link_info);
-}
-
-/*Function to create the query_record*/
-gfdb_query_record_t *
-gfdb_query_record_new()
-{
- int ret = -1;
- gfdb_query_record_t *query_record = NULL;
-
- query_record = GF_CALLOC(1, sizeof(gfdb_query_record_t),
- gf_mt_gfdb_query_record_t);
- if (!query_record) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "Memory allocation failed for "
- "query_record ");
- goto out;
- }
-
- INIT_LIST_HEAD(&query_record->link_list);
-
- ret = 0;
-out:
- if (ret == -1) {
- GF_FREE(query_record);
- }
- return query_record;
-}
-
-/*Function to delete a single linkinfo from list*/
-static void
-gfdb_delete_linkinfo_from_list(gfdb_link_info_t **link_info)
-{
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, link_info, out);
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, *link_info, out);
-
- /*Remove hard link from list*/
- list_del(&(*link_info)->list);
- gfdb_link_info_free(*link_info);
- link_info = NULL;
-out:
- return;
-}
-
-/*Function to destroy link_info list*/
-void
-gfdb_free_link_info_list(gfdb_query_record_t *query_record)
-{
- gfdb_link_info_t *link_info = NULL;
- gfdb_link_info_t *temp = NULL;
-
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, query_record, out);
-
- list_for_each_entry_safe(link_info, temp, &query_record->link_list, list)
- {
- gfdb_delete_linkinfo_from_list(&link_info);
- link_info = NULL;
- }
-
-out:
- return;
-}
-
-/* Function to add linkinfo to the query record */
-int
-gfdb_add_link_to_query_record(gfdb_query_record_t *query_record, uuid_t pgfid,
- char *base_name)
-{
- int ret = -1;
- gfdb_link_info_t *link_info = NULL;
- int base_name_len = 0;
-
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, query_record, out);
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, pgfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, base_name, out);
-
- link_info = gfdb_link_info_new();
- if (!link_info) {
- goto out;
- }
-
- gf_uuid_copy(link_info->pargfid, pgfid);
- base_name_len = strlen(base_name);
- memcpy(link_info->file_name, base_name, base_name_len);
- link_info->file_name[base_name_len] = '\0';
-
- list_add_tail(&link_info->list, &query_record->link_list);
-
- query_record->link_count++;
-
- ret = 0;
-out:
- if (ret) {
- gfdb_link_info_free(link_info);
- link_info = NULL;
- }
- return ret;
-}
-
-/*Function to destroy query record*/
-void
-gfdb_query_record_free(gfdb_query_record_t *query_record)
-{
- if (query_record) {
- gfdb_free_link_info_list(query_record);
- GF_FREE(query_record);
- }
-}
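[Editorial sketch, not part of the deleted sources.] The helpers above are typically used together: allocate a query record, attach one link per hard link, and free the whole chain with gfdb_query_record_free(), which also tears down the link list. example_build_query_record() and the "some-name" entry are invented for illustration:

static int
example_build_query_record(uuid_t gfid, uuid_t pgfid)
{
    int ret = -1;
    char name[] = "some-name"; /* purely illustrative base name */
    gfdb_query_record_t *qrecord = gfdb_query_record_new();

    if (!qrecord)
        goto out;

    gf_uuid_copy(qrecord->gfid, gfid);

    /* one hard link: <pgfid>/some-name */
    ret = gfdb_add_link_to_query_record(qrecord, pgfid, name);
    if (ret)
        goto out;

    /* the record would now be handed to a query callback, serialized, ... */

out:
    gfdb_query_record_free(qrecord);
    return ret;
}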
-
-/******************************************************************************
- SERIALIZATION/DE-SERIALIZATION OF QUERY RECORD
-*******************************************************************************/
-/******************************************************************************
- The on disk format of query record is as follows,
-
-+---------------------------------------------------------------------------+
-| Length of serialized query record | Serialized Query Record |
-+---------------------------------------------------------------------------+
- 4 bytes Length of serialized query record
- |
- |
- -------------------------------------------------|
- |
- |
- V
- Serialized Query Record Format:
- +---------------------------------------------------------------------------+
- | GFID | Link count | <LINK INFO> |..... | FOOTER |
- +---------------------------------------------------------------------------+
- 16 B 4 B Link Length 4 B
- | |
- | |
- -----------------------------| |
- | |
- | |
- V |
- Each <Link Info> will be serialized as |
- +-----------------------------------------------+ |
- | PGID | BASE_NAME_LENGTH | BASE_NAME | |
- +-----------------------------------------------+ |
- 16 B 4 B BASE_NAME_LENGTH |
- |
- |
- ------------------------------------------------------------------------|
- |
- |
- V
- FOOTER is a magic number 0xBAADF00D indicating the end of the record.
- This also serves as a serialized schema validator.
- * ****************************************************************************/
-
-#define GFDB_QUERY_RECORD_FOOTER 0xBAADF00D
-#define UUID_LEN 16
-
-/*Function to get the potential length of the serialized buffer*/
-static int32_t
-gfdb_query_record_serialized_length(gfdb_query_record_t *query_record)
-{
- int32_t len = -1;
- gfdb_link_info_t *link_info = NULL;
-
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, query_record, out);
-
- /* Length of GFID */
- len = UUID_LEN;
-
- /* length of number of links*/
- len += sizeof(int32_t);
-
- list_for_each_entry(link_info, &query_record->link_list, list)
- {
- /* length of PFID */
- len += UUID_LEN;
-
- /* Add size of base name length*/
- len += sizeof(int32_t);
-
- /* Length of base_name */
- len += strlen(link_info->file_name);
- }
-
- /* length of footer */
- len += sizeof(int32_t);
-out:
- return len;
-}
-
-/* Function for serializing query record.
- *
- * Query Record Serialization Format
- * +---------------------------------------------------------------------------+
- * | GFID | Link count | <LINK INFO> |..... | FOOTER |
- * +---------------------------------------------------------------------------+
- * 16 B 4 B Link Length 4 B
- *
- *
- * Each <Link Info> will be serialized as
- * +-----------------------------------------------+
- * | PGID | BASE_NAME_LENGTH | BASE_NAME |
- * +-----------------------------------------------+
- * 16 B 4 B BASE_NAME_LENGTH
- *
- *
- * FOOTER is a magic number 0xBAADF00D indicating the end of the record.
- * This also serves as a serialized schema validator.
- *
- * The function will allocate memory to the serialized buffer,
- * the caller needs to free it.
- * Returns the length of the serialized buffer on success
- * or -1 on failure.
- *
- * */
-static int
-gfdb_query_record_serialize(gfdb_query_record_t *query_record, char **in_buffer)
-{
- gfdb_link_info_t *link_info = NULL;
- int count = -1;
- int base_name_len = 0;
- int buffer_length = 0;
- int footer = GFDB_QUERY_RECORD_FOOTER;
- char *buffer = NULL;
- char *ret_buffer = NULL;
-
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, query_record, out);
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, (query_record->link_count > 0), out);
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, in_buffer, out);
-
- /* Calculate the total length of the serialized buffer */
- buffer_length = gfdb_query_record_serialized_length(query_record);
- if (buffer_length <= 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Failed to calculate the length of "
- "serialized buffer");
- goto out;
- }
-
- /* Allocate memory to the serialized buffer */
- ret_buffer = GF_CALLOC(1, buffer_length, gf_common_mt_char);
- if (!ret_buffer) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Memory allocation failed for "
- "serialized buffer.");
- goto out;
- }
-
- buffer = ret_buffer;
-
- count = 0;
-
- /* Copying the GFID */
- memcpy(buffer, query_record->gfid, UUID_LEN);
- buffer += UUID_LEN;
- count += UUID_LEN;
-
- /* Copying the number of links */
- memcpy(buffer, &query_record->link_count, sizeof(int32_t));
- buffer += sizeof(int32_t);
- count += sizeof(int32_t);
-
- list_for_each_entry(link_info, &query_record->link_list, list)
- {
- /* Copying the PFID */
- memcpy(buffer, link_info->pargfid, UUID_LEN);
- buffer += UUID_LEN;
- count += UUID_LEN;
-
- /* Copying base name length*/
- base_name_len = strlen(link_info->file_name);
- memcpy(buffer, &base_name_len, sizeof(int32_t));
- buffer += sizeof(int32_t);
- count += sizeof(int32_t);
-
- /* Length of base_name */
- memcpy(buffer, link_info->file_name, base_name_len);
- buffer += base_name_len;
- count += base_name_len;
- }
-
- /* Copying the Footer of the record */
- memcpy(buffer, &footer, sizeof(int32_t));
- count += sizeof(int32_t);
-
-out:
- if (count < 0) {
- GF_FREE(ret_buffer);
- ret_buffer = NULL;
- }
- *in_buffer = ret_buffer;
- return count;
-}
-
-static gf_boolean_t
-is_serialized_buffer_valid(char *in_buffer, int buffer_length)
-{
- gf_boolean_t ret = _gf_false;
- int footer = 0;
-
- /* Read the footer */
- in_buffer += (buffer_length - sizeof(int32_t));
- memcpy(&footer, in_buffer, sizeof(int32_t));
-
- /*
- * if the footer is not GFDB_QUERY_RECORD_FOOTER
- * then the serialized record is invalid
- *
- * */
- if (footer != GFDB_QUERY_RECORD_FOOTER) {
- goto out;
- }
-
- ret = _gf_true;
-out:
- return ret;
-}
-
-static int
-gfdb_query_record_deserialize(char *in_buffer, int buffer_length,
- gfdb_query_record_t **query_record)
-{
- int ret = -1;
- char *buffer = NULL;
- int i = 0;
- gfdb_link_info_t *link_info = NULL;
- int count = 0;
- int base_name_len = 0;
- gfdb_query_record_t *ret_qrecord = NULL;
-
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, in_buffer, out);
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, query_record, out);
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, (buffer_length > 0), out);
-
- if (!is_serialized_buffer_valid(in_buffer, buffer_length)) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Invalid serialized query record");
- goto out;
- }
-
- buffer = in_buffer;
-
- ret_qrecord = gfdb_query_record_new();
- if (!ret_qrecord) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Failed to allocate space to "
- "gfdb_query_record_t");
- goto out;
- }
-
- /* READ GFID */
- memcpy((ret_qrecord)->gfid, buffer, UUID_LEN);
- buffer += UUID_LEN;
- count += UUID_LEN;
-
- /* Read the number of link */
- memcpy(&(ret_qrecord->link_count), buffer, sizeof(int32_t));
- buffer += sizeof(int32_t);
- count += sizeof(int32_t);
-
- /* Read all the links */
- for (i = 0; i < ret_qrecord->link_count; i++) {
- if (count >= buffer_length) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Invalid serialized "
- "query record");
- ret = -1;
- goto out;
- }
-
- link_info = gfdb_link_info_new();
- if (!link_info) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Failed to create link_info");
- goto out;
- }
-
- /* READ PGFID */
- memcpy(link_info->pargfid, buffer, UUID_LEN);
- buffer += UUID_LEN;
- count += UUID_LEN;
-
- /* Read base name length */
- memcpy(&base_name_len, buffer, sizeof(int32_t));
- buffer += sizeof(int32_t);
- count += sizeof(int32_t);
-
- /* READ basename */
- memcpy(link_info->file_name, buffer, base_name_len);
- buffer += base_name_len;
- count += base_name_len;
- link_info->file_name[base_name_len] = '\0';
-
- /* Add link_info to the list */
- list_add_tail(&link_info->list, &(ret_qrecord->link_list));
-
- /* Resetting link_info */
- link_info = NULL;
- }
-
- ret = 0;
-out:
- if (ret) {
- gfdb_query_record_free(ret_qrecord);
- ret_qrecord = NULL;
- }
- *query_record = ret_qrecord;
- return ret;
-}
-
-/* Function to write query record to file
- *
- * Disk format
- * +---------------------------------------------------------------------------+
- * | Length of serialized query record | Serialized Query Record |
- * +---------------------------------------------------------------------------+
- * 4 bytes Length of serialized query record
- *
- * Please refer gfdb_query_record_serialize () for format of
- * Serialized Query Record
- *
- * */
-int
-gfdb_write_query_record(int fd, gfdb_query_record_t *query_record)
-{
- int ret = -1;
- int buffer_len = 0;
- char *buffer = NULL;
- int write_len = 0;
- char *write_buffer = NULL;
-
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, (fd >= 0), out);
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, query_record, out);
-
- buffer_len = gfdb_query_record_serialize(query_record, &buffer);
- if (buffer_len < 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Failed to serialize query record");
- goto out;
- }
-
- /* Serialize the buffer length and write to file */
- ret = write(fd, &buffer_len, sizeof(int32_t));
- if (ret < 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Failed to write buffer length"
- " to file");
- goto out;
- }
-
- /* Write the serialized query record to file */
- write_len = buffer_len;
- write_buffer = buffer;
- while ((ret = write(fd, write_buffer, write_len)) < write_len) {
- if (ret < 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, errno, LG_MSG_DB_ERROR,
- "Failed to write serialized "
- "query record to file");
- goto out;
- }
-
- write_buffer += ret;
- write_len -= ret;
- }
-
- ret = 0;
-out:
- GF_FREE(buffer);
- return ret;
-}
-
-/* Function to read query record from file.
- * Allocates memory to query record and
- * returns length of serialized query record when successful
- * Return -1 when failed.
- * Return 0 when reached EOF.
- * */
-int
-gfdb_read_query_record(int fd, gfdb_query_record_t **query_record)
-{
- int ret = -1;
- int buffer_len = 0;
- int read_len = 0;
- char *buffer = NULL;
- char *read_buffer = NULL;
-
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, (fd >= 0), out);
- GF_VALIDATE_OR_GOTO(GFDB_DATA_STORE, query_record, out);
-
- /* Read serialized query record length from the file*/
- ret = sys_read(fd, &buffer_len, sizeof(int32_t));
- if (ret < 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Failed reading buffer length"
- " from file");
- goto out;
- }
- /* EOF */
- else if (ret == 0) {
- ret = 0;
- goto out;
- }
-
- /* Assumed sane range is 1B - 10MB */
- if ((buffer_len <= 0) || (buffer_len > (10 * 1024 * 1024))) {
- ret = -1;
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "buffer length range is out of bound %d", buffer_len);
- goto out;
- }
-
- /* Allocating memory to the serialization buffer */
- buffer = GF_CALLOC(1, buffer_len, gf_common_mt_char);
- if (!buffer) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Failed to allocate space to "
- "serialized buffer");
- goto out;
- }
-
- /* Read the serialized query record from file */
- read_len = buffer_len;
- read_buffer = buffer;
- while ((ret = sys_read(fd, read_buffer, read_len)) < read_len) {
- /*Any error */
- if (ret < 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, errno, LG_MSG_DB_ERROR,
- "Failed to read serialized "
- "query record from file");
- goto out;
- }
- /* EOF */
- else if (ret == 0) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Invalid query record or "
- "corrupted query file");
- ret = -1;
- goto out;
- }
-
- read_buffer += ret;
- read_len -= ret;
- }
-
- ret = gfdb_query_record_deserialize(buffer, buffer_len, query_record);
- if (ret) {
- gf_msg(GFDB_DATA_STORE, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
- "Failed to de-serialize query record");
- goto out;
- }
-
- ret = buffer_len;
-out:
- GF_FREE(buffer);
- return ret;
-}
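[Editorial sketch, not part of the deleted sources.] Together the two functions above implement the on-disk query file: gfdb_write_query_record() appends "<length><serialized record>" and gfdb_read_query_record() consumes one such entry per call, returning the serialized length, 0 on EOF, or -1 on error. A hypothetical round trip over an already-open fd (example_round_trip() is invented; lseek() assumes the usual POSIX headers are pulled in):

static int
example_round_trip(int fd, gfdb_query_record_t *qrecord)
{
    gfdb_query_record_t *read_back = NULL;
    int ret = -1;

    ret = gfdb_write_query_record(fd, qrecord);
    if (ret)
        goto out;

    /* rewind and read the entry back */
    if (lseek(fd, 0, SEEK_SET) < 0) {
        ret = -1;
        goto out;
    }

    ret = gfdb_read_query_record(fd, &read_back);
    if (ret <= 0) {
        ret = -1;
        goto out;
    }

    ret = 0;
out:
    gfdb_query_record_free(read_back);
    return ret;
}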
diff --git a/libglusterfs/src/gfdb/gfdb_data_store_helper.h b/libglusterfs/src/gfdb/gfdb_data_store_helper.h
deleted file mode 100644
index 3c4499bdd3b..00000000000
--- a/libglusterfs/src/gfdb/gfdb_data_store_helper.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-#ifndef __GFDB_DATA_STORE_HELPER_H
-#define __GFDB_DATA_STORE_HELPER_H
-
-#include <time.h>
-#include <sys/time.h>
-#include <string.h>
-#include <fcntl.h>
-
-#include "glusterfs/common-utils.h"
-#include "glusterfs/compat-uuid.h"
-#include "gfdb_mem-types.h"
-#include "glusterfs/dict.h"
-#include "glusterfs/byte-order.h"
-#include "glusterfs/libglusterfs-messages.h"
-
-#define GFDB_DATA_STORE "gfdbdatastore"
-
-/*******************************************************************************
- *
- * Query related data structure and functions
- *
- * ****************************************************************************/
-
-#ifdef NAME_MAX
-#define GF_NAME_MAX NAME_MAX
-#else
-#define GF_NAME_MAX 255
-#endif
-
-/*Structure to hold the link information*/
-typedef struct gfdb_link_info {
- uuid_t pargfid;
- char file_name[GF_NAME_MAX];
- struct list_head list;
-} gfdb_link_info_t;
-
-/*Structure used for querying purpose*/
-typedef struct gfdb_query_record {
- uuid_t gfid;
- /*This is the hardlink list*/
- struct list_head link_list;
- int link_count;
-} gfdb_query_record_t;
-
-/*Create a single link info structure*/
-gfdb_link_info_t *
-gfdb_link_info_new();
-typedef gfdb_link_info_t *(*gfdb_link_info_new_t)();
-
-/*Destroy a link info structure*/
-void
-gfdb_link_info_free(gfdb_link_info_t *gfdb_link_info);
-typedef void (*gfdb_link_info_free_t)(gfdb_link_info_t *gfdb_link_info);
-
-/* Function to create the query_record */
-gfdb_query_record_t *
-gfdb_query_record_new();
-typedef gfdb_query_record_t *(*gfdb_query_record_new_t)();
-
-/* Function to add linkinfo to query record */
-int
-gfdb_add_link_to_query_record(gfdb_query_record_t *gfdb_query_record,
- uuid_t pgfid, char *base_name);
-typedef int (*gfdb_add_link_to_query_record_t)(gfdb_query_record_t *, uuid_t,
- char *);
-
-/*Function to destroy query record*/
-void
-gfdb_query_record_free(gfdb_query_record_t *gfdb_query_record);
-typedef void (*gfdb_query_record_free_t)(gfdb_query_record_t *);
-
-/* Function to write query record to file */
-int
-gfdb_write_query_record(int fd, gfdb_query_record_t *gfdb_query_record);
-typedef int (*gfdb_write_query_record_t)(int, gfdb_query_record_t *);
-
-/* Function to read query record from file.
- * Allocates memory to the query record and returns the length of the
- * serialized query record when successful.
- * Returns -1 on failure.
- * Returns 0 on EOF.
- * */
-int
-gfdb_read_query_record(int fd, gfdb_query_record_t **gfdb_query_record);
-typedef int (*gfdb_read_query_record_t)(int, gfdb_query_record_t **);
-
-#endif
\ No newline at end of file
diff --git a/libglusterfs/src/gfdb/gfdb_data_store_types.h b/libglusterfs/src/gfdb/gfdb_data_store_types.h
deleted file mode 100644
index 5ee050d4fab..00000000000
--- a/libglusterfs/src/gfdb/gfdb_data_store_types.h
+++ /dev/null
@@ -1,532 +0,0 @@
-/*
- Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-#ifndef __GFDB_DATA_STORE_TYPE_H
-#define __GFDB_DATA_STORE_TYPE_H
-
-#include "gfdb_data_store_helper.h"
-
-/*
- * Helps in dynamically choosing log level
- * */
-static inline gf_loglevel_t
-_gfdb_log_level(gf_loglevel_t given_level, gf_boolean_t ignore_level)
-{
- return (ignore_level) ? GF_LOG_DEBUG : given_level;
-}
-
-typedef enum gf_db_operation {
- GFDB_INVALID_DB_OP = -1,
- /* Query DB OPS : All the Query DB_OP should be added */
- /* in between START and END */
- GFDB_QUERY_DB_OP_START, /* Start of Query DB_OP */
- GFDB_QUERY_DB_OP,
- GF_FTABLE_EXISTS_DB_OP,
- GFDB_QUERY_DB_OP_END, /* End of Query DB_OP */
- /* Non-Query DB OPS */
- GFDB_DB_CREATE_DB_OP,
- GFDB_GFID_EXIST_DB_OP,
- GFDB_W_INSERT_DB_OP,
- GFDB_WU_INSERT_DB_OP,
- GFDB_W_UPDATE_DB_OP,
- GFDB_WU_UPDATE_DB_OP,
- GFDB_W_DELETE_DB_OP,
- GFDB_UW_DELETE_DB_OP,
- GFDB_WFC_UPDATE_DB_OP,
- GFDB_RFC_UPDATE_DB_OP,
- GFDB_DB_COMPACT_DB_OP /* Added for VACUUM/manual compaction support */
-} gf_db_operation_t;
-
-#define GF_COL_MAX_NUM 2
-#define GF_COL_ALL " * "
-
-/* Column/fields names used in the DB.
- * If any new field is added, this list should be updated*/
-#define GF_COL_GF_ID "GF_ID"
-#define GF_COL_GF_PID "GF_PID"
-#define GF_COL_FILE_NAME "FNAME"
-#define GF_COL_WSEC "W_SEC"
-#define GF_COL_WMSEC "W_MSEC"
-#define GF_COL_UWSEC "UW_SEC"
-#define GF_COL_UWMSEC "UW_MSEC"
-#define GF_COL_WSEC_READ "W_READ_SEC"
-#define GF_COL_WMSEC_READ "W_READ_MSEC"
-#define GF_COL_UWSEC_READ "UW_READ_SEC"
-#define GF_COL_UWMSEC_READ "UW_READ_MSEC"
-#define GF_COL_WDEL_FLAG "W_DEL_FLAG"
-#define GF_COL_WRITE_FREQ_CNTR "WRITE_FREQ_CNTR"
-#define GF_COL_READ_FREQ_CNTR "READ_FREQ_CNTR"
-#define GF_COL_LINK_UPDATE "LINK_UPDATE"
-
-/***********************Time related********************************/
-/*1 sec = 1000000 microsec*/
-#define GFDB_MICROSEC 1000000
-
-/*All the gfdb times are represented using this structure*/
-typedef struct timeval gfdb_time_t;
-
-/*Convert time into microseconds*/
-static inline uint64_t
-gfdb_time_2_usec(gfdb_time_t *gfdb_time)
-{
- GF_ASSERT(gfdb_time);
- return ((uint64_t)gfdb_time->tv_sec * GFDB_MICROSEC) + gfdb_time->tv_usec;
-}
-
-/******************************************************************************
- *
- * Insert/Update Record related data structures/functions
- *
- * ****************************************************************************/
-
-/*Indicates a generic synchronous write to the db
- * This may or may not be implemented*/
-typedef enum gfdb_sync_type {
- GFDB_INVALID_SYNC = -1,
- GFDB_DB_ASYNC,
- GFDB_DB_SYNC
-} gfdb_sync_type_t;
-
-/*Strings related to the above sync type*/
-#define GFDB_STR_DB_ASYNC "async"
-#define GFDB_STR_DB_SYNC "sync"
-
-/*To convert sync type from string to gfdb_sync_type_t*/
-static inline int
-gf_string2gfdbdbsync(char *sync_option)
-{
- int ret = -1;
-
- if (!sync_option)
- goto out;
- if (strcmp(sync_option, GFDB_STR_DB_ASYNC) == 0) {
- ret = GFDB_DB_ASYNC;
- } else if (strcmp(sync_option, GFDB_STR_DB_SYNC) == 0) {
- ret = GFDB_DB_SYNC;
- }
-out:
- return ret;
-}
-
-/*Indicates different types of db*/
-typedef enum gfdb_db_type {
- GFDB_INVALID_DB = -1,
- GFDB_HASH_FILE_STORE,
- GFDB_ROCKS_DB,
- GFDB_SQLITE3,
- GFDB_HYPERDEX,
- GFDB_DB_END /*Add DB type Entries above this only*/
-} gfdb_db_type_t;
-
-/*String related to the db types*/
-#define GFDB_STR_HASH_FILE_STORE "hashfile"
-#define GFDB_STR_ROCKS_DB "rocksdb"
-#define GFDB_STR_SQLITE3 "sqlite3"
-#define GFDB_STR_HYPERDEX "hyperdex"
-
-/*Convert db type in string to gfdb_db_type_t*/
-static inline int
-gf_string2gfdbdbtype(char *db_option)
-{
- int ret = -1;
-
- if (!db_option)
- goto out;
- if (strcmp(db_option, GFDB_STR_HASH_FILE_STORE) == 0) {
- ret = GFDB_HASH_FILE_STORE;
- } else if (strcmp(db_option, GFDB_STR_ROCKS_DB) == 0) {
- ret = GFDB_ROCKS_DB;
- } else if (strcmp(db_option, GFDB_STR_SQLITE3) == 0) {
- ret = GFDB_SQLITE3;
- } else if (strcmp(db_option, GFDB_STR_HYPERDEX) == 0) {
- ret = GFDB_HYPERDEX;
- }
-out:
- return ret;
-}
-
-/*Tells the path of the fop*/
-typedef enum gfdb_fop_path {
- GFDB_FOP_INVALID = -1,
- /*Filler value for zero*/
- GFDB_FOP_PATH_ZERO = 0,
- /*have wind path below this*/
- GFDB_FOP_WIND = 1,
- GFDB_FOP_WDEL = 2,
- /*have unwind path below this*/
- GFDB_FOP_UNWIND = 4,
- /*Delete unwind path*/
- GFDB_FOP_UNDEL = 8,
- GFDB_FOP_UNDEL_ALL = 16
-} gfdb_fop_path_t;
-/*Strings related to the above fop path*/
-#define GFDB_STR_FOP_INVALID "INVALID"
-#define GFDB_STR_FOP_WIND "ENTRY"
-#define GFDB_STR_FOP_UNWIND "EXIT"
-#define GFDB_STR_FOP_WDEL "WDEL"
-#define GFDB_STR_FOP_UNDEL "UNDEL"
-
-static inline gf_boolean_t
-iswindpath(gfdb_fop_path_t gfdb_fop_path)
-{
- return ((gfdb_fop_path == GFDB_FOP_WIND) ||
- (gfdb_fop_path == GFDB_FOP_WDEL))
- ? _gf_true
- : _gf_false;
-}
-
-static inline gf_boolean_t
-isunwindpath(gfdb_fop_path_t gfdb_fop_path)
-{
- return (gfdb_fop_path >= GFDB_FOP_UNWIND) ? _gf_true : _gf_false;
-}
-
-/*Tells what type of fop it was,
- * e.g. whether it was a dentry fop or an inode fop,
- * a read fop or a write fop, etc.*/
-typedef enum gfdb_fop_type {
- GFDB_FOP_INVALID_OP = -1,
- /*Filler value for zero*/
- GFDB_FOP_TYPE_ZERO = 0,
- GFDB_FOP_DENTRY_OP = 1,
- GFDB_FOP_DENTRY_CREATE_OP = 2,
- GFDB_FOP_INODE_OP = 4,
- GFDB_FOP_WRITE_OP = 8,
- GFDB_FOP_READ_OP = 16
-} gfdb_fop_type_t;
-
-#define GFDB_FOP_INODE_WRITE (GFDB_FOP_INODE_OP | GFDB_FOP_WRITE_OP)
-
-#define GFDB_FOP_DENTRY_WRITE (GFDB_FOP_DENTRY_OP | GFDB_FOP_WRITE_OP)
-
-#define GFDB_FOP_CREATE_WRITE (GFDB_FOP_DENTRY_CREATE_OP | GFDB_FOP_WRITE_OP)
-
-#define GFDB_FOP_INODE_READ (GFDB_FOP_INODE_OP | GFDB_FOP_READ_OP)
-
-static inline gf_boolean_t
-isreadfop(gfdb_fop_type_t fop_type)
-{
- return (fop_type & GFDB_FOP_READ_OP) ? _gf_true : _gf_false;
-}
-
-static inline gf_boolean_t
-isdentryfop(gfdb_fop_type_t fop_type)
-{
- return ((fop_type & GFDB_FOP_DENTRY_OP) ||
- (fop_type & GFDB_FOP_DENTRY_CREATE_OP))
- ? _gf_true
- : _gf_false;
-}
-
-static inline gf_boolean_t
-isdentrycreatefop(gfdb_fop_type_t fop_type)
-{
- return (fop_type & GFDB_FOP_DENTRY_CREATE_OP) ? _gf_true : _gf_false;
-}
-
-/*The structure that is used to send inserts/updates to the database
- * using the insert_record api*/
-typedef struct gfdb_db_record {
- /* GFID */
- uuid_t gfid;
- /* Used during a rename refer ctr_rename() in changetimerecorder
- * xlator*/
- uuid_t old_gfid;
- /* Parent GFID */
- uuid_t pargfid;
- uuid_t old_pargfid;
- /* File names */
- char file_name[GF_NAME_MAX + 1];
- char old_file_name[GF_NAME_MAX + 1];
- /* FOP type and FOP path*/
- gfdb_fop_type_t gfdb_fop_type;
- gfdb_fop_path_t gfdb_fop_path;
- /*Time of change or access*/
- gfdb_time_t gfdb_wind_change_time;
- gfdb_time_t gfdb_unwind_change_time;
- /* For crash consistency while inserting/updating hard links */
- gf_boolean_t islinkupdate;
-    /* For link consistency we do a double update, i.e. mark the link
-     * during the wind and update/delete the link during the unwind.
-     * This has a performance hit. The link_consistency flag gives a
-     * choice of whether link consistency needs to be spot-on or not;
-     * if not, there will be only one link update */
- gf_boolean_t link_consistency;
- /* For dentry fops we can choose to ignore recording of unwind time */
- /* For inode fops "record_exit" volume option does the trick, */
- /* but for dentry fops we update the LINK_UPDATE, so an extra */
- /* flag is provided to ignore the recording of the unwind time. */
- gf_boolean_t do_record_uwind_time;
- /* Global flag to record or not record counters */
- gf_boolean_t do_record_counters;
-    /* Global flag to Record/Not Record wind or unwind time.
- * This flag will overrule do_record_uwind_time*/
- gf_boolean_t do_record_times;
-    /* Ignoring errors while inserting. */
- gf_boolean_t ignore_errors;
-} gfdb_db_record_t;
-
-/*******************************************************************************
- *
- * Signatures for the plugin functions
- * i.e. any plugin should implement
- * these functions to integrate with
- * libgfdb.
- *
- * ****************************************************************************/
-
-/*Call back function for querying the database*/
-typedef int (*gf_query_callback_t)(gfdb_query_record_t *, void *);
-
-/* Used to initialize db connection
- * Arguments:
- * args : Dictionary containing database specific parameters
- * db_conn : pointer to plugin specific data base connection
- * that will be created. If the call is successful
- * db_conn will contain the plugin specific connection
- * If call is unsuccessful will have NULL.
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_init_db_t)(dict_t *args, void **db_conn);
-
-/* Used to terminate/de-initialize db connection
- * (Destructor function for db connection object)
- * Arguments:
- * db_conn : plugin specific data base connection
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_fini_db_t)(void **db_conn);
-
-/*Used to insert/update records in the database
- * Arguments:
- * db_conn : plugin specific data base connection
- * gfdb_db_record : Record to be inserted/updated
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_insert_record_t)(void *db_conn, gfdb_db_record_t *db_record);
-
-/*Used to delete record from the database
- * Arguments:
- * db_conn : plugin specific data base connection
- * gfdb_db_record : Record to be deleted
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_delete_record_t)(void *db_conn, gfdb_db_record_t *db_record);
-
-/*Used to compact the database
- * Arguments:
- * db_conn : GFDB Connection node
- * compact_active : Is compaction currently on?
- * compact_mode_switched : Was the compaction switch flipped?
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_compact_db_t)(void *db_conn, gf_boolean_t compact_active,
- gf_boolean_t compact_mode_switched);
-
-/* Query all the records from the database
- * Arguments:
- * db_conn : plugin specific data base connection
- * query_callback : Call back function that will be called
- * for every record found
- * _query_cbk_args : Custom argument passed for the call back
- * function query_callback
- * query_limit : 0 - list all files
- * positive value - add the LIMIT clause to
- * the SQL query to limit the number of records
- * returned
- *
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_find_all_t)(void *db_conn,
- gf_query_callback_t query_callback,
- void *_cbk_args, int query_limit);
-
-/* Query records/files that have not changed/accessed
- * from a time in past to current time
- * Arguments:
- * db_conn : plugin specific data base connection
- * query_callback : Call back function that will be called
- * for every record found
- * _cbk_args : Custom argument passed for the call back
- * function query_callback
- * for_time : Time from where the file/s are not
- * changed/accessed
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_find_unchanged_for_time_t)(
- void *db_conn, gf_query_callback_t query_callback, void *_cbk_args,
- gfdb_time_t *_time);
-
-/* Query records/files that have changed/accessed from a
- * time in past to current time
- * Arguments:
- * db_conn : plugin specific data base connection
- * query_callback : Call back function that will be called
- * for every record found
- * _cbk_args : Custom argument passed for the call back
- * function query_callback
- * _time : Time from where the file/s are
- * changed/accessed
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_find_recently_changed_files_t)(
- void *db_conn, gf_query_callback_t query_callback, void *_cbk_args,
- gfdb_time_t *_time);
-
-/* Query records/files that have not changed/accessed
- * from a time in past to current time, with
- * a desired frequency
- *
- * Arguments:
- * db_conn : plugin specific data base connection
- * query_callback : Call back function that will be called
- * for every record found
- * _cbk_args : Custom argument passed for the call back
- * function query_callback
- * _time : Time from where the file/s are not
- * changed/accessed
- * _write_freq : Desired Write Frequency lower limit
- * _read_freq : Desired Read Frequency lower limit
- * _clear_counters : If true, Clears all the frequency counters of
- * all files.
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_find_unchanged_for_time_freq_t)(
- void *db_conn, gf_query_callback_t query_callback, void *_cbk_args,
- gfdb_time_t *_time, int _write_freq, int _read_freq,
- gf_boolean_t _clear_counters);
-
-/* Query records/files that have changed/accessed from a
- * time in past to current time, with a desired frequency
- * Arguments:
- * db_conn : plugin specific data base connection
- * query_callback : Call back function that will be called
- * for every record found
- * _cbk_args : Custom argument passed for the call back
- * function query_callback
- * _time : Time from where the file/s are
- * changed/accessed
- * _write_freq : Desired Write Frequency lower limit
- * _read_freq : Desired Read Frequency lower limit
- * _clear_counters : If true, Clears all the frequency counters of
- * all files.
- * Returns : if successful return 0 or
- * -ve value in case of failure*/
-typedef int (*gfdb_find_recently_changed_files_freq_t)(
- void *db_conn, gf_query_callback_t query_callback, void *_cbk_args,
- gfdb_time_t *_time, int _write_freq, int _read_freq,
- gf_boolean_t _clear_counters);
-
-typedef int (*gfdb_clear_files_heat_t)(void *db_conn);
-
-typedef int (*gfdb_get_db_version_t)(void *db_conn, char **version);
-
-typedef int (*gfdb_get_db_params_t)(void *db_conn, char *param_key,
- char **param_value);
-
-typedef int (*gfdb_set_db_params_t)(void *db_conn, char *param_key,
- char *param_value);
-
-/*Data structure holding all the above plugin function pointers*/
-typedef struct gfdb_db_operations {
- gfdb_init_db_t init_db_op;
- gfdb_fini_db_t fini_db_op;
- gfdb_insert_record_t insert_record_op;
- gfdb_delete_record_t delete_record_op;
- gfdb_compact_db_t compact_db_op;
- gfdb_find_all_t find_all_op;
- gfdb_find_unchanged_for_time_t find_unchanged_for_time_op;
- gfdb_find_recently_changed_files_t find_recently_changed_files_op;
- gfdb_find_unchanged_for_time_freq_t find_unchanged_for_time_freq_op;
- gfdb_find_recently_changed_files_freq_t find_recently_changed_files_freq_op;
- gfdb_clear_files_heat_t clear_files_heat_op;
- gfdb_get_db_version_t get_db_version;
- gfdb_get_db_params_t get_db_params;
- gfdb_set_db_params_t set_db_params;
-} gfdb_db_operations_t;
-
-/*******************************************************************************
- *
- * Database connection object: This object is maintained by libgfdb for each
- * database connection created.
- * gf_db_connection : DB connection specific to the plugin
- * gfdb_db_operations : Contains all the libgfdb API implementation
- * from the plugin.
- * gfdb_db_type : Type of database
- *
- * ****************************************************************************/
-
-typedef struct gfdb_connection {
- void *gf_db_connection;
- gfdb_db_operations_t gfdb_db_operations;
- gfdb_db_type_t gfdb_db_type;
-} gfdb_connection_t;
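
A minimal dispatch sketch (illustrative only, not part of the deleted header; the example_* name is invented): once a plugin has filled gfdb_db_operations, libgfdb callers reach the backend through these function pointers.

static int
example_db_version(gfdb_connection_t *conn, char **version)
{
    gfdb_db_operations_t *ops = NULL;

    if (!conn || !version)
        return -1;

    ops = &conn->gfdb_db_operations;
    if (!ops->get_db_version)
        return -1;

    /* For the sqlite3 plugin this resolves to gf_sqlite3_version() */
    return ops->get_db_version(conn->gf_db_connection, version);
}
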
-
-/*******************************************************************************
- *
- * Macros for get and set db options
- *
- * ****************************************************************************/
-
-/*Set param_key : str_value into param_dict*/
-#define SET_DB_PARAM_TO_DICT(comp_name, params_dict, param_key, str_value, \
- ret, error) \
- do { \
- data_t *data = NULL; \
- data = str_to_data(str_value); \
- if (!data) \
- goto error; \
- ret = dict_add(params_dict, param_key, data); \
- if (ret) { \
- gf_msg(comp_name, GF_LOG_ERROR, 0, LG_MSG_SET_PARAM_FAILED, \
- "Failed setting %s " \
- "to params dictionary", \
- param_key); \
- data_destroy(data); \
- goto error; \
- }; \
- } while (0)
-
-/*get str_value of param_key from param_dict*/
-#define GET_DB_PARAM_FROM_DICT(comp_name, params_dict, param_key, str_value, \
- error) \
- do { \
- data_t *data = NULL; \
- data = dict_get(params_dict, param_key); \
- if (!data) { \
- gf_msg(comp_name, GF_LOG_ERROR, 0, LG_MSG_GET_PARAM_FAILED, \
- "Failed to retrieve " \
- "%s from params", \
- param_key); \
- goto error; \
- } else { \
- str_value = data->data; \
- }; \
- } while (0)
-
-/*get str_value of param_key from param_dict. if param_key is not present
- * set _default_v to str_value */
-#define GET_DB_PARAM_FROM_DICT_DEFAULT(comp_name, params_dict, param_key, \
- str_value, _default_v) \
- do { \
- data_t *data = NULL; \
- data = dict_get(params_dict, param_key); \
- if (!data) { \
- str_value = _default_v; \
- gf_msg(comp_name, GF_LOG_TRACE, 0, LG_MSG_GET_PARAM_FAILED, \
- "Failed to retrieve " \
-                   "%s from params. Assigning default value: %s",             \
- param_key, _default_v); \
- } else { \
- str_value = data->data; \
- }; \
- } while (0)
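
A short usage sketch of the SET/GET macros above (illustrative only, not part of the deleted header; the component name and db path are invented, and dict_new()/dict_unref() are the usual libglusterfs dict lifecycle calls):

static int
example_fill_and_read_params(void)
{
    int ret = -1;
    char *db_path = NULL;
    dict_t *params = dict_new();

    if (!params)
        goto err;

    /* Store a key/value pair into the params dictionary */
    SET_DB_PARAM_TO_DICT("example-comp", params, "sql-db-path",
                         "/var/lib/glusterd/example-gfdb.db", ret, err);

    /* Read it back; logs and jumps to err if the key is missing */
    GET_DB_PARAM_FROM_DICT("example-comp", params, "sql-db-path", db_path, err);

    ret = 0;
err:
    if (params)
        dict_unref(params);
    return ret;
}
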
-
-#endif
diff --git a/libglusterfs/src/gfdb/gfdb_mem-types.h b/libglusterfs/src/gfdb/gfdb_mem-types.h
deleted file mode 100644
index b97cdf89446..00000000000
--- a/libglusterfs/src/gfdb/gfdb_mem-types.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- Copyright (c) 2008-2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#ifndef __GFDB_MEM_TYPES_H__
-#define __GFDB_MEM_TYPES_H__
-
-#include "glusterfs/mem-types.h"
-
-enum gfdb_mem_types_ { gfdb_mtstart = gf_common_mt_end + 1, gfdb_mt_end };
-#endif
diff --git a/libglusterfs/src/gfdb/gfdb_sqlite3.c b/libglusterfs/src/gfdb/gfdb_sqlite3.c
deleted file mode 100644
index 98232d5e379..00000000000
--- a/libglusterfs/src/gfdb/gfdb_sqlite3.c
+++ /dev/null
@@ -1,1542 +0,0 @@
-/*
- Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#include "gfdb_sqlite3.h"
-#include "gfdb_sqlite3_helper.h"
-#include "glusterfs/libglusterfs-messages.h"
-#include "glusterfs/syscall.h"
-
-/******************************************************************************
- *
- * Util functions
- *
- * ***************************************************************************/
-gf_sql_connection_t *
-gf_sql_connection_init()
-{
- gf_sql_connection_t *gf_sql_conn = NULL;
-
- gf_sql_conn = GF_CALLOC(1, sizeof(gf_sql_connection_t),
- gf_mt_sql_connection_t);
- if (gf_sql_conn == NULL) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "Error allocating memory to "
- "gf_sql_connection_t ");
- }
-
- return gf_sql_conn;
-}
-
-void
-gf_sql_connection_fini(gf_sql_connection_t **sql_connection)
-{
- if (!sql_connection)
- return;
- GF_FREE(*sql_connection);
- *sql_connection = NULL;
-}
-
-const char *
-gf_sql_jm2str(gf_sql_journal_mode_t jm)
-{
- switch (jm) {
- case gf_sql_jm_delete:
- return GF_SQL_JM_DELETE;
- case gf_sql_jm_truncate:
- return GF_SQL_JM_TRUNCATE;
- case gf_sql_jm_persist:
- return GF_SQL_JM_PERSIST;
- case gf_sql_jm_memory:
- return GF_SQL_JM_MEMORY;
- case gf_sql_jm_wal:
- return GF_SQL_JM_WAL;
- case gf_sql_jm_off:
- return GF_SQL_JM_OFF;
- case gf_sql_jm_invalid:
- break;
- }
- return NULL;
-}
-
-gf_sql_journal_mode_t
-gf_sql_str2jm(const char *jm_str)
-{
- if (!jm_str) {
- return gf_sql_jm_invalid;
- } else if (strcmp(jm_str, GF_SQL_JM_DELETE) == 0) {
- return gf_sql_jm_delete;
- } else if (strcmp(jm_str, GF_SQL_JM_TRUNCATE) == 0) {
- return gf_sql_jm_truncate;
- } else if (strcmp(jm_str, GF_SQL_JM_PERSIST) == 0) {
- return gf_sql_jm_persist;
- } else if (strcmp(jm_str, GF_SQL_JM_MEMORY) == 0) {
- return gf_sql_jm_memory;
- } else if (strcmp(jm_str, GF_SQL_JM_WAL) == 0) {
- return gf_sql_jm_wal;
- } else if (strcmp(jm_str, GF_SQL_JM_OFF) == 0) {
- return gf_sql_jm_off;
- }
- return gf_sql_jm_invalid;
-}
-
-const char *
-gf_sql_av_t2str(gf_sql_auto_vacuum_t sql_av)
-{
- switch (sql_av) {
- case gf_sql_av_none:
- return GF_SQL_AV_NONE;
- case gf_sql_av_full:
- return GF_SQL_AV_FULL;
- case gf_sql_av_incr:
- return GF_SQL_AV_INCR;
- case gf_sql_av_invalid:
- break;
- }
- return NULL;
-}
-
-gf_sql_auto_vacuum_t
-gf_sql_str2av_t(const char *av_str)
-{
- if (!av_str) {
- return gf_sql_av_invalid;
- } else if (strcmp(av_str, GF_SQL_AV_NONE) == 0) {
- return gf_sql_av_none;
- } else if (strcmp(av_str, GF_SQL_AV_FULL) == 0) {
- return gf_sql_av_full;
- } else if (strcmp(av_str, GF_SQL_AV_INCR) == 0) {
- return gf_sql_av_incr;
- }
- return gf_sql_av_invalid;
-}
-
-const char *
-gf_sync_t2str(gf_sql_sync_t sql_sync)
-{
- switch (sql_sync) {
- case gf_sql_sync_off:
- return GF_SQL_SYNC_OFF;
- case gf_sql_sync_normal:
- return GF_SQL_SYNC_NORMAL;
- case gf_sql_sync_full:
- return GF_SQL_SYNC_FULL;
- case gf_sql_sync_invalid:
- break;
- }
- return NULL;
-}
-
-gf_sql_sync_t
-gf_sql_str2sync_t(const char *sync_str)
-{
- if (!sync_str) {
- return gf_sql_sync_invalid;
- } else if (strcmp(sync_str, GF_SQL_SYNC_OFF) == 0) {
- return gf_sql_sync_off;
- } else if (strcmp(sync_str, GF_SQL_SYNC_NORMAL) == 0) {
- return gf_sql_sync_normal;
- } else if (strcmp(sync_str, GF_SQL_SYNC_FULL) == 0) {
- return gf_sql_sync_full;
- }
- return gf_sql_sync_invalid;
-}
-
-/*TODO replace GF_CALLOC by mem_pool or iobuff if required for performance */
-static char *
-sql_stmt_init()
-{
- char *sql_stmt = NULL;
-
- sql_stmt = GF_CALLOC(GF_STMT_SIZE_MAX, sizeof(char), gf_common_mt_char);
-
- if (!sql_stmt) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "Error allocating memory to SQL "
- "Statement ");
- goto out;
- }
-out:
- return sql_stmt;
-}
-
-/*TODO replace GF_FREE by mem_pool or iobuff if required for performance */
-static void
-sql_stmt_fini(char **sql_stmt)
-{
- GF_FREE(*sql_stmt);
-}
-
-/******************************************************************************
- * DB Essential functions used by
- * > gf_open_sqlite3_conn ()
- * > gf_close_sqlite3_conn ()
- * ***************************************************************************/
-static sqlite3 *
-gf_open_sqlite3_conn(char *sqlite3_db_path, int flags)
-{
- sqlite3 *sqlite3_db_conn = NULL;
- int ret = -1;
-
- GF_ASSERT(sqlite3_db_path);
-
- /*Creates DB if not created*/
- ret = sqlite3_open_v2(sqlite3_db_path, &sqlite3_db_conn, flags, NULL);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_DB_ERROR,
-               "FATAL: Could not open %s : %s", sqlite3_db_path,
- sqlite3_errmsg(sqlite3_db_conn));
- }
- return sqlite3_db_conn;
-}
-
-static int
-gf_close_sqlite3_conn(sqlite3 *sqlite3_db_conn)
-{
- int ret = 0;
-
- GF_ASSERT(sqlite3_db_conn);
-
- if (sqlite3_db_conn) {
- ret = sqlite3_close(sqlite3_db_conn);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CONNECTION_ERROR,
- "FATAL: sqlite3 close"
- " connection failed %s",
- sqlite3_errmsg(sqlite3_db_conn));
- ret = -1;
- goto out;
- }
- }
- ret = 0;
-out:
- return ret;
-}
-
-/******************************************************************************
- *
- * Database init / fini / create table
- *
- * ***************************************************************************/
-
-/*Function to fill db operations*/
-void
-gf_sqlite3_fill_db_operations(gfdb_db_operations_t *gfdb_db_ops)
-{
- GF_ASSERT(gfdb_db_ops);
-
- gfdb_db_ops->init_db_op = gf_sqlite3_init;
- gfdb_db_ops->fini_db_op = gf_sqlite3_fini;
-
- gfdb_db_ops->insert_record_op = gf_sqlite3_insert;
- gfdb_db_ops->delete_record_op = gf_sqlite3_delete;
- gfdb_db_ops->compact_db_op = gf_sqlite3_vacuum;
-
- gfdb_db_ops->find_all_op = gf_sqlite3_find_all;
- gfdb_db_ops
- ->find_unchanged_for_time_op = gf_sqlite3_find_unchanged_for_time;
- gfdb_db_ops->find_recently_changed_files_op =
- gf_sqlite3_find_recently_changed_files;
- gfdb_db_ops->find_unchanged_for_time_freq_op =
- gf_sqlite3_find_unchanged_for_time_freq;
- gfdb_db_ops->find_recently_changed_files_freq_op =
- gf_sqlite3_find_recently_changed_files_freq;
-
- gfdb_db_ops->clear_files_heat_op = gf_sqlite3_clear_files_heat;
-
- gfdb_db_ops->get_db_version = gf_sqlite3_version;
-
- gfdb_db_ops->get_db_params = gf_sqlite3_pragma;
-
- gfdb_db_ops->set_db_params = gf_sqlite3_set_pragma;
-}
-
-static int
-create_filetable(sqlite3 *sqlite3_db_conn)
-{
- int ret = -1;
- char *sql_stmt = NULL;
- char *sql_strerror = NULL;
-
- GF_ASSERT(sqlite3_db_conn);
-
- sql_stmt = sql_stmt_init();
- if (!sql_stmt) {
- ret = ENOMEM;
- goto out;
- }
-
- GF_CREATE_STMT(sql_stmt);
-
- ret = sqlite3_exec(sqlite3_db_conn, sql_stmt, NULL, NULL, &sql_strerror);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_EXEC_FAILED,
- "Failed executing: %s : %s", sql_stmt, sql_strerror);
- sqlite3_free(sql_strerror);
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- sql_stmt_fini(&sql_stmt);
- return ret;
-}
-
-static int
-apply_sql_params_db(gf_sql_connection_t *sql_conn, dict_t *param_dict)
-{
- int ret = -1;
- char *temp_str = NULL;
- char sqlite3_config_str[GF_NAME_MAX] = "";
-
- GF_ASSERT(sql_conn);
- GF_ASSERT(param_dict);
-
- /*Extract sql page_size from param_dict,
- * if not specified default value will be GF_SQL_DEFAULT_PAGE_SIZE*/
- temp_str = NULL;
- GET_DB_PARAM_FROM_DICT_DEFAULT(GFDB_STR_SQLITE3, param_dict,
- GFDB_SQL_PARAM_PAGE_SIZE, temp_str,
- GF_SQL_DEFAULT_PAGE_SIZE);
- sql_conn->page_size = atoi(temp_str);
- /*Apply page_size on the sqlite db*/
- GF_SQLITE3_SET_PRAGMA(sqlite3_config_str, "page_size", "%zd",
- sql_conn->page_size, ret, out);
-
- /*Extract sql cache size from param_dict,
- * if not specified default value will be
- * GF_SQL_DEFAULT_CACHE_SIZE pages*/
- temp_str = NULL;
- GET_DB_PARAM_FROM_DICT_DEFAULT(GFDB_STR_SQLITE3, param_dict,
- GFDB_SQL_PARAM_CACHE_SIZE, temp_str,
- GF_SQL_DEFAULT_CACHE_SIZE);
- sql_conn->cache_size = atoi(temp_str);
- /*Apply cache size on the sqlite db*/
- GF_SQLITE3_SET_PRAGMA(sqlite3_config_str, "cache_size", "%zd",
- sql_conn->cache_size, ret, out);
-
- /*Extract sql journal mode from param_dict,
- * if not specified default value will be
- * GF_SQL_DEFAULT_JOURNAL_MODE i.e "wal"*/
- temp_str = NULL;
- GET_DB_PARAM_FROM_DICT_DEFAULT(GFDB_STR_SQLITE3, param_dict,
- GFDB_SQL_PARAM_JOURNAL_MODE, temp_str,
- GF_SQL_DEFAULT_JOURNAL_MODE);
- sql_conn->journal_mode = gf_sql_str2jm(temp_str);
- /*Apply journal mode to the sqlite db*/
- GF_SQLITE3_SET_PRAGMA(sqlite3_config_str, "journal_mode", "%s", temp_str,
- ret, out);
-
-    /*wal_autocheckpoint is only meaningful when the journal mode is WAL*/
- if (sql_conn->journal_mode == gf_sql_jm_wal) {
- /*Extract sql wal auto check point from param_dict
- * if not specified default value will be
- * GF_SQL_DEFAULT_WAL_AUTOCHECKPOINT pages*/
- temp_str = NULL;
- GET_DB_PARAM_FROM_DICT_DEFAULT(GFDB_STR_SQLITE3, param_dict,
- GFDB_SQL_PARAM_WAL_AUTOCHECK, temp_str,
- GF_SQL_DEFAULT_WAL_AUTOCHECKPOINT);
- sql_conn->wal_autocheckpoint = atoi(temp_str);
- /*Apply wal auto check point to the sqlite db*/
- GF_SQLITE3_SET_PRAGMA(sqlite3_config_str, "wal_autocheckpoint", "%zd",
- sql_conn->wal_autocheckpoint, ret, out);
- }
-
- /*Extract sql synchronous from param_dict
- * if not specified default value will be GF_SQL_DEFAULT_SYNC*/
- temp_str = NULL;
- GET_DB_PARAM_FROM_DICT_DEFAULT(GFDB_STR_SQLITE3, param_dict,
- GFDB_SQL_PARAM_SYNC, temp_str,
- GF_SQL_DEFAULT_SYNC);
- sql_conn->synchronous = gf_sql_str2sync_t(temp_str);
- /*Apply synchronous to the sqlite db*/
- GF_SQLITE3_SET_PRAGMA(sqlite3_config_str, "synchronous", "%d",
- sql_conn->synchronous, ret, out);
-
- /*Extract sql auto_vacuum from param_dict
- * if not specified default value will be GF_SQL_DEFAULT_AUTO_VACUUM*/
- temp_str = NULL;
- GET_DB_PARAM_FROM_DICT_DEFAULT(GFDB_STR_SQLITE3, param_dict,
- GFDB_SQL_PARAM_AUTO_VACUUM, temp_str,
- GF_SQL_DEFAULT_AUTO_VACUUM);
- sql_conn->auto_vacuum = gf_sql_str2av_t(temp_str);
- /*Apply auto_vacuum to the sqlite db*/
- GF_SQLITE3_SET_PRAGMA(sqlite3_config_str, "auto_vacuum", "%d",
- sql_conn->auto_vacuum, ret, out);
-
- ret = 0;
-out:
- return ret;
-}
-
-int
-gf_sqlite3_init(dict_t *args, void **db_conn)
-{
- int ret = -1;
- gf_sql_connection_t *sql_conn = NULL;
- struct stat stbuf = {
- 0,
- };
- gf_boolean_t is_dbfile_exist = _gf_false;
- char *temp_str = NULL;
-
- GF_ASSERT(args);
- GF_ASSERT(db_conn);
-
- if (*db_conn != NULL) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CONNECTION_ERROR,
- "DB Connection is not "
- "empty!");
- return 0;
- }
-
- if (!sqlite3_threadsafe()) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_NOT_MULTITHREAD_MODE,
- "sqlite3 is not in multithreaded mode");
- goto out;
- }
-
- sql_conn = gf_sql_connection_init();
- if (!sql_conn) {
- goto out;
- }
-
- /*Extract sql db path from args*/
- temp_str = NULL;
- GET_DB_PARAM_FROM_DICT(GFDB_STR_SQLITE3, args, GFDB_SQL_PARAM_DBPATH,
- temp_str, out);
- strncpy(sql_conn->sqlite3_db_path, temp_str, PATH_MAX - 1);
- sql_conn->sqlite3_db_path[PATH_MAX - 1] = 0;
-
- is_dbfile_exist = (sys_stat(sql_conn->sqlite3_db_path, &stbuf) == 0)
- ? _gf_true
- : _gf_false;
-
- /*Creates DB if not created*/
- sql_conn->sqlite3_db_conn = gf_open_sqlite3_conn(
- sql_conn->sqlite3_db_path, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE);
- if (!sql_conn->sqlite3_db_conn) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CONNECTION_ERROR,
- "Failed creating db connection");
- goto out;
- }
-
-    /* If the file exists we skip the config part
- * and creation of the schema */
- if (is_dbfile_exist)
- goto db_exists;
-
- /*Apply sqlite3 params to database*/
- ret = apply_sql_params_db(sql_conn, args);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_SET_PARAM_FAILED,
- "Failed applying sql params"
- " to %s",
- sql_conn->sqlite3_db_path);
- goto out;
- }
-
- /*Create the schema if NOT present*/
- ret = create_filetable(sql_conn->sqlite3_db_conn);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CREATE_FAILED,
- "Failed Creating %s Table", GF_FILE_TABLE);
- goto out;
- }
-
-db_exists:
- ret = 0;
-out:
- if (ret) {
- gf_sqlite3_fini((void **)&sql_conn);
- }
-
- *db_conn = sql_conn;
-
- return ret;
-}
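
A minimal lifecycle sketch (illustrative, not from the deleted file; the db path is invented): gf_sqlite3_init() is driven through a params dictionary, and only the db path is mandatory since every other PRAGMA parameter falls back to its GF_SQL_DEFAULT_* value.

static int
example_open_and_close(void)
{
    int ret = -1;
    void *db_conn = NULL;
    dict_t *args = dict_new();

    if (!args)
        goto out;

    SET_DB_PARAM_TO_DICT(GFDB_STR_SQLITE3, args, GFDB_SQL_PARAM_DBPATH,
                         "/tmp/example-gfdb.db", ret, out);

    /* Opens (and creates, if needed) the database; on a fresh file it also
     * applies the PRAGMA parameters and creates the schema */
    ret = gf_sqlite3_init(args, &db_conn);
    if (ret)
        goto out;

    ret = gf_sqlite3_fini(&db_conn);
out:
    if (args)
        dict_unref(args);
    return ret;
}
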
-
-int
-gf_sqlite3_fini(void **db_conn)
-{
- int ret = -1;
- gf_sql_connection_t *sql_conn = NULL;
-
- GF_ASSERT(db_conn);
- sql_conn = *db_conn;
-
- if (sql_conn) {
- if (sql_conn->sqlite3_db_conn) {
- ret = gf_close_sqlite3_conn(sql_conn->sqlite3_db_conn);
- if (ret) {
- /*Logging of error done in
- * gf_close_sqlite3_conn()*/
- goto out;
- }
- sql_conn->sqlite3_db_conn = NULL;
- }
- gf_sql_connection_fini(&sql_conn);
- }
- *db_conn = sql_conn;
- ret = 0;
-out:
- return ret;
-}
-
-/******************************************************************************
- *
- * INSERT/UPDATE/DELETE Operations
- *
- *
- * ***************************************************************************/
-
-int
-gf_sqlite3_insert(void *db_conn, gfdb_db_record_t *gfdb_db_record)
-{
- int ret = -1;
- gf_sql_connection_t *sql_conn = db_conn;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfdb_db_record, out);
-
- switch (gfdb_db_record->gfdb_fop_path) {
- case GFDB_FOP_WIND:
- ret = gf_sql_insert_wind(sql_conn, gfdb_db_record);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_INSERT_FAILED, "Failed wind insert");
- goto out;
- }
- break;
- case GFDB_FOP_UNWIND:
- ret = gf_sql_insert_unwind(sql_conn, gfdb_db_record);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_INSERT_FAILED, "Failed unwind insert");
- goto out;
- }
- break;
-
- case GFDB_FOP_WDEL:
- ret = gf_sql_update_delete_wind(sql_conn, gfdb_db_record);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_UPDATE_FAILED,
- "Failed updating delete "
- "during wind");
- goto out;
- }
- break;
- case GFDB_FOP_UNDEL:
- case GFDB_FOP_UNDEL_ALL:
- ret = gf_sql_delete_unwind(sql_conn, gfdb_db_record);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_DELETE_FAILED, "Failed deleting");
- goto out;
- }
- break;
- case GFDB_FOP_INVALID:
- default:
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_INVALID_FOP,
- "Cannot record to DB: Invalid FOP");
- goto out;
- }
-
- ret = 0;
-out:
- return ret;
-}
-
-int
-gf_sqlite3_delete(void *db_conn, gfdb_db_record_t *gfdb_db_record)
-{
- int ret = -1;
- gf_sql_connection_t *sql_conn = db_conn;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfdb_db_record, out);
-
- ret = 0;
-out:
- return ret;
-}
-
-/******************************************************************************
- *
- * SELECT QUERY FUNCTIONS
- *
- *
- * ***************************************************************************/
-
-static int
-gf_get_basic_query_stmt(char **out_stmt)
-{
- int ret = -1;
- ret = gf_asprintf(out_stmt,
- "select GF_FILE_TB.GF_ID,"
- "GF_FLINK_TB.GF_PID ,"
- "GF_FLINK_TB.FNAME "
- "from GF_FLINK_TB, GF_FILE_TB "
- "where "
- "GF_FILE_TB.GF_ID = GF_FLINK_TB.GF_ID ");
- if (ret <= 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed to create base query statement");
- *out_stmt = NULL;
- }
- return ret;
-}
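
For reference, the base query built above expands to the following SQL (reformatted here for readability; the table and column names are the literal strings in the gf_asprintf() call). gf_sqlite3_find_all() below appends a LIMIT clause to it when a positive query_limit is passed.

/*
 *   select GF_FILE_TB.GF_ID, GF_FLINK_TB.GF_PID, GF_FLINK_TB.FNAME
 *   from GF_FLINK_TB, GF_FILE_TB
 *   where GF_FILE_TB.GF_ID = GF_FLINK_TB.GF_ID
 *
 *   -- and with, for example, query_limit = 100:
 *   ... where GF_FILE_TB.GF_ID = GF_FLINK_TB.GF_ID LIMIT 100
 */
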
-
-/*
- * Find All files recorded in the DB
- * Input:
- * query_callback : query callback function to handle
- * result records from the query
- * */
-int
-gf_sqlite3_find_all(void *db_conn, gf_query_callback_t query_callback,
- void *query_cbk_args, int query_limit)
-{
- int ret = -1;
- char *query_str = NULL;
- gf_sql_connection_t *sql_conn = db_conn;
- sqlite3_stmt *prep_stmt = NULL;
- char *limit_query = NULL;
- char *query = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, query_callback, out);
-
- ret = gf_get_basic_query_stmt(&query_str);
- if (ret <= 0) {
- goto out;
- }
-
- query = query_str;
-
- if (query_limit > 0) {
- ret = gf_asprintf(&limit_query, "%s LIMIT %d", query, query_limit);
- if (ret < 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed creating limit query statement");
- limit_query = NULL;
- goto out;
- }
-
- query = limit_query;
- }
-
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, query, -1, &prep_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed to prepare statement %s: %s", query,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- ret = gf_sql_query_function(prep_stmt, query_callback, query_cbk_args);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed Query %s", query);
- goto out;
- }
-
- ret = 0;
-out:
- sqlite3_finalize(prep_stmt);
- GF_FREE(query_str);
-
- if (limit_query)
- GF_FREE(limit_query);
-
- return ret;
-}
-
-/*
- * Find recently changed files from the DB
- * Input:
- * query_callback : query callback function to handle
- * result records from the query
- * from_time : Time to define what is recent
- * */
-int
-gf_sqlite3_find_recently_changed_files(void *db_conn,
- gf_query_callback_t query_callback,
- void *query_cbk_args,
- gfdb_time_t *from_time)
-{
- int ret = -1;
- char *query_str = NULL;
- gf_sql_connection_t *sql_conn = db_conn;
- sqlite3_stmt *prep_stmt = NULL;
- uint64_t from_time_usec = 0;
- char *base_query_str = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, query_callback, out);
-
- ret = gf_get_basic_query_stmt(&base_query_str);
- if (ret <= 0) {
- goto out;
- }
-
- ret = gf_asprintf(
- &query_str,
- "%s AND"
- /*First condition: For writes*/
- "( ((" GF_COL_TB_WSEC " * " TOSTRING(
- GFDB_MICROSEC) " + " GF_COL_TB_WMSEC
- ") >= ? )"
- " OR "
- /*Second condition: For reads*/
- "((" GF_COL_TB_RWSEC " * " TOSTRING(
- GFDB_MICROSEC) " + " GF_COL_TB_RWMSEC
- ") >= ?) )"
- /* Order by write wind time in a
- * descending order i.e most hot
-         * files w.r.t. write */
- " ORDER BY GF_FILE_TB.W_SEC DESC",
- base_query_str);
-
- if (ret < 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed creating query statement");
- query_str = NULL;
- goto out;
- }
-
- from_time_usec = gfdb_time_2_usec(from_time);
-
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, query_str, -1, &prep_stmt,
- 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed to prepare statement %s :"
- " %s",
- query_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind write wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 1, from_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind from_time_usec "
- "%" PRIu64 " : %s",
- from_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind read wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 2, from_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind from_time_usec "
- "%" PRIu64 " : %s ",
- from_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Execute the query*/
- ret = gf_sql_query_function(prep_stmt, query_callback, query_cbk_args);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed Query %s", query_str);
- goto out;
- }
-
- ret = 0;
-out:
- sqlite3_finalize(prep_stmt);
- GF_FREE(base_query_str);
- GF_FREE(query_str);
- return ret;
-}
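
To make the time arithmetic above easier to follow, the appended condition works out roughly to the SQL below, assuming GFDB_MICROSEC expands to 1000000 and the GF_COL_TB_* macros expand to the W_SEC/W_MSEC/W_READ_SEC/W_READ_MSEC columns of GF_FILE_TB (those definitions live in headers not fully shown here, so treat this as an approximation):

/*
 *   ... AND ( ((GF_FILE_TB.W_SEC * 1000000 + GF_FILE_TB.W_MSEC) >= ?)
 *          OR ((GF_FILE_TB.W_READ_SEC * 1000000 + GF_FILE_TB.W_READ_MSEC) >= ?) )
 *   ORDER BY GF_FILE_TB.W_SEC DESC
 *
 *   Both '?' placeholders are bound to from_time_usec, so the query selects
 *   files written to or read at or after from_time, hottest writers first.
 */
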
-
-/*
- * Find unchanged files from a specified time from the DB
- * Input:
- * query_callback : query callback function to handle
- * result records from the query
- * for_time : Time from where the file/s are not changed
- * */
-int
-gf_sqlite3_find_unchanged_for_time(void *db_conn,
- gf_query_callback_t query_callback,
- void *query_cbk_args, gfdb_time_t *for_time)
-{
- int ret = -1;
- char *query_str = NULL;
- gf_sql_connection_t *sql_conn = db_conn;
- sqlite3_stmt *prep_stmt = NULL;
- uint64_t for_time_usec = 0;
- char *base_query_str = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, query_callback, out);
-
- ret = gf_get_basic_query_stmt(&base_query_str);
- if (ret <= 0) {
- goto out;
- }
-
- ret = gf_asprintf(
- &query_str,
- "%s AND "
- /*First condition: For writes*/
- "( ((" GF_COL_TB_WSEC " * " TOSTRING(
- GFDB_MICROSEC) " + " GF_COL_TB_WMSEC
- ") <= ? )"
- " AND "
- /*Second condition: For reads*/
- "((" GF_COL_TB_RWSEC " * " TOSTRING(
- GFDB_MICROSEC) " + " GF_COL_TB_RWMSEC
- ") <= ?) )"
-        /* Order by write wind time in an
- * ascending order i.e most cold
-         * files w.r.t. write */
- " ORDER BY GF_FILE_TB.W_SEC ASC",
- base_query_str);
-
- if (ret < 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed to create query statement");
- query_str = NULL;
- goto out;
- }
-
- for_time_usec = gfdb_time_2_usec(for_time);
-
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, query_str, -1, &prep_stmt,
- 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed to prepare statement %s :"
- " %s",
- query_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind write wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 1, for_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind for_time_usec "
- "%" PRIu64 " : %s",
- for_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind read wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 2, for_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind for_time_usec "
- "%" PRIu64 " : %s",
- for_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Execute the query*/
- ret = gf_sql_query_function(prep_stmt, query_callback, query_cbk_args);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed Query %s", query_str);
- goto out;
- }
-
- ret = 0;
-out:
- sqlite3_finalize(prep_stmt);
- GF_FREE(base_query_str);
- GF_FREE(query_str);
- return ret;
-}
-
-/*
- * Find recently changed files with a specific frequency from the DB
- * Input:
- * db_conn : db connection object
- * query_callback : query callback function to handle
- * result records from the query
- * from_time : Time to define what is recent
- *          freq_write_cnt   : Frequency threshold for write
- *          freq_read_cnt    : Frequency threshold for read
- * clear_counters : Clear counters (r/w) for all inodes in DB
- * */
-int
-gf_sqlite3_find_recently_changed_files_freq(
- void *db_conn, gf_query_callback_t query_callback, void *query_cbk_args,
- gfdb_time_t *from_time, int freq_write_cnt, int freq_read_cnt,
- gf_boolean_t clear_counters)
-{
- int ret = -1;
- char *query_str = NULL;
- gf_sql_connection_t *sql_conn = db_conn;
- sqlite3_stmt *prep_stmt = NULL;
- uint64_t from_time_usec = 0;
- char *base_query_str = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, query_callback, out);
-
- ret = gf_get_basic_query_stmt(&base_query_str);
- if (ret <= 0) {
- goto out;
- }
- ret = gf_asprintf(
- &query_str,
- "%s AND "
- /*First condition: For Writes*/
- "( ( ((" GF_COL_TB_WSEC " * " TOSTRING(
- GFDB_MICROSEC) " + " GF_COL_TB_WMSEC
- ") >= ? )"
- " AND "
- " (" GF_COL_TB_WFC
- " >= ? ) )"
- " OR "
- /*Second condition: For Reads */
- "( ((" GF_COL_TB_RWSEC " * " TOSTRING(
- GFDB_MICROSEC) " + " GF_COL_TB_RWMSEC
- ") >= ?)"
- " AND "
- " (" GF_COL_TB_RFC
- " >= ? ) ) )"
- /* Order by write wind time and
- * write freq in a descending
- * order
-                      * i.e most hot files w.r.t.
- * write */
- " ORDER BY GF_FILE_TB.W_SEC "
- "DESC, "
- "GF_FILE_TB.WRITE_FREQ_CNTR DESC",
- base_query_str);
-
- if (ret < 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed to create query statement");
- query_str = NULL;
- goto out;
- }
-
- from_time_usec = gfdb_time_2_usec(from_time);
-
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, query_str, -1, &prep_stmt,
- 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed to prepare statement %s :"
- " %s",
- query_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind write wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 1, from_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind from_time_usec "
- "%" PRIu64 " : %s",
- from_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Bind write frequency threshold*/
- ret = sqlite3_bind_int(prep_stmt, 2, freq_write_cnt);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind freq_write_cnt "
- "%d : %s",
- freq_write_cnt, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind read wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 3, from_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind from_time_usec "
- "%" PRIu64 " : %s",
- from_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Bind read frequency threshold*/
- ret = sqlite3_bind_int(prep_stmt, 4, freq_read_cnt);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind freq_read_cnt "
- "%d : %s",
- freq_read_cnt, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Execute the query*/
- ret = gf_sql_query_function(prep_stmt, query_callback, query_cbk_args);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed Query %s", query_str);
- goto out;
- }
-
- /*Clear counters*/
- if (clear_counters) {
- ret = gf_sql_clear_counters(sql_conn);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0,
- LG_MSG_CLEAR_COUNTER_FAILED,
- "Failed to clear"
- " counters!");
- goto out;
- }
- }
- ret = 0;
-out:
- sqlite3_finalize(prep_stmt);
- GF_FREE(base_query_str);
- GF_FREE(query_str);
- return ret;
-}
-
-/*
- * Find unchanged files from a specified time, w.r.t. frequency, from the DB
- * Input:
- * query_callback : query callback function to handle
- * result records from the query
- * for_time : Time from where the file/s are not changed
- *          freq_write_cnt   : Frequency threshold for write
- *          freq_read_cnt    : Frequency threshold for read
- * clear_counters : Clear counters (r/w) for all inodes in DB
- * */
-int
-gf_sqlite3_find_unchanged_for_time_freq(void *db_conn,
- gf_query_callback_t query_callback,
- void *query_cbk_args,
- gfdb_time_t *for_time,
- int freq_write_cnt, int freq_read_cnt,
- gf_boolean_t clear_counters)
-{
- int ret = -1;
- char *query_str = NULL;
- gf_sql_connection_t *sql_conn = db_conn;
- sqlite3_stmt *prep_stmt = NULL;
- uint64_t for_time_usec = 0;
- char *base_query_str = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, query_callback, out);
-
- ret = gf_get_basic_query_stmt(&base_query_str);
- if (ret <= 0) {
- goto out;
- }
-
- ret = gf_asprintf (&query_str, "%s AND "
- /*First condition: For Writes
- * Files that have write wind time smaller than for_time
- * OR
-                      * Files that have write wind time greater than for_time,
- * but write_frequency less than freq_write_cnt*/
- "( ( ((" GF_COL_TB_WSEC " * " TOSTRING(GFDB_MICROSEC) " + "
- GF_COL_TB_WMSEC ") < ? )"
- " OR "
- "( (" GF_COL_TB_WFC " < ? ) AND"
- "((" GF_COL_TB_WSEC " * " TOSTRING(GFDB_MICROSEC) " + "
- GF_COL_TB_WMSEC ") >= ? ) ) )"
- " AND "
- /*Second condition: For Reads
- * Files that have read wind time smaller than for_time
- * OR
-                      * Files that have read wind time greater than for_time,
- * but read_frequency less than freq_read_cnt*/
- "( ((" GF_COL_TB_RWSEC " * " TOSTRING(GFDB_MICROSEC) " + "
- GF_COL_TB_RWMSEC ") < ? )"
- " OR "
- "( (" GF_COL_TB_RFC " < ? ) AND"
- "((" GF_COL_TB_RWSEC " * " TOSTRING(GFDB_MICROSEC) " + "
- GF_COL_TB_RWMSEC ") >= ? ) ) ) )"
- /* Order by write wind time and write freq in ascending order
-                      * i.e most cold files w.r.t. write */
- " ORDER BY GF_FILE_TB.W_SEC ASC, "
- "GF_FILE_TB.WRITE_FREQ_CNTR ASC",
- base_query_str);
-
- if (ret < 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed to create query statement");
- query_str = NULL;
- goto out;
- }
-
- for_time_usec = gfdb_time_2_usec(for_time);
-
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, query_str, -1, &prep_stmt,
- 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
-               "Failed to prepare "
- "statement %s : %s",
- query_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind write wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 1, for_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind for_time_usec "
- "%" PRIu64 " : %s",
- for_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Bind write frequency threshold*/
- ret = sqlite3_bind_int(prep_stmt, 2, freq_write_cnt);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind freq_write_cnt"
- " %d : %s",
- freq_write_cnt, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind write wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 3, for_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind for_time_usec "
- "%" PRIu64 " : %s",
- for_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind read wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 4, for_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind for_time_usec "
- "%" PRIu64 " : %s",
- for_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Bind read frequency threshold*/
- ret = sqlite3_bind_int(prep_stmt, 5, freq_read_cnt);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind freq_read_cnt "
- "%d : %s",
- freq_read_cnt, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind read wind time*/
- ret = sqlite3_bind_int64(prep_stmt, 6, for_time_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed to bind for_time_usec "
- "%" PRIu64 " : %s",
- for_time_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Execute the query*/
- ret = gf_sql_query_function(prep_stmt, query_callback, query_cbk_args);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed Query %s", query_str);
- goto out;
- }
-
- /*Clear counters*/
- if (clear_counters) {
- ret = gf_sql_clear_counters(sql_conn);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0,
- LG_MSG_CLEAR_COUNTER_FAILED,
- "Failed to clear "
- "counters!");
- goto out;
- }
- }
-
- ret = 0;
-out:
- sqlite3_finalize(prep_stmt);
- GF_FREE(base_query_str);
- GF_FREE(query_str);
- return ret;
-}
-
-int
-gf_sqlite3_clear_files_heat(void *db_conn)
-{
- int ret = -1;
- gf_sql_connection_t *sql_conn = db_conn;
-
- CHECK_SQL_CONN(sql_conn, out);
-
- ret = gf_sql_clear_counters(sql_conn);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CLEAR_COUNTER_FAILED,
- "Failed to clear "
- "files heat");
- goto out;
- }
-
- ret = 0;
-out:
- return ret;
-}
-
-/* Function to extract version of sqlite db
- * Input:
- * void *db_conn : Sqlite connection
- * char **version : the version is extracted as a string and will be stored in
- * this variable. The freeing of the memory should be done by
- * the caller.
- * Return:
- * On success return the length of the version string that is
- * extracted.
- * On failure return -1
- * */
-int
-gf_sqlite3_version(void *db_conn, char **version)
-{
- int ret = -1;
- gf_sql_connection_t *sql_conn = db_conn;
- sqlite3_stmt *pre_stmt = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
-
- ret = sqlite3_prepare_v2(sql_conn->sqlite3_db_conn,
- "SELECT SQLITE_VERSION()", -1, &pre_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed init prepare stmt %s", sqlite3_errmsg(db_conn));
- ret = -1;
- goto out;
- }
-
- ret = sqlite3_step(pre_stmt);
- if (ret != SQLITE_ROW) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_GET_RECORD_FAILED,
- "Failed to get records "
- "from db : %s",
- sqlite3_errmsg(db_conn));
- ret = -1;
- goto out;
- }
-
- ret = gf_asprintf(version, "%s", sqlite3_column_text(pre_stmt, 0));
- if (ret <= 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed extracting version");
- }
-
-out:
- sqlite3_finalize(pre_stmt);
-
- return ret;
-}
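
A small usage sketch (illustrative, not from the deleted file; the example_* name is invented and gf_msg_debug() is the stock libglusterfs debug logger): the caller owns and frees the returned version string.

static void
example_log_sqlite_version(void *db_conn)
{
    char *version = NULL;

    if (gf_sqlite3_version(db_conn, &version) > 0)
        gf_msg_debug(GFDB_STR_SQLITE3, 0, "sqlite3 version: %s", version);

    /* GF_FREE() is NULL-safe; the string belongs to the caller */
    GF_FREE(version);
}
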
-
-/* Function to extract PRAGMA from sqlite db
- * Input:
- * void *db_conn : Sqlite connection
- * char *pragma_key : PRAGMA or setting to be extracted
- * char **pragma_value : the value of the PRAGMA or setting that is
- * extracted. This function will allocate memory
- * to pragma_value. The caller should free the memory
- * Return:
- * On success return the length of the pragma/setting value that is
- * extracted.
- * On failure return -1
- * */
-int
-gf_sqlite3_pragma(void *db_conn, char *pragma_key, char **pragma_value)
-{
- int ret = -1;
- gf_sql_connection_t *sql_conn = db_conn;
- sqlite3_stmt *pre_stmt = NULL;
- char *sqlstring = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, pragma_key, out);
-
- ret = gf_asprintf(&sqlstring, "PRAGMA %s;", pragma_key);
- if (ret <= 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed allocating memory");
- goto out;
- }
-
- ret = sqlite3_prepare_v2(sql_conn->sqlite3_db_conn, sqlstring, -1,
- &pre_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed init prepare stmt %s", sqlite3_errmsg(db_conn));
- ret = -1;
- goto out;
- }
-
- ret = sqlite3_step(pre_stmt);
- if (ret != SQLITE_ROW) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_GET_RECORD_FAILED,
- "Failed to get records "
- "from db : %s",
- sqlite3_errmsg(db_conn));
- ret = -1;
- goto out;
- }
-
- if (pragma_value) {
- ret = gf_asprintf(pragma_value, "%s", sqlite3_column_text(pre_stmt, 0));
- if (ret <= 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed to get %s from db", pragma_key);
- }
- }
-
- ret = 0;
-out:
- GF_FREE(sqlstring);
-
- sqlite3_finalize(pre_stmt);
-
- return ret;
-}
-
-/* Function to set PRAGMA to sqlite db
- * Input:
- * void *db_conn : Sqlite connection
- * char *pragma_key : PRAGMA to be set
- * char *pragma_value : the value of the PRAGMA
- * Return:
- * On success return 0
- * On failure return -1
- * */
-int
-gf_sqlite3_set_pragma(void *db_conn, char *pragma_key, char *pragma_value)
-{
- int ret = -1;
- gf_sql_connection_t *sql_conn = db_conn;
- char sqlstring[GF_NAME_MAX] = "";
- char *db_pragma_value = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, pragma_key, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, pragma_value, out);
-
- GF_SQLITE3_SET_PRAGMA(sqlstring, pragma_key, "%s", pragma_value, ret, out);
-
- ret = gf_sqlite3_pragma(db_conn, pragma_key, &db_pragma_value);
- if (ret < 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_QUERY_FAILED,
- "Failed to get %s pragma", pragma_key);
- } else {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_TRACE, 0, 0, "Value set on DB %s : %s",
- pragma_key, db_pragma_value);
- }
- GF_FREE(db_pragma_value);
-
- ret = 0;
-
-out:
-
- return ret;
-}
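
A hedged usage sketch (illustrative, not from the deleted file) tying the two PRAGMA helpers together: read the current value of a setting, then override it. GF_SQL_SYNC_OFF is the "off" literal from gfdb_sqlite3.h.

static int
example_disable_synchronous(void *db_conn)
{
    int ret = -1;
    char *current = NULL;

    /* Runs "PRAGMA synchronous;" and hands back the current value */
    ret = gf_sqlite3_pragma(db_conn, "synchronous", &current);
    if (ret < 0)
        goto out;

    /* Runs "PRAGMA synchronous = off;" */
    ret = gf_sqlite3_set_pragma(db_conn, "synchronous", GF_SQL_SYNC_OFF);
out:
    GF_FREE(current);
    return ret;
}
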
-
-/* Function to vacuum the sqlite db
- * Input:
- * void *db_conn : Sqlite connection
- * gf_boolean_t compact_active : Is compaction on?
- * gf_boolean_t compact_mode_switched : Did we just flip the compaction switch?
- * Return:
- * On success return 0
- * On failure return -1
- * */
-int
-gf_sqlite3_vacuum(void *db_conn, gf_boolean_t compact_active,
- gf_boolean_t compact_mode_switched)
-{
- int ret = -1;
- gf_sql_connection_t *sql_conn = db_conn;
- char *sqlstring = NULL;
- char *sql_strerror = NULL;
- gf_boolean_t changing_pragma = _gf_true;
-
- CHECK_SQL_CONN(sql_conn, out);
-
- if (GF_SQL_COMPACT_DEF == GF_SQL_COMPACT_NONE) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_INFO, 0, LG_MSG_COMPACT_STATUS,
- "VACUUM type is off: no VACUUM to do");
- goto out;
- }
-
- if (compact_mode_switched) {
- if (compact_active) { /* Then it was OFF before.
- So turn everything on */
- ret = 0;
- switch (GF_SQL_COMPACT_DEF) {
- case GF_SQL_COMPACT_FULL:
- ret = gf_sqlite3_set_pragma(db_conn, "auto_vacuum",
- GF_SQL_AV_FULL);
- break;
- case GF_SQL_COMPACT_INCR:
- ret = gf_sqlite3_set_pragma(db_conn, "auto_vacuum",
- GF_SQL_AV_INCR);
- break;
- case GF_SQL_COMPACT_MANUAL:
- changing_pragma = _gf_false;
- break;
- default:
- ret = -1;
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0,
- LG_MSG_COMPACT_FAILED, "VACUUM type undefined");
- goto out;
- break;
- }
-
- } else { /* Then it was ON before, so turn it all off */
- if (GF_SQL_COMPACT_DEF == GF_SQL_COMPACT_FULL ||
- GF_SQL_COMPACT_DEF == GF_SQL_COMPACT_INCR) {
- ret = gf_sqlite3_set_pragma(db_conn, "auto_vacuum",
- GF_SQL_AV_NONE);
- } else {
- changing_pragma = _gf_false;
- }
- }
-
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_TRACE, 0, LG_MSG_PREPARE_FAILED,
- "Failed to set the pragma");
- goto out;
- }
-
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_INFO, 0, LG_MSG_COMPACT_STATUS,
- "Turning compaction %i", GF_SQL_COMPACT_DEF);
-
- /* If we move from an auto_vacuum scheme to off, */
- /* or vice-versa, we must VACUUM to save the change. */
- /* In the case of a manual VACUUM scheme, we might as well */
-        /* run a manual VACUUM now if we just turned compaction on. */
- if (changing_pragma || compact_active) {
- ret = gf_asprintf(&sqlstring, "VACUUM;");
- if (ret <= 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed allocating memory");
- goto out;
- }
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_INFO, 0, LG_MSG_COMPACT_STATUS,
- "Sealed with a VACUUM");
- }
- } else { /* We are active, so it's time to VACUUM */
- if (!compact_active) { /* Did we somehow enter an inconsistent
- state? */
- ret = -1;
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Tried to VACUUM when compaction inactive");
- goto out;
- }
-
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_TRACE, 0, LG_MSG_COMPACT_STATUS,
- "Doing regular vacuum of type %i", GF_SQL_COMPACT_DEF);
-
- switch (GF_SQL_COMPACT_DEF) {
- case GF_SQL_COMPACT_INCR: /* INCR auto_vacuum */
- ret = gf_asprintf(&sqlstring, "PRAGMA incremental_vacuum;");
- if (ret <= 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0,
- LG_MSG_PREPARE_FAILED, "Failed allocating memory");
- goto out;
- }
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_INFO, 0, LG_MSG_COMPACT_STATUS,
- "Will commence an incremental VACUUM");
- break;
- /* (MANUAL) Invoke the VACUUM command */
- case GF_SQL_COMPACT_MANUAL:
- ret = gf_asprintf(&sqlstring, "VACUUM;");
- if (ret <= 0) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0,
- LG_MSG_PREPARE_FAILED, "Failed allocating memory");
- goto out;
- }
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_INFO, 0, LG_MSG_COMPACT_STATUS,
- "Will commence a VACUUM");
- break;
- /* (FULL) The database does the compaction itself. */
- /* We cannot do anything else, so we can leave */
- /* without sending anything to the database */
- case GF_SQL_COMPACT_FULL:
- ret = 0;
- goto success;
- /* Any other state must be an error. Note that OFF */
- /* cannot hit this statement since we immediately leave */
- /* in that case */
- default:
- ret = -1;
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_COMPACT_FAILED,
- "VACUUM type undefined");
- goto out;
- break;
- }
- }
-
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_TRACE, 0, LG_MSG_COMPACT_STATUS,
- "SQLString == %s", sqlstring);
-
- ret = sqlite3_exec(sql_conn->sqlite3_db_conn, sqlstring, NULL, NULL,
- &sql_strerror);
-
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_GET_RECORD_FAILED,
- "Failed to vacuum "
- "the db : %s",
- sqlite3_errmsg(db_conn));
- ret = -1;
- goto out;
- }
-success:
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_INFO, 0, LG_MSG_COMPACT_STATUS,
- compact_mode_switched ? "Successfully changed VACUUM on/off"
-                                 : "DB successfully VACUUMed");
-out:
- GF_FREE(sqlstring);
-
- return ret;
-}
diff --git a/libglusterfs/src/gfdb/gfdb_sqlite3.h b/libglusterfs/src/gfdb/gfdb_sqlite3.h
deleted file mode 100644
index d8240e905f7..00000000000
--- a/libglusterfs/src/gfdb/gfdb_sqlite3.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-#ifndef __GFDB_SQLITE3_H
-#define __GFDB_SQLITE3_H
-
-/*Sqlite3 header file*/
-#include <sqlite3.h>
-
-#include "glusterfs/logging.h"
-#include "gfdb_data_store_types.h"
-#include "gfdb_mem-types.h"
-#include "glusterfs/libglusterfs-messages.h"
-
-#define GF_STMT_SIZE_MAX 2048
-
-#define GF_DB_NAME "gfdb.db"
-#define GF_FILE_TABLE "GF_FILE_TB"
-#define GF_FILE_LINK_TABLE "GF_FLINK_TB"
-#define GF_MASTER_TABLE "sqlite_master"
-
-/*Since we have multiple tables to create, we put them in a single transaction*/
-#define GF_CREATE_STMT(out_str) \
- do { \
- sprintf(out_str, "BEGIN; CREATE TABLE IF NOT EXISTS " GF_FILE_TABLE \
- "(GF_ID TEXT PRIMARY KEY NOT NULL, " \
- "W_SEC INTEGER NOT NULL DEFAULT 0, " \
- "W_MSEC INTEGER NOT NULL DEFAULT 0, " \
- "UW_SEC INTEGER NOT NULL DEFAULT 0, " \
- "UW_MSEC INTEGER NOT NULL DEFAULT 0, " \
- "W_READ_SEC INTEGER NOT NULL DEFAULT 0, " \
- "W_READ_MSEC INTEGER NOT NULL DEFAULT 0, " \
- "UW_READ_SEC INTEGER NOT NULL DEFAULT 0, " \
- "UW_READ_MSEC INTEGER NOT NULL DEFAULT 0, " \
- "WRITE_FREQ_CNTR INTEGER NOT NULL DEFAULT 1, " \
- "READ_FREQ_CNTR INTEGER NOT NULL DEFAULT 1); " \
- "CREATE TABLE IF NOT EXISTS " GF_FILE_LINK_TABLE \
- "(GF_ID TEXT NOT NULL, " \
- "GF_PID TEXT NOT NULL, " \
- "FNAME TEXT NOT NULL, " \
- "W_DEL_FLAG INTEGER NOT NULL DEFAULT 0, " \
- "LINK_UPDATE INTEGER NOT NULL DEFAULT 0, " \
- "PRIMARY KEY ( GF_ID, GF_PID, FNAME) " \
- ");" \
- "COMMIT;"); \
- ; \
- } while (0)
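
Expanded for readability (the content is exactly what the format string above writes into out_str), the schema created inside the transaction is:

/*
 *   BEGIN;
 *   CREATE TABLE IF NOT EXISTS GF_FILE_TB
 *       (GF_ID TEXT PRIMARY KEY NOT NULL,
 *        W_SEC INTEGER NOT NULL DEFAULT 0, W_MSEC INTEGER NOT NULL DEFAULT 0,
 *        UW_SEC INTEGER NOT NULL DEFAULT 0, UW_MSEC INTEGER NOT NULL DEFAULT 0,
 *        W_READ_SEC INTEGER NOT NULL DEFAULT 0, W_READ_MSEC INTEGER NOT NULL DEFAULT 0,
 *        UW_READ_SEC INTEGER NOT NULL DEFAULT 0, UW_READ_MSEC INTEGER NOT NULL DEFAULT 0,
 *        WRITE_FREQ_CNTR INTEGER NOT NULL DEFAULT 1,
 *        READ_FREQ_CNTR INTEGER NOT NULL DEFAULT 1);
 *   CREATE TABLE IF NOT EXISTS GF_FLINK_TB
 *       (GF_ID TEXT NOT NULL, GF_PID TEXT NOT NULL, FNAME TEXT NOT NULL,
 *        W_DEL_FLAG INTEGER NOT NULL DEFAULT 0,
 *        LINK_UPDATE INTEGER NOT NULL DEFAULT 0,
 *        PRIMARY KEY (GF_ID, GF_PID, FNAME));
 *   COMMIT;
 */
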
-
-#define GF_COL_TB_WSEC GF_FILE_TABLE "." GF_COL_WSEC
-#define GF_COL_TB_WMSEC GF_FILE_TABLE "." GF_COL_WMSEC
-#define GF_COL_TB_UWSEC GF_FILE_TABLE "." GF_COL_UWSEC
-#define GF_COL_TB_UWMSEC GF_FILE_TABLE "." GF_COL_UWMSEC
-#define GF_COL_TB_RWSEC GF_FILE_TABLE "." GF_COL_WSEC_READ
-#define GF_COL_TB_RWMSEC GF_FILE_TABLE "." GF_COL_WMSEC_READ
-#define GF_COL_TB_RUWSEC GF_FILE_TABLE "." GF_COL_UWSEC_READ
-#define GF_COL_TB_RUWMSEC GF_FILE_TABLE "." GF_COL_UWMSEC_READ
-#define GF_COL_TB_WFC GF_FILE_TABLE "." GF_COL_WRITE_FREQ_CNTR
-#define GF_COL_TB_RFC GF_FILE_TABLE "." GF_COL_READ_FREQ_CNTR
-
-/*******************************************************************************
- * SQLITE3 Connection details and PRAGMA
- * ****************************************************************************/
-
-#define GF_SQL_AV_NONE "none"
-#define GF_SQL_AV_FULL "full"
-#define GF_SQL_AV_INCR "incremental"
-
-#define GF_SQL_SYNC_OFF "off"
-#define GF_SQL_SYNC_NORMAL "normal"
-#define GF_SQL_SYNC_FULL "full"
-
-#define GF_SQL_JM_DELETE "delete"
-#define GF_SQL_JM_TRUNCATE "truncate"
-#define GF_SQL_JM_PERSIST "persist"
-#define GF_SQL_JM_MEMORY "memory"
-#define GF_SQL_JM_WAL "wal"
-#define GF_SQL_JM_OFF "off"
-
-#define GF_SQL_COMPACT_NONE 0
-#define GF_SQL_COMPACT_FULL 1
-#define GF_SQL_COMPACT_INCR 2
-#define GF_SQL_COMPACT_MANUAL 3
-
-#define GF_SQL_COMPACT_DEF GF_SQL_COMPACT_INCR
-typedef enum gf_sql_auto_vacuum {
- gf_sql_av_none = 0,
- gf_sql_av_full,
- gf_sql_av_incr,
- gf_sql_av_invalid
-} gf_sql_auto_vacuum_t;
-
-typedef enum gf_sql_sync {
- gf_sql_sync_off = 0,
- gf_sql_sync_normal,
- gf_sql_sync_full,
- gf_sql_sync_invalid
-} gf_sql_sync_t;
-
-typedef enum gf_sql_journal_mode {
- gf_sql_jm_wal = 0,
- gf_sql_jm_delete,
- gf_sql_jm_truncate,
- gf_sql_jm_persist,
- gf_sql_jm_memory,
- gf_sql_jm_off,
- gf_sql_jm_invalid
-} gf_sql_journal_mode_t;
-
-typedef struct gf_sql_connection {
- char sqlite3_db_path[PATH_MAX];
- sqlite3 *sqlite3_db_conn;
- ssize_t cache_size;
- ssize_t page_size;
- ssize_t wal_autocheckpoint;
- gf_sql_journal_mode_t journal_mode;
- gf_sql_sync_t synchronous;
- gf_sql_auto_vacuum_t auto_vacuum;
-} gf_sql_connection_t;
-
-#define CHECK_SQL_CONN(sql_conn, out) \
- do { \
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, sql_conn, out); \
- if (!sql_conn->sqlite3_db_conn) { \
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, \
- LG_MSG_CONNECTION_INIT_FAILED, \
- "sqlite3 connection not initialized"); \
- goto out; \
- }; \
- } while (0)
-
-#define GF_SQLITE3_SET_PRAGMA(sqlite3_config_str, param_key, format, value, \
- ret, error) \
- do { \
- sprintf(sqlite3_config_str, "PRAGMA %s = " format, param_key, value); \
- ret = sqlite3_exec(sql_conn->sqlite3_db_conn, sqlite3_config_str, \
- NULL, NULL, NULL); \
- if (ret != SQLITE_OK) { \
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_EXEC_FAILED, \
- "Failed executing: %s : %s", sqlite3_config_str, \
- sqlite3_errmsg(sql_conn->sqlite3_db_conn)); \
- ret = -1; \
- goto error; \
- }; \
- } while (0)
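
A small illustrative caller (not from the deleted header; the example_* name is invented). Note that the macro body dereferences a variable literally named sql_conn, so such a variable must be in scope at every call site, as it is in apply_sql_params_db() and gf_sqlite3_set_pragma() in gfdb_sqlite3.c.

static int
example_set_page_size(gf_sql_connection_t *sql_conn)
{
    int ret = -1;
    char buf[GF_NAME_MAX] = "";

    /* Builds and executes "PRAGMA page_size = 4096" on sql_conn */
    GF_SQLITE3_SET_PRAGMA(buf, "page_size", "%zd", (ssize_t)4096, ret, out);

    ret = 0;
out:
    return ret;
}
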
-
-/************************SQLITE3 PARAMS KEYS***********************************/
-#define GFDB_SQL_PARAM_DBPATH "sql-db-path"
-#define GFDB_SQL_PARAM_CACHE_SIZE "sql-db-cachesize"
-#define GFDB_SQL_PARAM_PAGE_SIZE "sql-db-pagesize"
-#define GFDB_SQL_PARAM_JOURNAL_MODE "sql-db-journalmode"
-#define GFDB_SQL_PARAM_WAL_AUTOCHECK "sql-db-wal-autocheckpoint"
-#define GFDB_SQL_PARAM_SYNC "sql-db-sync"
-#define GFDB_SQL_PARAM_AUTO_VACUUM "sql-db-autovacuum"
-
-#define GF_SQL_DEFAULT_DBPATH ""
-#define GF_SQL_DEFAULT_PAGE_SIZE "4096"
-#define GF_SQL_DEFAULT_CACHE_SIZE "12500"
-#define GF_SQL_DEFAULT_WAL_AUTOCHECKPOINT "25000"
-#define GF_SQL_DEFAULT_JOURNAL_MODE GF_SQL_JM_WAL
-#define GF_SQL_DEFAULT_SYNC GF_SQL_SYNC_OFF
-#define GF_SQL_DEFAULT_AUTO_VACUUM GF_SQL_AV_NONE
-
-/* Defines the indexes for sqlite params
- * The order should be maintained*/
-typedef enum sqlite_param_index {
- sql_dbpath_ix = 0,
- sql_pagesize_ix,
- sql_cachesize_ix,
- sql_journalmode_ix,
- sql_walautocheck_ix,
- sql_dbsync_ix,
- sql_autovacuum_ix,
- /*This should be in the end*/
- sql_index_max
-} sqlite_param_index_t;
-
-/* Array to hold the sqlite param keys
- * The order should be maintained as sqlite_param_index_t*/
-static char *sqlite_params_keys[] = {
- GFDB_SQL_PARAM_DBPATH, GFDB_SQL_PARAM_PAGE_SIZE,
- GFDB_SQL_PARAM_CACHE_SIZE, GFDB_SQL_PARAM_JOURNAL_MODE,
- GFDB_SQL_PARAM_WAL_AUTOCHECK, GFDB_SQL_PARAM_SYNC,
- GFDB_SQL_PARAM_AUTO_VACUUM};
-
-/* Array of default values for sqlite params
- * The order should be maintained as sqlite_param_index_t*/
-static char *sqlite_params_default_value[] = {GF_SQL_DEFAULT_DBPATH,
- GF_SQL_DEFAULT_PAGE_SIZE,
- GF_SQL_DEFAULT_CACHE_SIZE,
- GF_SQL_DEFAULT_JOURNAL_MODE,
- GF_SQL_DEFAULT_WAL_AUTOCHECKPOINT,
- GF_SQL_DEFAULT_SYNC,
- GF_SQL_DEFAULT_AUTO_VACUUM};
-
-/*Extract sql params from page_size to auto_vacuum
- * The dbpath is extracted in a different way*/
-static inline int
-gfdb_set_sql_params(char *comp_name, dict_t *from_dict, dict_t *to_dict)
-{
- sqlite_param_index_t sql_index = sql_pagesize_ix;
- char *_val_str = NULL;
- int ret = -1;
-
- GF_ASSERT(comp_name);
- GF_ASSERT(from_dict);
- GF_ASSERT(to_dict);
-
- /*Extract and Set of the sql params from page_size*/
- for (sql_index = sql_pagesize_ix; sql_index < sql_index_max; sql_index++) {
- _val_str = NULL;
- GET_DB_PARAM_FROM_DICT_DEFAULT(comp_name, from_dict,
- sqlite_params_keys[sql_index], _val_str,
- sqlite_params_default_value[sql_index]);
- SET_DB_PARAM_TO_DICT(comp_name, to_dict, sqlite_params_keys[sql_index],
- _val_str, ret, out);
- }
-out:
- return ret;
-}
-
-/*************************SQLITE3 GFDB PLUGINS*********************************/
-
-/*Db init and fini modules*/
-int
-gf_sqlite3_fini(void **db_conn);
-int
-gf_sqlite3_init(dict_t *args, void **db_conn);
-
-/*insert/update/delete modules*/
-int
-gf_sqlite3_insert(void *db_conn, gfdb_db_record_t *);
-int
-gf_sqlite3_delete(void *db_conn, gfdb_db_record_t *);
-
-/*querying modules*/
-int
-gf_sqlite3_find_all(void *db_conn, gf_query_callback_t, void *_query_cbk_args,
- int query_limit);
-int
-gf_sqlite3_find_unchanged_for_time(void *db_conn,
- gf_query_callback_t query_callback,
- void *_query_cbk_args,
- gfdb_time_t *for_time);
-int
-gf_sqlite3_find_recently_changed_files(void *db_conn,
- gf_query_callback_t query_callback,
- void *_query_cbk_args,
- gfdb_time_t *from_time);
-int
-gf_sqlite3_find_unchanged_for_time_freq(void *db_conn,
- gf_query_callback_t query_callback,
- void *_query_cbk_args,
- gfdb_time_t *for_time,
- int write_freq_cnt, int read_freq_cnt,
- gf_boolean_t clear_counters);
-int
-gf_sqlite3_find_recently_changed_files_freq(
- void *db_conn, gf_query_callback_t query_callback, void *_query_cbk_args,
- gfdb_time_t *from_time, int write_freq_cnt, int read_freq_cnt,
- gf_boolean_t clear_counters);
-
-int
-gf_sqlite3_clear_files_heat(void *db_conn);
-
-/* Function to extract version of sqlite db
- * Input:
- * void *db_conn : Sqlite connection
- * char **version : the version is extracted as a string and will be stored in
- * this variable. The freeing of the memory should be done by
- * the caller.
- * Return:
- * On success return the length of the version string that is
- * extracted.
- * On failure return -1
- * */
-int
-gf_sqlite3_version(void *db_conn, char **version);
-
-/* Function to extract PRAGMA or setting from sqlite db
- * Input:
- * void *db_conn : Sqlite connection
- * char *pragma_key : PRAGMA or setting to be extracted
- * char **pragma_value : the value of the PRAGMA or setting that is
- * extracted. This function will allocate memory
- * to pragma_value. The caller should free the memory
- * Return:
- * On success return the length of the pragma/setting value that is
- * extracted.
- * On failure return -1
- * */
-int
-gf_sqlite3_pragma(void *db_conn, char *pragma_key, char **pragma_value);
-
-/* Function to set PRAGMA to sqlite db
- * Input:
- * void *db_conn : Sqlite connection
- * char *pragma_key : PRAGMA to be set
- * char *pragma_value : the value of the PRAGMA
- * Return:
- * On success return 0
- * On failure return -1
- * */
-int
-gf_sqlite3_set_pragma(void *db_conn, char *pragma_key, char *pragma_value);
-
-/* Function to vacuum the sqlite db
- * Input:
- * void *db_conn : Sqlite connection
- * gf_boolean_t compact_active : Is compaction on?
- * gf_boolean_t compact_mode_switched : Did we just flip the compaction switch?
- * Return:
- * On success return 0
- * On failure return -1
- * */
-int
-gf_sqlite3_vacuum(void *db_conn, gf_boolean_t compact_active,
- gf_boolean_t compact_mode_switched);
-
-void
-gf_sqlite3_fill_db_operations(gfdb_db_operations_t *gfdb_db_ops);
-
-#endif
diff --git a/libglusterfs/src/gfdb/gfdb_sqlite3_helper.c b/libglusterfs/src/gfdb/gfdb_sqlite3_helper.c
deleted file mode 100644
index 60dd5e25e66..00000000000
--- a/libglusterfs/src/gfdb/gfdb_sqlite3_helper.c
+++ /dev/null
@@ -1,1260 +0,0 @@
-/*
- Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#include "gfdb_sqlite3_helper.h"
-
-#define GFDB_SQL_STMT_SIZE 256
-
-/*****************************************************************************
- *
- * Helper function to execute actual sql queries
- *
- *
- * ****************************************************************************/
-
-static int
-gf_sql_delete_all(gf_sql_connection_t *sql_conn, char *gfid,
- gf_boolean_t ignore_errors)
-{
- int ret = -1;
- sqlite3_stmt *delete_file_stmt = NULL;
- sqlite3_stmt *delete_link_stmt = NULL;
- char *delete_link_str = "DELETE FROM " GF_FILE_LINK_TABLE
- " WHERE GF_ID = ? ;";
- char *delete_file_str = "DELETE FROM " GF_FILE_TABLE " WHERE GF_ID = ? ;";
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfid, out);
-
- /*
- * Delete all links associated with this GFID
- *
- * */
-    /*Prepare statement for deleting all links*/
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, delete_link_str, -1,
- &delete_link_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed preparing delete "
- "statement %s : %s",
- delete_link_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind gfid*/
- ret = sqlite3_bind_text(delete_link_stmt, 1, gfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding gfid %s : %s", gfid,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Execute the prepared statement*/
- if (sqlite3_step(delete_link_stmt) != SQLITE_DONE) {
- gf_msg(GFDB_STR_SQLITE3, _gfdb_log_level(GF_LOG_ERROR, ignore_errors),
- 0, LG_MSG_EXEC_FAILED,
- "Failed executing the prepared stmt %s : %s", delete_link_str,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*
- * Delete entry from file table associated with this GFID
- *
- * */
-    /*Prepare statement for deleting the file record*/
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, delete_file_str, -1,
- &delete_file_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed preparing delete "
- "statement %s : %s",
- delete_file_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind gfid*/
- ret = sqlite3_bind_text(delete_file_stmt, 1, gfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding gfid %s : %s", gfid,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Execute the prepared statement*/
- if (sqlite3_step(delete_file_stmt) != SQLITE_DONE) {
- gf_msg(GFDB_STR_SQLITE3, _gfdb_log_level(GF_LOG_ERROR, ignore_errors),
- 0, LG_MSG_EXEC_FAILED,
- "Failed executing the prepared stmt %s : %s", delete_file_str,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-out:
- /*Free prepared statement*/
- sqlite3_finalize(delete_file_stmt);
- sqlite3_finalize(delete_link_stmt);
- return ret;
-}
-
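Every helper in this file follows the same sqlite3 prepare/bind/step/finalize cycle that gf_sql_delete_all uses twice above. Stripped of the gluster logging macros, the pattern is roughly as follows (db, the table name and gfid are placeholders):

    sqlite3_stmt *stmt = NULL;
    int ret = sqlite3_prepare(db, "DELETE FROM some_table WHERE GF_ID = ?;",
                              -1, &stmt, 0);

    if (ret == SQLITE_OK)
        ret = sqlite3_bind_text(stmt, 1, gfid, -1, NULL);
    if (ret == SQLITE_OK && sqlite3_step(stmt) != SQLITE_DONE)
        ret = -1; /* execution failed */
    sqlite3_finalize(stmt); /* always finalize, even on the error paths */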
-static int
-gf_sql_delete_link(gf_sql_connection_t *sql_conn, char *gfid, char *pargfid,
- char *basename, gf_boolean_t ignore_errors)
-{
- int ret = -1;
- sqlite3_stmt *delete_stmt = NULL;
- char *delete_str = "DELETE FROM " GF_FILE_LINK_TABLE
- " WHERE GF_ID = ? AND GF_PID = ?"
- " AND FNAME = ?;";
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, pargfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, basename, out);
-
- /*Prepare statement*/
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, delete_str, -1,
- &delete_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed preparing delete "
- "statement %s : %s",
- delete_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind gfid*/
- ret = sqlite3_bind_text(delete_stmt, 1, gfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding gfid %s : %s", gfid,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind pargfid*/
- ret = sqlite3_bind_text(delete_stmt, 2, pargfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding parent gfid %s "
- ": %s",
- pargfid, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind basename*/
- ret = sqlite3_bind_text(delete_stmt, 3, basename, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding basename %s : "
- "%s",
- basename, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Execute the prepared statement*/
- if (sqlite3_step(delete_stmt) != SQLITE_DONE) {
- gf_msg(GFDB_STR_SQLITE3, _gfdb_log_level(GF_LOG_ERROR, ignore_errors),
- 0, LG_MSG_EXEC_FAILED,
- "Failed executing the prepared stmt %s : %s", delete_str,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- /*Free prepared statement*/
- sqlite3_finalize(delete_stmt);
- return ret;
-}
-
-static int
-gf_sql_update_link_flags(gf_sql_connection_t *sql_conn, char *gfid,
- char *pargfid, char *basename, int update_flag,
- gf_boolean_t is_update_or_delete,
- gf_boolean_t ignore_errors)
-{
- int ret = -1;
- sqlite3_stmt *update_stmt = NULL;
- char *update_column = NULL;
- char update_str[1024] = "";
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, pargfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, basename, out);
-
- update_column = (is_update_or_delete) ? "LINK_UPDATE" : "W_DEL_FLAG";
-
- sprintf(update_str,
- "UPDATE " GF_FILE_LINK_TABLE
- " SET %s = ?"
- " WHERE GF_ID = ? AND GF_PID = ? AND FNAME = ?;",
- update_column);
-
- /*Prepare statement*/
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, update_str, -1,
- &update_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed preparing update "
- "statement %s : %s",
- update_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind link_update*/
- ret = sqlite3_bind_int(update_stmt, 1, update_flag);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding update_flag %d "
- ": %s",
- update_flag, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind gfid*/
- ret = sqlite3_bind_text(update_stmt, 2, gfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding gfid %s : %s", gfid,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind pargfid*/
- ret = sqlite3_bind_text(update_stmt, 3, pargfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding parent gfid %s "
- ": %s",
- pargfid, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind basename*/
- ret = sqlite3_bind_text(update_stmt, 4, basename, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding basename %s : "
- "%s",
- basename, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Execute the prepared statement*/
- if (sqlite3_step(update_stmt) != SQLITE_DONE) {
- gf_msg(GFDB_STR_SQLITE3, _gfdb_log_level(GF_LOG_ERROR, ignore_errors),
- 0, LG_MSG_EXEC_FAILED,
- "Failed executing the prepared stmt %s : %s", update_str,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- /*Free prepared statement*/
- sqlite3_finalize(update_stmt);
- return ret;
-}
-
-static int
-gf_sql_insert_link(gf_sql_connection_t *sql_conn, char *gfid, char *pargfid,
- char *basename, gf_boolean_t link_consistency,
- gf_boolean_t ignore_errors)
-{
- int ret = -1;
- sqlite3_stmt *insert_stmt = NULL;
- char insert_str[GFDB_SQL_STMT_SIZE] = "";
-
- sprintf(insert_str,
- "INSERT INTO " GF_FILE_LINK_TABLE
- " (GF_ID, GF_PID, FNAME,"
- " W_DEL_FLAG, LINK_UPDATE) "
- " VALUES (?, ?, ?, 0, %d);",
- link_consistency);
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, pargfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, basename, out);
-
- /*Prepare statement*/
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, insert_str, -1,
- &insert_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed preparing insert "
- "statement %s : %s",
- insert_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind gfid*/
- ret = sqlite3_bind_text(insert_stmt, 1, gfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding gfid %s : %s", gfid,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind pargfid*/
- ret = sqlite3_bind_text(insert_stmt, 2, pargfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding parent gfid %s "
- ": %s",
- pargfid, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind basename*/
- ret = sqlite3_bind_text(insert_stmt, 3, basename, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding basename %s : %s", basename,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Execute the prepared statement*/
- if (sqlite3_step(insert_stmt) != SQLITE_DONE) {
- gf_msg(GFDB_STR_SQLITE3, _gfdb_log_level(GF_LOG_ERROR, ignore_errors),
- 0, LG_MSG_EXEC_FAILED,
- "Failed executing the prepared "
- "stmt %s %s %s %s : %s",
- gfid, pargfid, basename, insert_str,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- /*Free prepared statement*/
- sqlite3_finalize(insert_stmt);
- return ret;
-}
-
-static int
-gf_sql_update_link(gf_sql_connection_t *sql_conn, char *gfid, char *pargfid,
- char *basename, char *old_pargfid, char *old_basename,
- gf_boolean_t link_consistency, gf_boolean_t ignore_errors)
-{
- int ret = -1;
- sqlite3_stmt *insert_stmt = NULL;
- char insert_str[GFDB_SQL_STMT_SIZE] = "";
-
- sprintf(insert_str,
- "INSERT INTO " GF_FILE_LINK_TABLE
- " (GF_ID, GF_PID, FNAME,"
- " W_DEL_FLAG, LINK_UPDATE) "
- " VALUES (? , ?, ?, 0, %d);",
- link_consistency);
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, pargfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, basename, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, old_pargfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, old_basename, out);
-
- /*
- *
- * Delete the old link
- *
- * */
- ret = gf_sql_delete_link(sql_conn, gfid, old_pargfid, old_basename,
- ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, _gfdb_log_level(GF_LOG_ERROR, ignore_errors),
- 0, LG_MSG_DELETE_FAILED, "Failed deleting old link");
- goto out;
- }
-
- /*
- *
- * insert new link
- *
- * */
- /*Prepare statement*/
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, insert_str, -1,
- &insert_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed preparing insert "
- "statement %s : %s",
- insert_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind gfid*/
- ret = sqlite3_bind_text(insert_stmt, 1, gfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding gfid %s : %s", gfid,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind new pargfid*/
- ret = sqlite3_bind_text(insert_stmt, 2, pargfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding parent gfid %s "
- ": %s",
- pargfid, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind new basename*/
- ret = sqlite3_bind_text(insert_stmt, 3, basename, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding basename %s : "
- "%s",
- basename, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Execute the prepared statement*/
- if (sqlite3_step(insert_stmt) != SQLITE_DONE) {
- gf_msg(GFDB_STR_SQLITE3, _gfdb_log_level(GF_LOG_ERROR, ignore_errors),
- 0, LG_MSG_EXEC_FAILED,
- "Failed executing the prepared stmt %s : %s", insert_str,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- /*Free prepared statement*/
- sqlite3_finalize(insert_stmt);
- return ret;
-}
-
-static int
-gf_sql_insert_write_wind_time(gf_sql_connection_t *sql_conn, char *gfid,
- gfdb_time_t *wind_time,
- gf_boolean_t ignore_errors)
-{
- int ret = -1;
- sqlite3_stmt *insert_stmt = NULL;
- char *insert_str = "INSERT INTO " GF_FILE_TABLE
- "(GF_ID, W_SEC, W_MSEC, UW_SEC, UW_MSEC)"
- " VALUES (?, ?, ?, 0, 0);";
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, wind_time, out);
-
- /*Prepare statement*/
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, insert_str, -1,
- &insert_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed preparing insert "
- "statement %s : %s",
- insert_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind gfid*/
- ret = sqlite3_bind_text(insert_stmt, 1, gfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding gfid %s : %s", gfid,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind wind secs*/
- ret = sqlite3_bind_int(insert_stmt, 2, wind_time->tv_sec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding parent wind "
- "secs %ld : %s",
- wind_time->tv_sec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind wind msecs*/
- ret = sqlite3_bind_int(insert_stmt, 3, wind_time->tv_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding parent wind "
- "msecs %ld : %s",
- wind_time->tv_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Execute the prepared statement*/
- if (sqlite3_step(insert_stmt) != SQLITE_DONE) {
- gf_msg(GFDB_STR_SQLITE3, _gfdb_log_level(GF_LOG_ERROR, ignore_errors),
- 0, LG_MSG_EXEC_FAILED,
- "Failed executing the prepared stmt GFID:%s %s : %s", gfid,
- insert_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- /*Free prepared statement*/
- sqlite3_finalize(insert_stmt);
- return ret;
-}
-
-/*Update write/read times for both wind and unwind*/
-static int
-gf_update_time(gf_sql_connection_t *sql_conn, char *gfid,
- gfdb_time_t *update_time, gf_boolean_t record_counter,
- gf_boolean_t is_wind, gf_boolean_t is_read,
- gf_boolean_t ignore_errors)
-{
- int ret = -1;
- sqlite3_stmt *update_stmt = NULL;
- char update_str[1024] = "";
- char *freq_cntr_str = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfid, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, update_time, out);
-
- /*
- * Constructing the prepare statement string.
- *
- * */
- /*For write time*/
- if (!is_read) {
- if (is_wind) {
- /*if record counter is on*/
- freq_cntr_str = (record_counter)
- ? ", WRITE_FREQ_CNTR = WRITE_FREQ_CNTR + 1"
- : "";
-
-            /*Perfectly safe as we will not go out of array bounds*/
- sprintf(update_str,
- "UPDATE " GF_FILE_TABLE
- " SET W_SEC = ?, W_MSEC = ? "
-                    " %s" /*place for write freq counter*/
- " WHERE GF_ID = ? ;",
- freq_cntr_str);
- } else {
-            /*Perfectly safe as we will not go out of array bounds*/
- sprintf(update_str,
- "UPDATE " GF_FILE_TABLE " SET UW_SEC = ?, UW_MSEC = ? ;");
- }
- }
- /*For Read Time update*/
- else {
- if (is_wind) {
- /*if record counter is on*/
- freq_cntr_str = (record_counter)
- ? ", READ_FREQ_CNTR = READ_FREQ_CNTR + 1"
- : "";
-
-            /*Perfectly safe as we will not go out of array bounds*/
- sprintf(update_str,
- "UPDATE " GF_FILE_TABLE
- " SET W_READ_SEC = ?, W_READ_MSEC = ? "
- " %s" /*place for read freq counters*/
- " WHERE GF_ID = ? ;",
- freq_cntr_str);
- } else {
-            /*Perfectly safe as we will not go out of array bounds*/
- sprintf(update_str, "UPDATE " GF_FILE_TABLE
- " SET UW_READ_SEC = ?, UW_READ_MSEC = ? ;");
- }
- }
-
- /*Prepare statement*/
- ret = sqlite3_prepare(sql_conn->sqlite3_db_conn, update_str, -1,
- &update_stmt, 0);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PREPARE_FAILED,
- "Failed preparing insert "
- "statement %s : %s",
- update_str, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind time secs*/
- ret = sqlite3_bind_int(update_stmt, 1, update_time->tv_sec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding parent wind "
- "secs %ld : %s",
- update_time->tv_sec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind time msecs*/
- ret = sqlite3_bind_int(update_stmt, 2, update_time->tv_usec);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding parent wind "
- "msecs %ld : %s",
- update_time->tv_usec, sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- /*Bind gfid*/
- ret = sqlite3_bind_text(update_stmt, 3, gfid, -1, NULL);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_BINDING_FAILED,
- "Failed binding gfid %s : %s", gfid,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
-    /*Execute the prepared statement*/
- if (sqlite3_step(update_stmt) != SQLITE_DONE) {
- gf_msg(GFDB_STR_SQLITE3, _gfdb_log_level(GF_LOG_ERROR, ignore_errors),
- 0, LG_MSG_EXEC_FAILED,
- "Failed executing the prepared stmt %s : %s", update_str,
- sqlite3_errmsg(sql_conn->sqlite3_db_conn));
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- /*Free prepared statement*/
- sqlite3_finalize(update_stmt);
- return ret;
-}
-
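For reference, the statements built dynamically by gf_update_time above resolve, with record counters enabled and GF_FILE_TABLE left symbolic, to the following two wind-path updates; the unwind variants simply drop the counter increment and, as written above, carry no WHERE clause:

    UPDATE <GF_FILE_TABLE> SET W_SEC = ?, W_MSEC = ?, WRITE_FREQ_CNTR = WRITE_FREQ_CNTR + 1 WHERE GF_ID = ? ;
    UPDATE <GF_FILE_TABLE> SET W_READ_SEC = ?, W_READ_MSEC = ?, READ_FREQ_CNTR = READ_FREQ_CNTR + 1 WHERE GF_ID = ? ;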
-/******************************************************************************
- *
- * Helper functions for gf_sqlite3_insert()
- *
- *
- * ****************************************************************************/
-
-int
-gf_sql_insert_wind(gf_sql_connection_t *sql_conn,
- gfdb_db_record_t *gfdb_db_record)
-{
- int ret = -1;
- gfdb_time_t *modtime = NULL;
- char *pargfid_str = NULL;
- char *gfid_str = NULL;
- char *old_pargfid_str = NULL;
- gf_boolean_t its_wind = _gf_true; /*remains true for this function*/
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfdb_db_record, out);
-
- gfid_str = gf_strdup(uuid_utoa(gfdb_db_record->gfid));
- if (!gfid_str) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CREATE_FAILED,
- "Creating gfid string failed.");
- goto out;
- }
-
- modtime = &gfdb_db_record->gfdb_wind_change_time;
-
- /* handle all dentry based operations */
- if (isdentryfop(gfdb_db_record->gfdb_fop_type)) {
- /*Parent GFID is always set*/
- pargfid_str = gf_strdup(uuid_utoa(gfdb_db_record->pargfid));
- if (!pargfid_str) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CREATE_FAILED,
- "Creating gfid string "
- "failed.");
- goto out;
- }
-
- /* handle create, mknod */
- if (isdentrycreatefop(gfdb_db_record->gfdb_fop_type)) {
- /*insert link*/
- ret = gf_sql_insert_link(
- sql_conn, gfid_str, pargfid_str, gfdb_db_record->file_name,
- gfdb_db_record->link_consistency, _gf_true);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_WARNING,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_INSERT_FAILED,
- "Failed "
- "inserting link in DB");
-                /* Even if link creation fails we
-                 * continue with the creation of the file record.
-                 * This covers two cases:
-                 * 1) Lookup heal: If the file record from
-                 * gf_file_tb is deleted but the link record
-                 * still exists, lookup heal will attempt a heal
-                 * with create_wind set. The link heal will fail
-                 * as there is already a record, and if we don't
-                 * ignore the error we will not heal the
-                 * gf_file_tb.
-                 * 2) Rename of a file in the cold tier: during
-                 * such a rename a link record is created in the
-                 * hot tier for the linkto file. When the file
-                 * gets heated and moves to the hot tier, ctr
-                 * lookup heal will attempt to create the link
-                 * and file records, and if we don't ignore the
-                 * error we will not heal the gf_file_tb.
-                 * */
- }
- gfdb_db_record->islinkupdate = gfdb_db_record->link_consistency;
-
- /*
- * Only for create/mknod insert wind time
- * for the first time
- * */
- ret = gf_sql_insert_write_wind_time(sql_conn, gfid_str, modtime,
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_INSERT_FAILED,
- "Failed inserting wind time in DB");
- goto out;
- }
- goto out;
- }
- /*handle rename, link */
- else {
- /*rename*/
- if (strlen(gfdb_db_record->old_file_name) != 0) {
- old_pargfid_str = gf_strdup(
- uuid_utoa(gfdb_db_record->old_pargfid));
- if (!old_pargfid_str) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0,
- LG_MSG_CREATE_FAILED,
- "Creating gfid string failed.");
- goto out;
- }
- ret = gf_sql_update_link(
- sql_conn, gfid_str, pargfid_str, gfdb_db_record->file_name,
- old_pargfid_str, gfdb_db_record->old_file_name,
- gfdb_db_record->link_consistency,
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_UPDATE_FAILED, "Failed updating link");
- goto out;
- }
- gfdb_db_record->islinkupdate = gfdb_db_record->link_consistency;
- }
- /*link*/
- else {
- ret = gf_sql_insert_link(sql_conn, gfid_str, pargfid_str,
- gfdb_db_record->file_name,
- gfdb_db_record->link_consistency,
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_INSERT_FAILED,
- "Failed inserting link in DB");
- goto out;
- }
- gfdb_db_record->islinkupdate = gfdb_db_record->link_consistency;
- }
- }
- }
-
-    /* update times only when requested */
- if (gfdb_db_record->do_record_times) {
- /*All fops update times read or write*/
- ret = gf_update_time(sql_conn, gfid_str, modtime,
- gfdb_db_record->do_record_counters, its_wind,
- isreadfop(gfdb_db_record->gfdb_fop_type),
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR, gfdb_db_record->ignore_errors),
- 0, LG_MSG_UPDATE_FAILED,
- "Failed update wind time"
- " in DB");
- goto out;
- }
- }
-
- ret = 0;
-out:
- GF_FREE(gfid_str);
- GF_FREE(pargfid_str);
- GF_FREE(old_pargfid_str);
- return ret;
-}
-
-int
-gf_sql_insert_unwind(gf_sql_connection_t *sql_conn,
- gfdb_db_record_t *gfdb_db_record)
-{
- int ret = -1;
- gfdb_time_t *modtime = NULL;
- gf_boolean_t its_wind = _gf_true; /*remains true for this function*/
- char *gfid_str = NULL;
- char *pargfid_str = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfdb_db_record, out);
-
- gfid_str = gf_strdup(uuid_utoa(gfdb_db_record->gfid));
- if (!gfid_str) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CREATE_FAILED,
- "Creating gfid string failed.");
- goto out;
- }
-
- /*Only update if recording unwind is set*/
- if (gfdb_db_record->do_record_times &&
- gfdb_db_record->do_record_uwind_time) {
- modtime = &gfdb_db_record->gfdb_unwind_change_time;
- ret = gf_update_time(sql_conn, gfid_str, modtime,
- gfdb_db_record->do_record_counters, (!its_wind),
- isreadfop(gfdb_db_record->gfdb_fop_type),
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR, gfdb_db_record->ignore_errors),
- 0, LG_MSG_UPDATE_FAILED,
- "Failed update unwind "
- "time in DB");
- goto out;
- }
- }
-
-    /*For link creation and changes we use the link-update flag*/
- if (gfdb_db_record->islinkupdate &&
- isdentryfop(gfdb_db_record->gfdb_fop_type)) {
- pargfid_str = gf_strdup(uuid_utoa(gfdb_db_record->pargfid));
- if (!pargfid_str) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CREATE_FAILED,
- "Creating pargfid_str string failed.");
- goto out;
- }
-
- ret = gf_sql_update_link_flags(sql_conn, gfid_str, pargfid_str,
- gfdb_db_record->file_name, 0, _gf_true,
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR, gfdb_db_record->ignore_errors),
- 0, LG_MSG_UPDATE_FAILED,
- "Failed updating link flags in unwind");
- goto out;
- }
- }
-
- ret = 0;
-out:
- GF_FREE(gfid_str);
- GF_FREE(pargfid_str);
- return ret;
-}
-
-int
-gf_sql_update_delete_wind(gf_sql_connection_t *sql_conn,
- gfdb_db_record_t *gfdb_db_record)
-{
- int ret = -1;
- char *gfid_str = NULL;
- char *pargfid_str = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfdb_db_record, out);
-
- gfid_str = gf_strdup(uuid_utoa(gfdb_db_record->gfid));
- if (!gfid_str) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CREATE_FAILED,
- "Creating gfid string failed.");
- goto out;
- }
-
- pargfid_str = gf_strdup(uuid_utoa(gfdb_db_record->pargfid));
- if (!pargfid_str) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CREATE_FAILED,
- "Creating pargfid_str "
- "string failed.");
- goto out;
- }
-
- if (gfdb_db_record->link_consistency) {
- ret = gf_sql_update_link_flags(sql_conn, gfid_str, pargfid_str,
- gfdb_db_record->file_name, 1, _gf_false,
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR, gfdb_db_record->ignore_errors),
- 0, LG_MSG_UPDATE_FAILED,
- "Failed updating link flags in wind");
- goto out;
- }
- }
-
- ret = 0;
-out:
- GF_FREE(gfid_str);
- GF_FREE(pargfid_str);
- return ret;
-}
-
-int
-gf_sql_delete_unwind(gf_sql_connection_t *sql_conn,
- gfdb_db_record_t *gfdb_db_record)
-{
- int ret = -1;
- char *gfid_str = NULL;
- char *pargfid_str = NULL;
- gfdb_time_t *modtime = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, gfdb_db_record, out);
-
- gfid_str = gf_strdup(uuid_utoa(gfdb_db_record->gfid));
- if (!gfid_str) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CREATE_FAILED,
- "Creating gfid string failed.");
- goto out;
- }
-
- /*Nuke all the entries for this GFID from DB*/
- if (gfdb_db_record->gfdb_fop_path == GFDB_FOP_UNDEL_ALL) {
- gf_sql_delete_all(sql_conn, gfid_str, gfdb_db_record->ignore_errors);
- }
- /*Remove link entries only*/
- else if (gfdb_db_record->gfdb_fop_path == GFDB_FOP_UNDEL) {
- pargfid_str = gf_strdup(uuid_utoa(gfdb_db_record->pargfid));
- if (!pargfid_str) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_CREATE_FAILED,
- "Creating pargfid_str "
- "string failed.");
- goto out;
- }
-
- /* Special performance case:
- * Updating wind time in unwind for delete. This is done here
-         * as in the wind path we will not know whether it's the last
-         * link or not. For the last link there is no use updating any
-         * wind or unwind time!*/
- if (gfdb_db_record->do_record_times) {
- /*Update the wind write times*/
- modtime = &gfdb_db_record->gfdb_wind_change_time;
- ret = gf_update_time(sql_conn, gfid_str, modtime,
- gfdb_db_record->do_record_counters, _gf_true,
- isreadfop(gfdb_db_record->gfdb_fop_type),
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_UPDATE_FAILED,
- "Failed update wind time in DB");
- goto out;
- }
- }
-
- modtime = &gfdb_db_record->gfdb_unwind_change_time;
-
- ret = gf_sql_delete_link(sql_conn, gfid_str, pargfid_str,
- gfdb_db_record->file_name,
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_DELETE_FAILED,
- "Failed deleting link");
- goto out;
- }
-
- if (gfdb_db_record->do_record_times &&
- gfdb_db_record->do_record_uwind_time) {
- ret = gf_update_time(sql_conn, gfid_str, modtime,
- gfdb_db_record->do_record_counters, _gf_false,
- isreadfop(gfdb_db_record->gfdb_fop_type),
- gfdb_db_record->ignore_errors);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3,
- _gfdb_log_level(GF_LOG_ERROR,
- gfdb_db_record->ignore_errors),
- 0, LG_MSG_UPDATE_FAILED,
- "Failed update unwind time in DB");
- goto out;
- }
- }
- } else {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_INVALID_UPLINK,
- "Invalid unlink option");
- goto out;
- }
- ret = 0;
-out:
- GF_FREE(gfid_str);
- GF_FREE(pargfid_str);
- return ret;
-}
-
-/******************************************************************************
- *
- * Find/Query helper functions
- *
- * ****************************************************************************/
-int
-gf_sql_query_function(sqlite3_stmt *prep_stmt,
- gf_query_callback_t query_callback, void *_query_cbk_args)
-{
- int ret = -1;
- gfdb_query_record_t *query_record = NULL;
- char *text_column = NULL;
- sqlite3 *db_conn = NULL;
- uuid_t prev_gfid = {0};
- uuid_t curr_gfid = {0};
- uuid_t pgfid = {0};
- char *base_name = NULL;
- gf_boolean_t is_first_record = _gf_true;
- gf_boolean_t is_query_empty = _gf_true;
-
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, prep_stmt, out);
- GF_VALIDATE_OR_GOTO(GFDB_STR_SQLITE3, query_callback, out);
-
- db_conn = sqlite3_db_handle(prep_stmt);
-
- /*
- * Loop to access queried rows
- * Each db record will have 3 columns
- * GFID, PGFID, FILE_NAME
- *
-     * For a file with multiple hard links we will get multiple query rows
-     * with the same GFID, but different PGFID and FILE_NAME combinations.
-     * For example, if a file with
-     * GFID = 00000000-0000-0000-0000-000000000006
-     * has 3 hardlinks file1, file2 and file3 in 3 different folders
-     * with GFIDs
- * 00000000-0000-0000-0000-0000EFC00001,
- * 00000000-0000-0000-0000-00000ABC0001 and
- * 00000000-0000-0000-0000-00000ABC00CD
- * Then there will be 3 records
- * GFID : 00000000-0000-0000-0000-000000000006
- * PGFID : 00000000-0000-0000-0000-0000EFC00001
- * FILE_NAME : file1
- *
- * GFID : 00000000-0000-0000-0000-000000000006
- * PGFID : 00000000-0000-0000-0000-00000ABC0001
- * FILE_NAME : file2
- *
- * GFID : 00000000-0000-0000-0000-000000000006
- * PGFID : 00000000-0000-0000-0000-00000ABC00CD
- * FILE_NAME : file3
- *
- * This is retrieved and added to a single query_record
- *
- * query_record->gfid = 00000000-0000-0000-0000-000000000006
- * ->link_info = {00000000-0000-0000-0000-0000EFC00001,
- * "file1"}
- * |
- * V
- * link_info = {00000000-0000-0000-0000-00000ABC0001,
- * "file2"}
- * |
- * V
-     *                     link_info = {00000000-0000-0000-0000-00000ABC00CD,
- * "file3",
- * list}
- *
- * This query record is sent to the registered query_callback()
- *
- * */
- while ((ret = sqlite3_step(prep_stmt)) == SQLITE_ROW) {
- if (sqlite3_column_count(prep_stmt) > 0) {
- is_query_empty = _gf_false;
-
- /*Retrieving GFID - column index is 0*/
- text_column = (char *)sqlite3_column_text(prep_stmt, 0);
- if (!text_column) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_GET_ID_FAILED,
-                       "Failed to "
-                       "retrieve GFID");
- goto out;
- }
- ret = gf_uuid_parse(text_column, curr_gfid);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PARSE_FAILED,
- "Failed to parse "
- "GFID");
- goto out;
- }
-
- /*
- * if the previous record was not of the current gfid
- * call the call_back function and send the
- * query record, which will have all the link_info
- * objects associated with this gfid
- *
- * */
- if (gf_uuid_compare(curr_gfid, prev_gfid) != 0) {
- /* If this is not the first record */
- if (!is_first_record) {
- /*Call the call_back function provided*/
- ret = query_callback(query_record, _query_cbk_args);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0,
- LG_MSG_QUERY_CALL_BACK_FAILED,
- "Query call back "
- "failed");
- goto out;
- }
- }
-
- /*Clear the query record*/
- gfdb_query_record_free(query_record);
- query_record = NULL;
- query_record = gfdb_query_record_new();
- if (!query_record) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0,
- LG_MSG_CREATE_FAILED,
- "Failed to create "
- "query_record");
- goto out;
- }
-
- gf_uuid_copy(query_record->gfid, curr_gfid);
- gf_uuid_copy(prev_gfid, curr_gfid);
- }
-
- /* Get PGFID */
- text_column = (char *)sqlite3_column_text(prep_stmt, 1);
- if (!text_column) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_GET_ID_FAILED,
- "Failed to"
- " retrieve GF_ID");
- goto out;
- }
- ret = gf_uuid_parse(text_column, pgfid);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_PARSE_FAILED,
- "Failed to parse "
- "GF_ID");
- goto out;
- }
-
- /* Get Base name */
- text_column = (char *)sqlite3_column_text(prep_stmt, 2);
- if (!text_column) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_GET_ID_FAILED,
- "Failed to"
- " retrieve GF_ID");
- goto out;
- }
- base_name = text_column;
-
- /* Add link info to the list */
- ret = gfdb_add_link_to_query_record(query_record, pgfid, base_name);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_GET_ID_FAILED,
- "Failed to"
- " add link info to query record");
- goto out;
- }
-
- is_first_record = _gf_false;
- }
- }
-
- if (ret != SQLITE_DONE) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_GET_RECORD_FAILED,
- "Failed to retrieve records "
- "from db : %s",
- sqlite3_errmsg(db_conn));
- ret = -1;
- goto out;
- }
-
- if (!is_query_empty) {
- /*
- * Call the call_back function for the last record from the
- * Database
- * */
- ret = query_callback(query_record, _query_cbk_args);
- if (ret) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0,
- LG_MSG_QUERY_CALL_BACK_FAILED, "Query call back failed");
- goto out;
- }
- }
-
- ret = 0;
-out:
- gfdb_query_record_free(query_record);
- query_record = NULL;
- return ret;
-}
-
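The query_callback invoked above is handed one aggregated record per GFID, as illustrated in the hard-link comment before the row loop. A skeletal callback matching the way it is invoked here might look like this; it is purely illustrative, and real callers would also walk the attached link_info list:

    static int
    example_query_cbk(gfdb_query_record_t *query_record, void *_query_cbk_args)
    {
        (void)_query_cbk_args;

        /* one invocation per GFID; uuid_utoa() renders the gfid for logging */
        gf_msg_debug(GFDB_STR_SQLITE3, 0, "queried gfid: %s",
                     uuid_utoa(query_record->gfid));

        return 0; /* returning non-zero makes gf_sql_query_function bail out */
    }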
-int
-gf_sql_clear_counters(gf_sql_connection_t *sql_conn)
-{
- int ret = -1;
- char *sql_strerror = NULL;
- char *query_str = NULL;
-
- CHECK_SQL_CONN(sql_conn, out);
-
- query_str = "UPDATE " GF_FILE_TABLE " SET " GF_COL_READ_FREQ_CNTR
- " = 0 , " GF_COL_WRITE_FREQ_CNTR " = 0 ;";
-
- ret = sqlite3_exec(sql_conn->sqlite3_db_conn, query_str, NULL, NULL,
- &sql_strerror);
- if (ret != SQLITE_OK) {
- gf_msg(GFDB_STR_SQLITE3, GF_LOG_ERROR, 0, LG_MSG_EXEC_FAILED,
- "Failed to execute: %s : %s", query_str, sql_strerror);
- sqlite3_free(sql_strerror);
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- return ret;
-}
diff --git a/libglusterfs/src/gfdb/gfdb_sqlite3_helper.h b/libglusterfs/src/gfdb/gfdb_sqlite3_helper.h
deleted file mode 100644
index f19344a353c..00000000000
--- a/libglusterfs/src/gfdb/gfdb_sqlite3_helper.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-#ifndef __GFDB_SQLITE3_HELPER_H
-#define __GFDB_SQLITE3_HELPER_H
-
-#include "gfdb_sqlite3.h"
-
-/******************************************************************************
- *
- * Helper functions for gf_sqlite3_insert()
- *
- * ****************************************************************************/
-
-int
-gf_sql_insert_wind(gf_sql_connection_t *sql_conn,
- gfdb_db_record_t *gfdb_db_record);
-
-int
-gf_sql_insert_unwind(gf_sql_connection_t *sql_conn,
- gfdb_db_record_t *gfdb_db_record);
-
-int
-gf_sql_update_delete_wind(gf_sql_connection_t *sql_conn,
- gfdb_db_record_t *gfdb_db_record);
-
-int
-gf_sql_delete_unwind(gf_sql_connection_t *sql_conn,
- gfdb_db_record_t *gfdb_db_record);
-
-/******************************************************************************
- *
- * Find/Query helper functions
- *
- * ****************************************************************************/
-
-int
-gf_sql_query_function(sqlite3_stmt *prep_stmt,
- gf_query_callback_t query_callback,
- void *_query_cbk_args);
-
-int
-gf_sql_clear_counters(gf_sql_connection_t *sql_conn);
-
-#endif
diff --git a/libglusterfs/src/gidcache.c b/libglusterfs/src/gidcache.c
index 40fcffbb35e..64a93802f76 100644
--- a/libglusterfs/src/gidcache.c
+++ b/libglusterfs/src/gidcache.c
@@ -10,6 +10,7 @@
#include "glusterfs/gidcache.h"
#include "glusterfs/mem-pool.h"
+#include "glusterfs/common-utils.h"
/*
* We treat this as a very simple set-associative LRU cache, with entries aged
@@ -64,7 +65,7 @@ gid_cache_lookup(gid_cache_t *cache, uint64_t id, uint64_t uid, uint64_t gid)
time_t now;
const gid_list_t *agl;
- now = time(NULL);
+ now = gf_time();
LOCK(&cache->gc_lock);
bucket = id % cache->gc_nbuckets;
agl = BUCKET_START(cache->gc_cache, bucket);
@@ -132,7 +133,7 @@ gid_cache_add(gid_cache_t *cache, gid_list_t *gl)
if (!cache->gc_max_age)
return 0;
- now = time(NULL);
+ now = gf_time();
LOCK(&cache->gc_lock);
/*
diff --git a/libglusterfs/src/globals.c b/libglusterfs/src/globals.c
index 63b6358bbe0..ae06f8be386 100644
--- a/libglusterfs/src/globals.c
+++ b/libglusterfs/src/globals.c
@@ -314,7 +314,18 @@ glusterfs_cleanup(void *ptr)
GF_FREE(thread_syncopctx.groups);
}
- mem_pool_thread_destructor();
+ mem_pool_thread_destructor(NULL);
+}
+
+void
+gf_thread_needs_cleanup(void)
+{
+ /* The value stored in free_key TLS is not really used for anything, but
+     * the pthread implementation doesn't call the TLS destructor unless
+ * it's != NULL. This function must be called whenever something is
+ * allocated for this thread so that glusterfs_cleanup() will be called
+ * and resources can be released. */
+ (void)pthread_setspecific(free_key, (void *)1);
}
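The trick relied on here is standard pthreads behaviour: a destructor registered with pthread_key_create() is only invoked at thread exit when the key's value is non-NULL, so storing any dummy non-NULL value is enough to arm it. A self-contained illustration of that mechanism, independent of the gluster free_key machinery:

#include <pthread.h>
#include <stdio.h>

static pthread_key_t cleanup_key;

static void
thread_cleanup(void *value)
{
    /* runs at thread exit only because a non-NULL value was stored below */
    printf("cleaning up, stored value = %p\n", value);
}

static void *
worker(void *arg)
{
    (void)arg;
    /* mark this thread as needing cleanup, exactly like
     * gf_thread_needs_cleanup() does with free_key */
    (void)pthread_setspecific(cleanup_key, (void *)1);
    return NULL;
}

int
main(void)
{
    pthread_t t;

    pthread_key_create(&cleanup_key, thread_cleanup);
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}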
static void
diff --git a/libglusterfs/src/glusterfs/client_t.h b/libglusterfs/src/glusterfs/client_t.h
index 8ef3665a9c2..a2c508e1d5c 100644
--- a/libglusterfs/src/glusterfs/client_t.h
+++ b/libglusterfs/src/glusterfs/client_t.h
@@ -15,6 +15,17 @@
#include "glusterfs/locking.h" /* for gf_lock_t, not included by glusterfs.h */
#include "glusterfs/atomic.h" /* for gf_atomic_t */
+/* The auth_data structure is required by the RPC layer. But as it is also used
+ * in client_t structure validation and comparison, it is critical that it is
+ * defined in the larger scope of libglusterfs instead of libgfrpc. With this
+ * change, even RPC will use this structure. */
+#define GF_CLIENTT_AUTH_BYTES 400
+typedef struct client_auth_data {
+ int flavour;
+ int datalen;
+ char authdata[GF_CLIENTT_AUTH_BYTES];
+} client_auth_data_t;
+
struct client_ctx {
void *ctx_key;
void *ctx_value;
@@ -78,21 +89,12 @@ typedef struct clienttable clienttable_t;
*/
#define GF_CLIENTENTRY_ALLOCATED -2
-struct rpcsvc_auth_data;
-
-client_t *
-gf_client_get(xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid,
- char *subdir_mount);
-
void
gf_client_put(client_t *client, gf_boolean_t *detached);
clienttable_t *
gf_clienttable_alloc(void);
-void
-gf_client_clienttable_destroy(clienttable_t *clienttable);
-
client_t *
gf_client_ref(client_t *client);
@@ -138,4 +140,8 @@ gf_client_dump_inodes(xlator_t *this);
int
gf_client_disconnect(client_t *client);
+client_t *
+gf_client_get(xlator_t *this, client_auth_data_t *cred, char *client_uid,
+ char *subdir_mount);
+
#endif /* _CLIENT_T_H */
diff --git a/libglusterfs/src/glusterfs/common-utils.h b/libglusterfs/src/glusterfs/common-utils.h
index d52904021e1..f297fdab5c9 100644
--- a/libglusterfs/src/glusterfs/common-utils.h
+++ b/libglusterfs/src/glusterfs/common-utils.h
@@ -18,6 +18,7 @@
#include <string.h>
#include <assert.h>
#include <pthread.h>
+#include <unistd.h>
#include <openssl/md5.h>
#ifndef GF_BSD_HOST_OS
#include <alloca.h>
@@ -26,6 +27,11 @@
#include <fnmatch.h>
#include <uuid/uuid.h>
+/* FreeBSD, etc. */
+#ifndef __BITS_PER_LONG
+#define __BITS_PER_LONG (CHAR_BIT * (sizeof(long)))
+#endif
+
#ifndef ffsll
#define ffsll(x) __builtin_ffsll(x)
#endif
@@ -42,7 +48,6 @@ trap(void);
#include "glusterfs/compat-uuid.h"
#include "glusterfs/iatt.h"
#include "glusterfs/libglusterfs-messages.h"
-#include "protocol-common.h"
#define STRINGIFY(val) #val
#define TOSTRING(val) STRINGIFY(val)
@@ -78,7 +83,6 @@ trap(void);
#define GF_UNIT_PERCENT_STRING "%"
#define GEOREP "geo-replication"
-#define GHADOOP "glusterfs-hadoop"
#define GLUSTERD_NAME "glusterd"
#define GF_SELINUX_XATTR_KEY "security.selinux"
@@ -116,12 +120,16 @@ trap(void);
#define GF_HOUR_IN_SECONDS (60 * 60)
#define GF_DAY_IN_SECONDS (24 * 60 * 60)
#define GF_WEEK_IN_SECONDS (7 * 24 * 60 * 60)
+#define GF_SEC_IN_NS 1000000000
+#define GF_MS_IN_NS 1000000
+#define GF_US_IN_NS 1000
/* Default timeout for both barrier and changelog translator */
#define BARRIER_TIMEOUT "120"
/* Default value of signing waiting time to sign a file for bitrot */
#define SIGNING_TIMEOUT "120"
+#define BR_WORKERS "4"
/* xxhash */
#define GF_XXH64_DIGEST_LENGTH 8
@@ -146,6 +154,9 @@ trap(void);
#define GF_THREAD_NAME_LIMIT 16
#define GF_THREAD_NAME_PREFIX "glfs_"
+/* Advisory buffer size for formatted timestamps (see gf_time_fmt) */
+#define GF_TIMESTR_SIZE 256
+
/*
* we could have initialized these as +ve values and treated
* them as negative while comparing etc.. (which would have
@@ -165,7 +176,8 @@ enum _gf_special_pid {
GF_CLIENT_PID_SCRUB = -9,
GF_CLIENT_PID_TIER_DEFRAG = -10,
GF_SERVER_PID_TRASH = -11,
- GF_CLIENT_PID_ADD_REPLICA_MOUNT = -12
+ GF_CLIENT_PID_ADD_REPLICA_MOUNT = -12,
+ GF_CLIENT_PID_SET_UTIME = -13,
};
enum _gf_xlator_ipc_targets {
@@ -177,6 +189,12 @@ enum _gf_xlator_ipc_targets {
typedef enum _gf_special_pid gf_special_pid_t;
typedef enum _gf_xlator_ipc_targets _gf_xlator_ipc_targets_t;
+/* Array to hold custom xattr keys */
+extern char *xattrs_to_heal[];
+
+char **
+get_xattrs_to_heal();
+
/* The DHT file rename operation is not a straightforward rename.
* It involves creating linkto and linkfiles, and can unlink or rename the
* source file depending on the hashed and cached subvols for the source
@@ -237,6 +255,8 @@ list_node_del(struct list_node *node);
struct dnscache *
gf_dnscache_init(time_t ttl);
+void
+gf_dnscache_deinit(struct dnscache *cache);
struct dnscache_entry *
gf_dnscache_entry_init(void);
void
@@ -423,9 +443,6 @@ BIT_VALUE(unsigned char *array, unsigned int index)
} \
} while (0)
-#define GF_FILE_CONTENT_REQUESTED(_xattr_req, _content_limit) \
- (dict_get_uint64(_xattr_req, "glusterfs.content", _content_limit) == 0)
-
#ifdef DEBUG
#define GF_ASSERT(x) assert(x);
#else
@@ -438,6 +455,15 @@ BIT_VALUE(unsigned char *array, unsigned int index)
} while (0)
#endif
+/* Compile-time assert, borrowed from Linux kernel. */
+#ifdef HAVE_STATIC_ASSERT
+#define GF_STATIC_ASSERT(expr, ...) \
+ __gf_static_assert(expr, ##__VA_ARGS__, #expr)
+#define __gf_static_assert(expr, msg, ...) _Static_assert(expr, msg)
+#else
+#define GF_STATIC_ASSERT(expr, ...)
+#endif
+
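Usage mirrors C11 _Static_assert, and the invocation added to gf-event.h later in this patch is one in-tree example; a local sketch would simply be:

    GF_STATIC_ASSERT(sizeof(uint64_t) == 8, "uint64_t must be 8 bytes wide");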
#define GF_ABORT(msg...) \
do { \
gf_msg_callingfn("", GF_LOG_CRITICAL, 0, LG_MSG_ASSERTION_FAILED, \
@@ -468,18 +494,15 @@ union gf_sock_union {
#define IOV_MIN(n) min(IOV_MAX, n)
-#define GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scr) \
- do { \
- entry = NULL; \
- if (dir) { \
- entry = sys_readdir(dir, scr); \
- while (entry && (!strcmp(entry->d_name, ".") || \
- !fnmatch("*.tmp", entry->d_name, 0) || \
- !strcmp(entry->d_name, ".."))) { \
- entry = sys_readdir(dir, scr); \
- } \
- } \
- } while (0)
+static inline gf_boolean_t
+gf_irrelevant_entry(struct dirent *entry)
+{
+ GF_ASSERT(entry);
+
+ return (!strcmp(entry->d_name, ".") ||
+ !fnmatch("*.tmp", entry->d_name, 0) ||
+ !strcmp(entry->d_name, ".."));
+}
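Call sites that used the removed GF_SKIP_IRRELEVANT_ENTRIES macro can now do the filtering in their own loop; a hedged sketch follows (the sys_readdir() call shape mirrors the old macro, while the scratch-buffer declaration is an assumption):

    struct dirent *entry = NULL;
    struct dirent scratch[2] = {{0}};

    while ((entry = sys_readdir(dir, scratch)) != NULL) {
        if (gf_irrelevant_entry(entry))
            continue; /* skip ".", ".." and "*.tmp" entries */
        /* process entry->d_name ... */
    }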
static inline void
iov_free(struct iovec *vector, int count)
@@ -790,7 +813,7 @@ typedef enum {
} gf_timefmts;
static inline char *
-gf_time_fmt(char *dst, size_t sz_dst, time_t utime, unsigned int fmt)
+gf_time_fmt_tv(char *dst, size_t sz_dst, struct timeval *tv, unsigned int fmt)
{
extern void _gf_timestuff(const char ***, const char ***);
static gf_timefmts timefmt_last = (gf_timefmts)-1;
@@ -798,6 +821,8 @@ gf_time_fmt(char *dst, size_t sz_dst, time_t utime, unsigned int fmt)
static const char **zeros;
struct tm tm, *res;
int localtime = 0;
+ int len = 0;
+ int pos = 0;
if (timefmt_last == ((gf_timefmts)-1)) {
_gf_timestuff(&fmts, &zeros);
@@ -807,15 +832,35 @@ gf_time_fmt(char *dst, size_t sz_dst, time_t utime, unsigned int fmt)
fmt = gf_timefmt_default;
}
localtime = gf_log_get_localtime();
- res = localtime ? localtime_r(&utime, &tm) : gmtime_r(&utime, &tm);
- if (utime && (res != NULL)) {
- strftime(dst, sz_dst, fmts[fmt], &tm);
+ res = localtime ? localtime_r(&tv->tv_sec, &tm)
+ : gmtime_r(&tv->tv_sec, &tm);
+ if (tv->tv_sec && (res != NULL)) {
+ len = strftime(dst, sz_dst, fmts[fmt], &tm);
+ if (len == 0)
+ return dst;
+ pos += len;
+ if (tv->tv_usec >= 0) {
+ len = snprintf(dst + pos, sz_dst - pos, ".%" GF_PRI_SUSECONDS,
+ tv->tv_usec);
+ if (len >= sz_dst - pos)
+ return dst;
+ pos += len;
+ }
+ strftime(dst + pos, sz_dst - pos, " %z", &tm);
} else {
strncpy(dst, "N/A", sz_dst);
}
return dst;
}
+static inline char *
+gf_time_fmt(char *dst, size_t sz_dst, time_t utime, unsigned int fmt)
+{
+ struct timeval tv = {utime, -1};
+
+ return gf_time_fmt_tv(dst, sz_dst, &tv, fmt);
+}
+
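A usage sketch of the microsecond-aware formatter added above, sized with the new GF_TIMESTR_SIZE constant; gettimeofday() as the time source and gf_timefmt_FT as the format are illustrative choices:

    char timestr[GF_TIMESTR_SIZE] = {0};
    struct timeval tv = {0};

    gettimeofday(&tv, NULL);
    gf_time_fmt_tv(timestr, sizeof(timestr), &tv, gf_timefmt_FT);
    /* e.g. "2020-01-01 12:00:00.123456 +0000"; the gf_time_fmt() wrapper keeps
     * the old seconds-only output by passing tv_usec = -1 */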
/* This function helps us use gfid (unique identity) to generate inode's unique
* number in glusterfs.
*/
@@ -906,6 +951,8 @@ gf_string2percent_or_bytesize(const char *str, double *n,
int
gf_string2boolean(const char *str, gf_boolean_t *b);
int
+gf_strn2boolean(const char *str, const int len, gf_boolean_t *b);
+int
gf_string2percent(const char *str, double *n);
int
gf_string2time(const char *str, uint32_t *n);
@@ -992,8 +1039,6 @@ gf_is_str_int(const char *value);
char *gf_uint64_2human_readable(uint64_t);
int
-get_vol_type(int type, int dist_count, int brick_count);
-int
validate_brick_name(char *brick);
char *
get_host_name(char *word, char **host);
@@ -1145,8 +1190,6 @@ gf_getgrouplist(const char *user, gid_t group, gid_t **groups);
int
glusterfs_compute_sha256(const unsigned char *content, size_t size,
char *sha256_hash);
-char *
-get_struct_variable(int mem_num, gf_gsync_status_t *sts_val);
char *
gf_strncpy(char *dest, const char *src, const size_t dest_size);
@@ -1166,4 +1209,48 @@ find_xlator_option_in_cmd_args_t(const char *option_name, cmd_args_t *args);
int
gf_d_type_from_ia_type(ia_type_t type);
+int
+gf_syncfs(int fd);
+
+int
+gf_nanosleep(uint64_t nsec);
+
+static inline time_t
+gf_time(void)
+{
+ return time(NULL);
+}
+
+/* Return delta value in microseconds. */
+
+static inline double
+gf_tvdiff(struct timeval *start, struct timeval *end)
+{
+ struct timeval t;
+
+ if (start->tv_usec > end->tv_usec)
+ t.tv_sec = end->tv_sec - 1, t.tv_usec = end->tv_usec + 1000000;
+ else
+ t.tv_sec = end->tv_sec, t.tv_usec = end->tv_usec;
+
+ return (double)(t.tv_sec - start->tv_sec) * 1e6 +
+ (double)(t.tv_usec - start->tv_usec);
+}
+
+/* Return delta value in nanoseconds. */
+
+static inline double
+gf_tsdiff(struct timespec *start, struct timespec *end)
+{
+ struct timespec t;
+
+ if (start->tv_nsec > end->tv_nsec)
+ t.tv_sec = end->tv_sec - 1, t.tv_nsec = end->tv_nsec + 1000000000;
+ else
+ t.tv_sec = end->tv_sec, t.tv_nsec = end->tv_nsec;
+
+ return (double)(t.tv_sec - start->tv_sec) * 1e9 +
+ (double)(t.tv_nsec - start->tv_nsec);
+}
+
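The two delta helpers pair naturally with gettimeofday()/clock_gettime(); for instance, timing an operation in microseconds (the log domain string is just a placeholder):

    struct timeval start = {0}, end = {0};

    gettimeofday(&start, NULL);
    /* ... operation being timed ... */
    gettimeofday(&end, NULL);
    gf_msg_debug("measure", 0, "elapsed: %.0f us", gf_tvdiff(&start, &end));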
#endif /* _COMMON_UTILS_H */
diff --git a/libglusterfs/src/glusterfs/compat.h b/libglusterfs/src/glusterfs/compat.h
index d2686499973..bf00d903152 100644
--- a/libglusterfs/src/glusterfs/compat.h
+++ b/libglusterfs/src/glusterfs/compat.h
@@ -528,6 +528,9 @@ dirname_r(char *path);
/* Use run API, see run.h */
#include <stdlib.h> /* system(), mkostemp() */
#include <stdio.h> /* popen() */
+#ifdef GF_LINUX_HOST_OS
+#include <sys/sysmacros.h>
+#endif
#pragma GCC poison system mkostemp popen
#endif
diff --git a/libglusterfs/src/glusterfs/dict.h b/libglusterfs/src/glusterfs/dict.h
index 35337251360..d0467c6dfb6 100644
--- a/libglusterfs/src/glusterfs/dict.h
+++ b/libglusterfs/src/glusterfs/dict.h
@@ -25,9 +25,6 @@ typedef struct _data_pair data_pair_t;
#define dict_add_sizen(this, key, value) dict_addn(this, key, SLEN(key), value)
-#define dict_get_with_ref_sizen(this, key, value) \
- dict_get_with_refn(this, key, SLEN(key), value)
-
#define dict_get_sizen(this, key) dict_getn(this, key, SLEN(key))
#define dict_del_sizen(this, key) dict_deln(this, key, SLEN(key))
@@ -97,9 +94,8 @@ typedef struct _data_pair data_pair_t;
struct _data {
char *data;
gf_atomic_t refcount;
- gf_lock_t lock;
gf_dict_data_type_t data_type;
- int32_t len;
+ uint32_t len;
gf_boolean_t is_static;
};
@@ -119,12 +115,12 @@ struct _dict {
gf_atomic_t refcount;
data_pair_t **members;
data_pair_t *members_list;
- char *extra_free;
char *extra_stdfree;
gf_lock_t lock;
data_pair_t *members_internal;
data_pair_t free_pair;
- gf_boolean_t free_pair_in_use;
+ /* Variable to store total keylen + value->len */
+ uint32_t totkvlen;
};
typedef gf_boolean_t (*dict_match_t)(dict_t *d, char *k, data_t *v, void *data);
@@ -139,6 +135,7 @@ int32_t
dict_set(dict_t *this, char *key, data_t *value);
int32_t
dict_setn(dict_t *this, char *key, const int keylen, data_t *value);
+
/* function to set a new key/value pair (without checking for duplicate) */
int32_t
dict_add(dict_t *this, char *key, data_t *value);
@@ -146,8 +143,6 @@ int32_t
dict_addn(dict_t *this, char *key, const int keylen, data_t *value);
int
dict_get_with_ref(dict_t *this, char *key, data_t **data);
-int
-dict_get_with_refn(dict_t *this, char *key, const int keylen, data_t **data);
data_t *
dict_get(dict_t *this, char *key);
data_t *
diff --git a/libglusterfs/src/glusterfs/fd.h b/libglusterfs/src/glusterfs/fd.h
index 28906d34e4d..3ffaaa60504 100644
--- a/libglusterfs/src/glusterfs/fd.h
+++ b/libglusterfs/src/glusterfs/fd.h
@@ -106,6 +106,9 @@ fd_ref(fd_t *fd);
void
fd_unref(fd_t *fd);
+void
+fd_close(fd_t *fd);
+
fd_t *
fd_create(struct _inode *inode, pid_t pid);
diff --git a/libglusterfs/src/glusterfs/gf-event.h b/libglusterfs/src/glusterfs/gf-event.h
index c0f05e7c83b..40f8fbdf10a 100644
--- a/libglusterfs/src/glusterfs/gf-event.h
+++ b/libglusterfs/src/glusterfs/gf-event.h
@@ -12,6 +12,7 @@
#define _GF_EVENT_H_
#include <pthread.h>
+#include "common-utils.h"
#include "list.h"
struct event_pool;
@@ -31,6 +32,9 @@ typedef void (*event_handler_t)(int fd, int idx, int gen, void *data,
#define EVENT_EPOLL_SLOTS 1024
#define EVENT_MAX_THREADS 1024
+/* See rpcsvc.h to check why. */
+GF_STATIC_ASSERT(EVENT_MAX_THREADS % __BITS_PER_LONG == 0);
+
struct event_pool {
struct event_ops *ops;
diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index e19a7cf8b95..b22eaae6c2f 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -45,7 +45,7 @@
1 /* MIN is the fresh start op-version, mostly \
should not change */
#define GD_OP_VERSION_MAX \
- GD_OP_VERSION_7_0 /* MAX VERSION is the maximum \
+ GD_OP_VERSION_9_0 /* MAX VERSION is the maximum \
count in VME table, should \
keep changing with \
introduction of newer \
@@ -116,6 +116,13 @@
#define GD_OP_VERSION_6_0 60000 /* Op-version for GlusterFS 6.0 */
#define GD_OP_VERSION_7_0 70000 /* Op-version for GlusterFS 7.0 */
+#define GD_OP_VERSION_7_1 70100 /* Op-version for GlusterFS 7.1 */
+#define GD_OP_VERSION_7_2 70200 /* Op-version for GlusterFS 7.2 */
+#define GD_OP_VERSION_7_3 70300 /* Op-version for GlusterFS 7.3 */
+
+#define GD_OP_VERSION_8_0 80000 /* Op-version for GlusterFS 8.0 */
+
+#define GD_OP_VERSION_9_0 90000 /* Op-version for GlusterFS 9.0 */
#define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_3_6_0
@@ -162,6 +169,9 @@ glusterfs_leaseid_exist(void);
int
glusterfs_globals_init(glusterfs_ctx_t *ctx);
+void
+gf_thread_needs_cleanup(void);
+
struct tvec_base *
glusterfs_ctx_tw_get(glusterfs_ctx_t *ctx);
void
diff --git a/libglusterfs/src/glusterfs/glusterfs-acl.h b/libglusterfs/src/glusterfs/glusterfs-acl.h
index cae55e8062f..987bf5fab0b 100644
--- a/libglusterfs/src/glusterfs/glusterfs-acl.h
+++ b/libglusterfs/src/glusterfs/glusterfs-acl.h
@@ -143,7 +143,7 @@ gf_posix_acl_get_key(const acl_type_t type)
return acl_key;
}
-static inline const acl_type_t
+static inline acl_type_t
gf_posix_acl_get_type(const char *key)
{
acl_type_t type = 0;
diff --git a/libglusterfs/src/glusterfs/glusterfs-fops.h b/libglusterfs/src/glusterfs/glusterfs-fops.h
new file mode 100644
index 00000000000..030b2701608
--- /dev/null
+++ b/libglusterfs/src/glusterfs/glusterfs-fops.h
@@ -0,0 +1,241 @@
+/*
+ Copyright (c) 2008-2019 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERFS_FOPS_H_
+#define _GLUSTERFS_FOPS_H_
+
+#include <glusterfs/compat.h>
+
+enum glusterfs_fop_t {
+ GF_FOP_NULL = 0,
+ GF_FOP_STAT = 0 + 1,
+ GF_FOP_READLINK = 0 + 2,
+ GF_FOP_MKNOD = 0 + 3,
+ GF_FOP_MKDIR = 0 + 4,
+ GF_FOP_UNLINK = 0 + 5,
+ GF_FOP_RMDIR = 0 + 6,
+ GF_FOP_SYMLINK = 0 + 7,
+ GF_FOP_RENAME = 0 + 8,
+ GF_FOP_LINK = 0 + 9,
+ GF_FOP_TRUNCATE = 0 + 10,
+ GF_FOP_OPEN = 0 + 11,
+ GF_FOP_READ = 0 + 12,
+ GF_FOP_WRITE = 0 + 13,
+ GF_FOP_STATFS = 0 + 14,
+ GF_FOP_FLUSH = 0 + 15,
+ GF_FOP_FSYNC = 0 + 16,
+ GF_FOP_SETXATTR = 0 + 17,
+ GF_FOP_GETXATTR = 0 + 18,
+ GF_FOP_REMOVEXATTR = 0 + 19,
+ GF_FOP_OPENDIR = 0 + 20,
+ GF_FOP_FSYNCDIR = 0 + 21,
+ GF_FOP_ACCESS = 0 + 22,
+ GF_FOP_CREATE = 0 + 23,
+ GF_FOP_FTRUNCATE = 0 + 24,
+ GF_FOP_FSTAT = 0 + 25,
+ GF_FOP_LK = 0 + 26,
+ GF_FOP_LOOKUP = 0 + 27,
+ GF_FOP_READDIR = 0 + 28,
+ GF_FOP_INODELK = 0 + 29,
+ GF_FOP_FINODELK = 0 + 30,
+ GF_FOP_ENTRYLK = 0 + 31,
+ GF_FOP_FENTRYLK = 0 + 32,
+ GF_FOP_XATTROP = 0 + 33,
+ GF_FOP_FXATTROP = 0 + 34,
+ GF_FOP_FGETXATTR = 0 + 35,
+ GF_FOP_FSETXATTR = 0 + 36,
+ GF_FOP_RCHECKSUM = 0 + 37,
+ GF_FOP_SETATTR = 0 + 38,
+ GF_FOP_FSETATTR = 0 + 39,
+ GF_FOP_READDIRP = 0 + 40,
+ GF_FOP_FORGET = 0 + 41,
+ GF_FOP_RELEASE = 0 + 42,
+ GF_FOP_RELEASEDIR = 0 + 43,
+ GF_FOP_GETSPEC = 0 + 44,
+ GF_FOP_FREMOVEXATTR = 0 + 45,
+ GF_FOP_FALLOCATE = 0 + 46,
+ GF_FOP_DISCARD = 0 + 47,
+ GF_FOP_ZEROFILL = 0 + 48,
+ GF_FOP_IPC = 0 + 49,
+ GF_FOP_SEEK = 0 + 50,
+ GF_FOP_LEASE = 0 + 51,
+ GF_FOP_COMPOUND = 0 + 52,
+ GF_FOP_GETACTIVELK = 0 + 53,
+ GF_FOP_SETACTIVELK = 0 + 54,
+ GF_FOP_PUT = 0 + 55,
+ GF_FOP_ICREATE = 0 + 56,
+ GF_FOP_NAMELINK = 0 + 57,
+ GF_FOP_COPY_FILE_RANGE = 0 + 58,
+ GF_FOP_MAXVALUE = 0 + 59,
+};
+typedef enum glusterfs_fop_t glusterfs_fop_t;
+
+enum glusterfs_event_t {
+ GF_EVENT_PARENT_UP = 1,
+ GF_EVENT_POLLIN = 1 + 1,
+ GF_EVENT_POLLOUT = 1 + 2,
+ GF_EVENT_POLLERR = 1 + 3,
+ GF_EVENT_CHILD_UP = 1 + 4,
+ GF_EVENT_CHILD_DOWN = 1 + 5,
+ GF_EVENT_CHILD_CONNECTING = 1 + 6,
+ GF_EVENT_CLEANUP = 9,
+ GF_EVENT_TRANSPORT_CONNECTED = 9 + 1,
+ GF_EVENT_VOLFILE_MODIFIED = 9 + 2,
+ GF_EVENT_GRAPH_NEW = 9 + 3,
+ GF_EVENT_TRANSLATOR_INFO = 9 + 4,
+ GF_EVENT_TRANSLATOR_OP = 9 + 5,
+ GF_EVENT_AUTH_FAILED = 9 + 6,
+ GF_EVENT_VOLUME_DEFRAG = 9 + 7,
+ GF_EVENT_PARENT_DOWN = 9 + 8,
+ GF_EVENT_VOLUME_BARRIER_OP = 9 + 9,
+ GF_EVENT_UPCALL = 9 + 10,
+ GF_EVENT_SCRUB_STATUS = 9 + 11,
+ GF_EVENT_SOME_DESCENDENT_DOWN = 9 + 12,
+ GF_EVENT_SCRUB_ONDEMAND = 9 + 13,
+ GF_EVENT_SOME_DESCENDENT_UP = 9 + 14,
+ GF_EVENT_CHILD_PING = 9 + 15,
+ GF_EVENT_MAXVAL = 9 + 16,
+};
+typedef enum glusterfs_event_t glusterfs_event_t;
+
+enum gf_op_type_t {
+ GF_OP_TYPE_NULL = 0,
+ GF_OP_TYPE_FOP = 0 + 1,
+ GF_OP_TYPE_MGMT = 0 + 2,
+ GF_OP_TYPE_MAX = 0 + 3,
+};
+typedef enum gf_op_type_t gf_op_type_t;
+
+enum glusterfs_lk_cmds_t {
+ GF_LK_GETLK = 0,
+ GF_LK_SETLK = 0 + 1,
+ GF_LK_SETLKW = 0 + 2,
+ GF_LK_RESLK_LCK = 0 + 3,
+ GF_LK_RESLK_LCKW = 0 + 4,
+ GF_LK_RESLK_UNLCK = 0 + 5,
+ GF_LK_GETLK_FD = 0 + 6,
+};
+typedef enum glusterfs_lk_cmds_t glusterfs_lk_cmds_t;
+
+enum glusterfs_lk_types_t {
+ GF_LK_F_RDLCK = 0,
+ GF_LK_F_WRLCK = 0 + 1,
+ GF_LK_F_UNLCK = 0 + 2,
+ GF_LK_EOL = 0 + 3,
+};
+typedef enum glusterfs_lk_types_t glusterfs_lk_types_t;
+
+enum gf_lease_types_t {
+ NONE = 0,
+ GF_RD_LEASE = 1,
+ GF_RW_LEASE = 2,
+ GF_LEASE_MAX_TYPE = 2 + 1,
+};
+typedef enum gf_lease_types_t gf_lease_types_t;
+
+enum gf_lease_cmds_t {
+ GF_GET_LEASE = 1,
+ GF_SET_LEASE = 2,
+ GF_UNLK_LEASE = 3,
+};
+typedef enum gf_lease_cmds_t gf_lease_cmds_t;
+
+#define LEASE_ID_SIZE 16 /* 128bits */
+
+struct gf_lease {
+ gf_lease_cmds_t cmd;
+ gf_lease_types_t lease_type;
+ char lease_id[LEASE_ID_SIZE];
+ u_int lease_flags;
+};
+typedef struct gf_lease gf_lease;
+
+enum glusterfs_lk_recovery_cmds_t {
+ F_RESLK_LCK = 200,
+ F_RESLK_LCKW = 200 + 1,
+ F_RESLK_UNLCK = 200 + 2,
+ F_GETLK_FD = 200 + 3,
+};
+typedef enum glusterfs_lk_recovery_cmds_t glusterfs_lk_recovery_cmds_t;
+
+enum gf_lk_domain_t {
+ GF_LOCK_POSIX = 0,
+ GF_LOCK_INTERNAL = 1,
+};
+typedef enum gf_lk_domain_t gf_lk_domain_t;
+
+enum entrylk_cmd {
+ ENTRYLK_LOCK = 0,
+ ENTRYLK_UNLOCK = 1,
+ ENTRYLK_LOCK_NB = 2,
+};
+typedef enum entrylk_cmd entrylk_cmd;
+
+enum entrylk_type {
+ ENTRYLK_RDLCK = 0,
+ ENTRYLK_WRLCK = 1,
+};
+typedef enum entrylk_type entrylk_type;
+#define GF_MAX_LOCK_OWNER_LEN 1024 /* 1kB as per NLM */
+#define GF_LKOWNER_BUF_SIZE \
+ ((GF_MAX_LOCK_OWNER_LEN * 2) + (GF_MAX_LOCK_OWNER_LEN / 8))
+
+struct gf_lkowner_t {
+ int len;
+ char data[GF_MAX_LOCK_OWNER_LEN];
+};
+typedef struct gf_lkowner_t gf_lkowner_t;
+
+enum gf_xattrop_flags_t {
+ GF_XATTROP_ADD_ARRAY = 0,
+ GF_XATTROP_ADD_ARRAY64 = 1,
+ GF_XATTROP_OR_ARRAY = 2,
+ GF_XATTROP_AND_ARRAY = 3,
+ GF_XATTROP_GET_AND_SET = 4,
+ GF_XATTROP_ADD_ARRAY_WITH_DEFAULT = 5,
+ GF_XATTROP_ADD_ARRAY64_WITH_DEFAULT = 6,
+};
+typedef enum gf_xattrop_flags_t gf_xattrop_flags_t;
+
+enum gf_seek_what_t {
+ GF_SEEK_DATA = 0,
+ GF_SEEK_HOLE = 1,
+};
+typedef enum gf_seek_what_t gf_seek_what_t;
+
+enum gf_upcall_flags_t {
+ GF_UPCALL_NULL = 0,
+ GF_UPCALL = 1,
+ GF_UPCALL_CI_STAT = 2,
+ GF_UPCALL_CI_XATTR = 3,
+ GF_UPCALL_CI_RENAME = 4,
+ GF_UPCALL_CI_NLINK = 5,
+ GF_UPCALL_CI_FORGET = 6,
+ GF_UPCALL_LEASE_RECALL = 7,
+ GF_UPCALL_FLAGS_MAXVALUE = 8,
+};
+typedef enum gf_upcall_flags_t gf_upcall_flags_t;
+
+enum gf_dict_data_type_t {
+ GF_DATA_TYPE_UNKNOWN = 0,
+ GF_DATA_TYPE_STR_OLD = 1,
+ GF_DATA_TYPE_INT = 2,
+ GF_DATA_TYPE_UINT = 3,
+ GF_DATA_TYPE_DOUBLE = 4,
+ GF_DATA_TYPE_STR = 5,
+ GF_DATA_TYPE_PTR = 6,
+ GF_DATA_TYPE_GFUUID = 7,
+ GF_DATA_TYPE_IATT = 8,
+ GF_DATA_TYPE_MDATA = 9,
+ GF_DATA_TYPE_MAX = 10,
+};
+typedef enum gf_dict_data_type_t gf_dict_data_type_t;
+
+#endif /* !_GLUSTERFS_FOPS_H */
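The base-plus-offset spelling of these values (GF_FOP_ENTRYLK = 0 + 31, and so on) is the style rpcgen emits for XDR enums, and GF_FOP_MAXVALUE serves as the bound for dense per-fop tables such as the latencies[GF_FOP_MAXVALUE] array in xlator.h further down. A minimal sketch of that indexing pattern, with hypothetical names:

/* Sketch: GF_FOP_MAXVALUE sizes a dense per-fop table indexed by the enum. */
static uint64_t fop_counters[GF_FOP_MAXVALUE];

static void
count_fop(glusterfs_fop_t fop)
{
    if (fop < GF_FOP_MAXVALUE)   /* enum values index the table directly */
        fop_counters[fop]++;
}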
diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
index 3e010fa93c1..e6425618b7f 100644
--- a/libglusterfs/src/glusterfs/glusterfs.h
+++ b/libglusterfs/src/glusterfs/glusterfs.h
@@ -31,21 +31,7 @@
#include <limits.h> /* For PATH_MAX */
#include <openssl/sha.h>
-#include "glusterfs-fops.h" /* generated XDR values for FOPs */
-
-#ifndef IXDR_GET_LONG
-#define IXDR_GET_LONG(buf) ((long)IXDR_GET_U_INT32(buf))
-#endif
-#ifndef IXDR_PUT_LONG
-#define IXDR_PUT_LONG(buf, v) ((long)IXDR_PUT_INT32(buf, (long)(v)))
-#endif
-#ifndef IXDR_GET_U_LONG
-#define IXDR_GET_U_LONG(buf) ((u_long)IXDR_GET_LONG(buf))
-#endif
-#ifndef IXDR_PUT_U_LONG
-#define IXDR_PUT_U_LONG(buf, v) IXDR_PUT_LONG(buf, (long)(v))
-#endif
-
+#include "glusterfs/glusterfs-fops.h"
#include "glusterfs/list.h"
#include "glusterfs/locking.h"
#include "glusterfs/logging.h"
@@ -57,6 +43,9 @@
#define GF_YES 1
#define GF_NO 0
+#define IS_ERROR(ret) ((ret) < 0)
+#define IS_SUCCESS(ret) ((ret) >= 0)
+
#ifndef O_LARGEFILE
/* savannah bug #20053, patch for compiling on darwin */
#define O_LARGEFILE 0100000 /* from bits/fcntl.h */
@@ -91,7 +80,7 @@
#define GLUSTERD_MAX_SNAP_NAME 255
#define GLUSTERFS_SOCKET_LISTEN_BACKLOG 1024
-
+#define GLUSTERD_BRICK_SERVERS "cluster.brick-vol-servers"
#define SLEN(str) (sizeof(str) - 1)
#define ZR_MOUNTPOINT_OPT "mountpoint"
@@ -320,7 +309,6 @@ enum gf_internal_fop_indicator {
#define DHT_SKIP_NON_LINKTO_UNLINK "unlink-only-if-dht-linkto-file"
#define TIER_SKIP_NON_LINKTO_UNLINK "unlink-only-if-tier-linkto-file"
-#define TIER_LINKFILE_GFID "tier-linkfile-gfid"
#define DHT_SKIP_OPEN_FD_UNLINK "dont-unlink-for-open-fd"
#define DHT_IATT_IN_XDATA_KEY "dht-get-iatt-in-xattr"
#define DHT_MODE_IN_XDATA_KEY "dht-get-mode-in-xattr"
@@ -437,7 +425,7 @@ static const char *const FOP_PRI_STRINGS[] = {"HIGH", "NORMAL", "LOW", "LEAST"};
static inline const char *
fop_pri_to_string(gf_fop_pri_t pri)
{
- if (pri < 0)
+ if (IS_ERROR(pri))
return "UNSPEC";
if (pri >= GF_FOP_PRI_MAX)
@@ -478,6 +466,8 @@ typedef struct _server_cmdline server_cmdline_t;
#define GF_OPTION_DISABLE _gf_false
#define GF_OPTION_DEFERRED 2
+typedef enum { _gf_none, _gf_memcheck, _gf_drd } gf_valgrind_tool;
+
struct _cmd_args {
/* basic options */
char *volfile_server;
@@ -543,6 +533,7 @@ struct _cmd_args {
int client_pid_set;
unsigned uid_map_root;
int32_t lru_limit;
+ int32_t invalidate_limit;
int background_qlen;
int congestion_threshold;
char *fuse_mountopts;
@@ -569,7 +560,8 @@ struct _cmd_args {
/* Run this process with valgrind? Might want to prevent calling
* functions that prevent valgrind from working correctly, like
* dlclose(). */
- int valgrind;
+ gf_valgrind_tool vgtool;
+
int localtime_logging;
/* For the subdir mount */
@@ -589,6 +581,8 @@ struct _cmd_args {
bool global_threading;
bool brick_mux;
+
+ uint32_t fuse_dev_eperm_ratelimit_ns;
};
typedef struct _cmd_args cmd_args_t;
@@ -607,7 +601,9 @@ struct _glusterfs_graph {
in client multiplexed code path */
pthread_mutex_t mutex;
pthread_cond_t child_down_cond; /* for broadcasting CHILD_DOWN */
+ int parent_down;
char graph_uuid[128];
+ char volume_id[GF_UUID_BUF_SIZE];
};
typedef struct _glusterfs_graph glusterfs_graph_t;
@@ -737,12 +733,15 @@ struct _glusterfs_ctx {
} stats;
struct list_head volfile_list;
-
/* Add members to manage janitor threads for cleanup fd */
struct list_head janitor_fds;
- pthread_cond_t janitor_cond;
- pthread_mutex_t janitor_lock;
+ pthread_cond_t fd_cond;
+ pthread_mutex_t fd_lock;
pthread_t janitor;
+ /* This variable is used to save the total posix xlator count */
+ uint32_t pxl_count;
+
+ char volume_id[GF_UUID_BUF_SIZE]; /* Used only in protocol/client */
};
typedef struct _glusterfs_ctx glusterfs_ctx_t;
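The IS_ERROR()/IS_SUCCESS() helpers added to glusterfs.h just test the sign of a return code, matching the negative-on-failure convention used throughout the tree (fop_pri_to_string below is converted to them). A trivial sketch with a hypothetical caller:

static int
check_ret(int ret)
{
    if (IS_ERROR(ret))
        return -1;   /* any negative value is treated as failure */

    /* IS_SUCCESS(ret) holds here: zero and positive values pass. */
    return 0;
}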
diff --git a/libglusterfs/src/glusterfs/inode.h b/libglusterfs/src/glusterfs/inode.h
index 5cf2ab5080b..4b28da510c7 100644
--- a/libglusterfs/src/glusterfs/inode.h
+++ b/libglusterfs/src/glusterfs/inode.h
@@ -61,6 +61,10 @@ struct _inode_table {
xlator_t *invalidator_xl;
struct list_head invalidate; /* inodes which are in invalidation queue */
uint32_t invalidate_size; /* count of inodes in invalidation list */
+
+ /* flag to indicate whether the cleanup of the inode
+ table has started or not */
+ gf_boolean_t cleanup_started;
};
struct _dentry {
@@ -107,6 +111,7 @@ struct _inode {
struct list_head list; /* active/lru/purge */
struct _inode_ctx *_ctx; /* replacement for dict_t *(inode->ctx) */
+ bool in_invalidate_list; /* Set if inode is in table invalidate list */
bool invalidate_sent; /* Set it if invalidator_fn is called for inode */
};
diff --git a/libglusterfs/src/glusterfs/iobuf.h b/libglusterfs/src/glusterfs/iobuf.h
index 792d4fe1529..4bd443efd5e 100644
--- a/libglusterfs/src/glusterfs/iobuf.h
+++ b/libglusterfs/src/glusterfs/iobuf.h
@@ -95,14 +95,14 @@ struct iobuf_arena {
void *mem_base;
struct iobuf *iobufs; /* allocated iobufs list */
- int active_cnt;
- struct iobuf active; /* head node iobuf
- (unused by itself) */
- int passive_cnt;
+ struct iobuf active; /* head node iobuf
+ (unused by itself) */
struct iobuf passive; /* head node iobuf
(unused by itself) */
uint64_t alloc_cnt; /* total allocs in this pool */
- int max_active; /* max active buffers at a given time */
+ int active_cnt;
+ int passive_cnt;
+ int max_active; /* max active buffers at a given time */
};
struct iobuf_pool {
@@ -111,7 +111,6 @@ struct iobuf_pool {
arena */
size_t default_page_size; /* default size of iobuf */
- int arena_cnt;
struct list_head all_arenas;
struct list_head arenas[GF_VARIABLE_IOBUF_COUNT];
/* array of arenas. Each element of the array is a list of arenas
@@ -125,6 +124,7 @@ struct iobuf_pool {
uint64_t request_misses; /* mostly the requests for higher
value of iobufs */
+ int arena_cnt;
int rdma_device_count;
struct list_head *mr_list[GF_RDMA_DEVICE_COUNT];
void *device[GF_RDMA_DEVICE_COUNT];
diff --git a/libglusterfs/src/glusterfs/latency.h b/libglusterfs/src/glusterfs/latency.h
index ed47b1f0cbc..4d601bbcbd6 100644
--- a/libglusterfs/src/glusterfs/latency.h
+++ b/libglusterfs/src/glusterfs/latency.h
@@ -11,13 +11,23 @@
#ifndef __LATENCY_H__
#define __LATENCY_H__
-#include "glusterfs/glusterfs.h"
+#include <inttypes.h>
+#include <time.h>
-typedef struct fop_latency {
- double min; /* min time for the call (microseconds) */
- double max; /* max time for the call (microseconds) */
- double total; /* total time (microseconds) */
+typedef struct _gf_latency {
+ uint64_t min; /* min time for the call (nanoseconds) */
+ uint64_t max; /* max time for the call (nanoseconds) */
+ uint64_t total; /* total time (nanoseconds) */
uint64_t count;
-} fop_latency_t;
+} gf_latency_t;
+gf_latency_t *
+gf_latency_new(size_t n);
+
+void
+gf_latency_reset(gf_latency_t *lat);
+
+void
+gf_latency_update(gf_latency_t *lat, struct timespec *begin,
+ struct timespec *end);
#endif /* __LATENCY_H__ */
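A minimal usage sketch of the new gf_latency_t API, based only on the declarations above and the implementation later in latency.c; the clock choice and the GF_FREE() release are assumptions of the sketch:

#include <time.h>

static void
time_one_op(void)
{
    gf_latency_t *lat = gf_latency_new(1);  /* one slot, already reset */
    struct timespec begin, end;

    if (!lat)
        return;

    clock_gettime(CLOCK_REALTIME, &begin);
    /* ... run the operation being measured ... */
    clock_gettime(CLOCK_REALTIME, &end);

    /* Folds the elapsed nanoseconds into min/max/total and bumps count. */
    gf_latency_update(lat, &begin, &end);

    GF_FREE(lat);  /* assumed counterpart of the GF_MALLOC in gf_latency_new */
}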
diff --git a/libglusterfs/src/glusterfs/libglusterfs-messages.h b/libglusterfs/src/glusterfs/libglusterfs-messages.h
index 7e0eebbe535..cb31dd7614b 100644
--- a/libglusterfs/src/glusterfs/libglusterfs-messages.h
+++ b/libglusterfs/src/glusterfs/libglusterfs-messages.h
@@ -112,6 +112,134 @@ GLFS_MSGID(
LG_MSG_XXH64_TO_GFID_FAILED, LG_MSG_ASYNC_WARNING, LG_MSG_ASYNC_FAILURE,
LG_MSG_GRAPH_CLEANUP_FAILED, LG_MSG_GRAPH_SETUP_FAILED,
LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED,
- LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED);
+ LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, LG_MSG_DUPLICATE_ENTRY,
+ LG_MSG_THREAD_NAME_TOO_LONG, LG_MSG_SET_THREAD_FAILED,
+ LG_MSG_THREAD_CREATE_FAILED, LG_MSG_FILE_DELETE_FAILED, LG_MSG_WRONG_VALUE,
+ LG_MSG_PATH_OPEN_FAILED, LG_MSG_DISPATCH_HANDLER_FAILED,
+ LG_MSG_READ_FILE_FAILED, LG_MSG_ENTRIES_NOT_PROVIDED,
+ LG_MSG_ENTRIES_PROVIDED, LG_MSG_UNKNOWN_OPTION_TYPE,
+ LG_MSG_OPTION_DEPRECATED, LG_MSG_INVALID_INIT, LG_MSG_OBJECT_NULL,
+ LG_MSG_GRAPH_NOT_SET, LG_MSG_FILENAME_NOT_SPECIFIED, LG_MSG_STRUCT_MISS,
+ LG_MSG_METHOD_MISS, LG_MSG_INPUT_DATA_NULL, LG_MSG_OPEN_LOGFILE_FAILED);
+
+#define LG_MSG_EPOLL_FD_CREATE_FAILED_STR "epoll fd creation failed"
+#define LG_MSG_INVALID_POLL_IN_STR "invalid poll_in value"
+#define LG_MSG_INVALID_POLL_OUT_STR "invalid poll_out value"
+#define LG_MSG_SLOT_NOT_FOUND_STR "could not find slot"
+#define LG_MSG_EPOLL_FD_ADD_FAILED_STR "failed to add fd to epoll"
+#define LG_MSG_EPOLL_FD_DEL_FAILED_STR "failed to delete fd from epoll"
+#define LG_MSG_EPOLL_FD_MODIFY_FAILED_STR "failed to modify fd events"
+#define LG_MSG_STALE_FD_FOUND_STR "stale fd found"
+#define LG_MSG_GENERATION_MISMATCH_STR "generation mismatch"
+#define LG_MSG_STARTED_EPOLL_THREAD_STR "Started thread with index"
+#define LG_MSG_EXITED_EPOLL_THREAD_STR "Exited thread"
+#define LG_MSG_DISPATCH_HANDLER_FAILED_STR "Failed to dispatch handler"
+#define LG_MSG_START_EPOLL_THREAD_FAILED_STR "Failed to start thread"
+#define LG_MSG_PIPE_CREATE_FAILED_STR "pipe creation failed"
+#define LG_MSG_SET_PIPE_FAILED_STR "could not set pipe to non blocking mode"
+#define LG_MSG_REGISTER_PIPE_FAILED_STR \
+ "could not register pipe fd with poll event loop"
+#define LG_MSG_POLL_IGNORE_MULTIPLE_THREADS_STR \
+ "Currently poll does not use multiple event processing threads, count " \
+ "ignored"
+#define LG_MSG_INDEX_NOT_FOUND_STR "index not found"
+#define LG_MSG_READ_FILE_FAILED_STR "read on file returned error"
+#define LG_MSG_RB_TABLE_CREATE_FAILED_STR "Failed to create rb table bucket"
+#define LG_MSG_HASH_FUNC_ERROR_STR "Hash function not given"
+#define LG_MSG_ENTRIES_NOT_PROVIDED_STR \
+ "Both mem-pool and expected entries not provided"
+#define LG_MSG_ENTRIES_PROVIDED_STR \
+ "Both mem-pool and expected entries are provided"
+#define LG_MSG_RBTHASH_INIT_BUCKET_FAILED_STR "failed to init buckets"
+#define LG_MSG_RBTHASH_GET_ENTRY_FAILED_STR "Failed to get entry from mem-pool"
+#define LG_MSG_RBTHASH_GET_BUCKET_FAILED_STR "Failed to get bucket"
+#define LG_MSG_RBTHASH_INSERT_FAILED_STR "Failed to insert entry"
+#define LG_MSG_RBTHASH_INIT_ENTRY_FAILED_STR "Failed to init entry"
+#define LG_MSG_FILE_STAT_FAILED_STR "failed to stat"
+#define LG_MSG_INET_PTON_FAILED_STR "inet_pton() failed"
+#define LG_MSG_INVALID_ENTRY_STR "Invalid arguments"
+#define LG_MSG_NEGATIVE_NUM_PASSED_STR "negative number passed"
+#define LG_MSG_PATH_ERROR_STR "Path manipulation failed"
+#define LG_MSG_FILE_OP_FAILED_STR "could not open/read file, getting ports info"
+#define LG_MSG_RESERVED_PORTS_ERROR_STR \
+ "Not able to get reserved ports, hence there is a possibility that " \
+ "glusterfs may consume reserved port"
+#define LG_MSG_INVALID_PORT_STR "invalid port"
+#define LG_MSG_GETNAMEINFO_FAILED_STR "Could not lookup hostname"
+#define LG_MSG_GETIFADDRS_FAILED_STR "getifaddrs() failed"
+#define LG_MSG_INVALID_FAMILY_STR "Invalid family"
+#define LG_MSG_CONVERSION_FAILED_STR "String conversion failed"
+#define LG_MSG_GETADDRINFO_FAILED_STR "error in getaddrinfo"
+#define LG_MSG_DUPLICATE_ENTRY_STR "duplicate entry for volfile-server"
+#define LG_MSG_PTHREAD_NAMING_FAILED_STR "Failed to compose thread name"
+#define LG_MSG_THREAD_NAME_TOO_LONG_STR \
+ "Thread name is too long. It has been truncated"
+#define LG_MSG_SET_THREAD_FAILED_STR "Could not set thread name"
+#define LG_MSG_THREAD_CREATE_FAILED_STR "Thread creation failed"
+#define LG_MSG_PTHREAD_ATTR_INIT_FAILED_STR \
+ "Thread attribute initialization failed"
+#define LG_MSG_SKIP_HEADER_FAILED_STR "Failed to skip header section"
+#define LG_MSG_INVALID_LOG_STR "Invalid log-format"
+#define LG_MSG_UTIMENSAT_FAILED_STR "utimensat failed"
+#define LG_MSG_UTIMES_FAILED_STR "utimes failed"
+#define LG_MSG_FILE_DELETE_FAILED_STR "Unable to delete file"
+#define LG_MSG_BACKTRACE_SAVE_FAILED_STR "Failed to save the backtrace"
+#define LG_MSG_WRONG_VALUE_STR "wrong value"
+#define LG_MSG_DIR_OP_FAILED_STR "Failed to create directory"
+#define LG_MSG_DIR_IS_SYMLINK_STR "dir is symlink"
+#define LG_MSG_RESOLVE_HOSTNAME_FAILED_STR "could not resolve hostname"
+#define LG_MSG_PATH_OPEN_FAILED_STR "Unable to open path"
+#define LG_MSG_NO_MEMORY_STR "Error allocating memory"
+#define LG_MSG_EVENT_NOTIFY_FAILED_STR "notification failed"
+#define LG_MSG_PER_DENTRY_FAILED_STR "per dentry fn returned"
+#define LG_MSG_PARENT_DENTRY_NOT_FOUND_STR "parent not found"
+#define LG_MSG_DENTRY_CYCLIC_LOOP_STR \
+ "detected cyclic loop formation during inode linkage"
+#define LG_MSG_CTX_NULL_STR "_ctx not found"
+#define LG_MSG_DENTRY_NOT_FOUND_STR "dentry not found"
+#define LG_MSG_OUT_OF_RANGE_STR "out of range"
+#define LG_MSG_UNKNOWN_OPTION_TYPE_STR "unknown option type"
+#define LG_MSG_VALIDATE_RETURNS_STR "validate of returned"
+#define LG_MSG_OPTION_DEPRECATED_STR \
+ "option is deprecated, continuing with correction"
+#define LG_MSG_VALIDATE_REC_FAILED_STR "validate_rec failed"
+#define LG_MSG_MAPPING_FAILED_STR "mapping failed"
+#define LG_MSG_INIT_IOBUF_FAILED_STR "init failed"
+#define LG_MSG_ARENA_NOT_FOUND_STR "arena not found"
+#define LG_MSG_PAGE_SIZE_EXCEEDED_STR \
+ "page_size of iobufs in arena being added is greater than max available"
+#define LG_MSG_POOL_NOT_FOUND_STR "pool not found"
+#define LG_MSG_IOBUF_NOT_FOUND_STR "iobuf not found"
+#define LG_MSG_DLOPEN_FAILED_STR "DL open failed"
+#define LG_MSG_DLSYM_ERROR_STR "dlsym missing"
+#define LG_MSG_LOAD_FAILED_STR "Failed to load xlator options table"
+#define LG_MSG_INPUT_DATA_NULL_STR \
+ "input data is null. cannot update the lru limit of the inode table. " \
+ "continuing with older value."
+#define LG_MSG_INIT_FAILED_STR "No init() found"
+#define LG_MSG_VOLUME_ERROR_STR \
+ "Initialization of volume failed. review your volfile again."
+#define LG_MSG_TREE_NOT_FOUND_STR "Translator tree not found"
+#define LG_MSG_SET_LOG_LEVEL_STR "setting log level"
+#define LG_MSG_INVALID_INIT_STR \
+ "Invalid log-level. possible values are DEBUG|WARNING|ERROR|NONE|TRACE"
+#define LG_MSG_OBJECT_NULL_STR "object is null, returning false."
+#define LG_MSG_GRAPH_NOT_SET_STR "Graph is not set for xlator"
+#define LG_MSG_OPEN_LOGFILE_FAILED_STR "failed to open logfile"
+#define LG_MSG_STRDUP_ERROR_STR "failed to create metrics dir"
+#define LG_MSG_FILENAME_NOT_SPECIFIED_STR "no filename specified"
+#define LG_MSG_UNDERSIZED_BUF_STR "data value is smaller than expected"
+#define LG_MSG_DICT_SET_FAILED_STR "unable to set dict"
+#define LG_MSG_COUNT_LESS_THAN_ZERO_STR "count < 0!"
+#define LG_MSG_PAIRS_LESS_THAN_COUNT_STR "less than count data pairs found"
+#define LG_MSG_NULL_PTR_STR "pair->key is null!"
+#define LG_MSG_VALUE_LENGTH_LESS_THAN_ZERO_STR "value->len < 0"
+#define LG_MSG_INVALID_ARG_STR "buf is null"
+#define LG_MSG_KEY_OR_VALUE_NULL_STR "key or value is null"
+#define LG_MSG_NULL_VALUE_IN_DICT_STR "null value found in dict"
+#define LG_MSG_FAILED_TO_LOG_DICT_STR "Failed to log dictionary"
+#define LG_MSG_DICT_ERROR_STR "dict error"
+#define LG_MSG_STRUCT_MISS_STR "struct missing"
+#define LG_MSG_METHOD_MISS_STR "method missing(init)"
#endif /* !_LG_MESSAGES_H_ */
diff --git a/libglusterfs/src/glusterfs/lkowner.h b/libglusterfs/src/glusterfs/lkowner.h
index b49e9af6bcb..692de34bc7a 100644
--- a/libglusterfs/src/glusterfs/lkowner.h
+++ b/libglusterfs/src/glusterfs/lkowner.h
@@ -11,7 +11,7 @@
#ifndef _LK_OWNER_H
#define _LK_OWNER_H
-#include "glusterfs-fops.h"
+#include "glusterfs/glusterfs-fops.h"
/* LKOWNER to string functions */
static inline void
diff --git a/libglusterfs/src/glusterfs/logging.h b/libglusterfs/src/glusterfs/logging.h
index 31ecbfcbdb6..b3a6ac191f0 100644
--- a/libglusterfs/src/glusterfs/logging.h
+++ b/libglusterfs/src/glusterfs/logging.h
@@ -369,10 +369,6 @@ gf_log_disable_suppression_before_exit(struct _glusterfs_ctx *ctx);
gf_log((xl)->name, GF_LOG_ERROR, format, ##args)
int
-_gf_slog(const char *domain, const char *file, const char *function, int line,
- gf_loglevel_t level, const char *event, ...);
-
-int
_gf_smsg(const char *domain, const char *file, const char *function,
int32_t line, gf_loglevel_t level, int errnum, int trace,
uint64_t msgid, const char *event, ...);
@@ -381,12 +377,7 @@ _gf_smsg(const char *domain, const char *file, const char *function,
#define gf_smsg(dom, level, errnum, msgid, event...) \
do { \
_gf_smsg(dom, __FILE__, __FUNCTION__, __LINE__, level, errnum, 0, \
- msgid, ##event); \
- } while (0)
-
-#define gf_slog(dom, level, event...) \
- do { \
- _gf_slog(dom, __FILE__, __FUNCTION__, __LINE__, level, ##event); \
+ msgid, msgid##_STR, ##event); \
} while (0)
#endif /* __LOGGING_H__ */
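The reworked gf_smsg() macro now pastes msgid##_STR into the argument list, which is why libglusterfs-messages.h above defines a *_STR string for every new message ID. A rough sketch of how one of the converted call sites (the per-dentry warning in inode.c further down) expands under the new macro:

/* Converted call site: only the message ID plus key=value details are passed. */
gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PER_DENTRY_FAILED,
        "ret=%d", ret, NULL);

/* After token pasting this becomes, roughly:
 *
 * _gf_smsg(THIS->name, __FILE__, __FUNCTION__, __LINE__, GF_LOG_WARNING,
 *          0, 0, LG_MSG_PER_DENTRY_FAILED, LG_MSG_PER_DENTRY_FAILED_STR,
 *          "ret=%d", ret, NULL);
 */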
diff --git a/libglusterfs/src/glusterfs/mem-pool.h b/libglusterfs/src/glusterfs/mem-pool.h
index e0441756be7..e5b3276d047 100644
--- a/libglusterfs/src/glusterfs/mem-pool.h
+++ b/libglusterfs/src/glusterfs/mem-pool.h
@@ -202,6 +202,24 @@ out:
return dup_mem;
}
+#ifdef GF_DISABLE_MEMPOOL
+
+/* No-op memory pool, just enough to fit the current API without a massive redesign. */
+
+struct mem_pool {
+ unsigned long sizeof_type;
+};
+
+#define mem_pools_init() \
+ do { \
+ } while (0)
+#define mem_pools_fini() \
+ do { \
+ } while (0)
+#define mem_pool_thread_destructor(pool_list) (void)pool_list
+
+#else /* !GF_DISABLE_MEMPOOL */
+
/* kind of 'header' for the actual mem_pool_shared structure, this might make
* it possible to dump some more details in a statedump */
struct mem_pool {
@@ -209,10 +227,11 @@ struct mem_pool {
unsigned long sizeof_type;
unsigned long count; /* requested pool size (unused) */
char *name;
- gf_atomic_t active; /* current allocations */
+ char *xl_name;
+ gf_atomic_t active; /* current allocations */
#ifdef DEBUG
- gf_atomic_t hit; /* number of allocations served from pt_pool */
- gf_atomic_t miss; /* number of std allocs due to miss */
+ gf_atomic_t hit; /* number of allocations served from pt_pool */
+ gf_atomic_t miss; /* number of std allocs due to miss */
#endif
struct list_head owner; /* glusterfs_ctx_t->mempool_list */
glusterfs_ctx_t *ctx; /* take ctx->lock when updating owner */
@@ -244,24 +263,26 @@ typedef struct per_thread_pool {
} per_thread_pool_t;
typedef struct per_thread_pool_list {
- /*
- * These first two members are protected by the global pool lock. When
- * a thread first tries to use any pool, we create one of these. We
- * link it into the global list using thr_list so the pool-sweeper
- * thread can find it, and use pthread_setspecific so this thread can
- * find it. When the per-thread destructor runs, we "poison" the pool
- * list to prevent further allocations. This also signals to the
- * pool-sweeper thread that the list should be detached and freed after
- * the next time it's swept.
- */
+ /* thr_list is used to place the TLS pool_list into the active global list
+ * (pool_threads) or the inactive global list (pool_free_threads). It's
+ * protected by the global pool_lock. */
struct list_head thr_list;
- unsigned int poison;
+
+ /* This lock is used to update poison and the hot/cold lists of members
+ * of 'pools' array. */
+ pthread_spinlock_t lock;
+
+ /* This field is used to mark a pool_list as not being owned by any thread.
+ * This means that the sweeper thread won't be cleaning objects stored in
+ * its pools. mem_put() uses it to decide if the object being released is
+ * placed into its original pool_list or directly destroyed. */
+ bool poison;
+
/*
* There's really more than one pool, but the actual number is hidden
* in the implementation code so we just make it a single-element array
* here.
*/
- pthread_spinlock_t lock;
per_thread_pool_t pools[1];
} per_thread_pool_list_t;
@@ -284,6 +305,10 @@ void
mem_pools_init(void); /* start the pool_sweeper thread */
void
mem_pools_fini(void); /* cleanup memory pools */
+void
+mem_pool_thread_destructor(per_thread_pool_list_t *pool_list);
+
+#endif /* GF_DISABLE_MEMPOOL */
struct mem_pool *
mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type,
@@ -306,9 +331,6 @@ void
mem_pool_destroy(struct mem_pool *pool);
void
-mem_pool_thread_destructor(void);
-
-void
gf_mem_acct_enable_set(void *ctx);
#endif /* _MEM_POOL_H */
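With GF_DISABLE_MEMPOOL the lifecycle entry points above collapse to no-ops while keeping the same caller-side shape; a small sketch of code that builds identically in both configurations (the allocations in the middle are only indicated in a comment):

static void
pool_lifecycle_example(void)
{
    mem_pools_init();   /* real pool-sweeper setup, or a no-op macro */
    /* ... obtain and return objects via the usual mem-pool calls ... */
    mem_pools_fini();   /* likewise a no-op when mempools are disabled */
}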
diff --git a/libglusterfs/src/glusterfs/mem-types.h b/libglusterfs/src/glusterfs/mem-types.h
index 832f68c968e..d45d5b68c91 100644
--- a/libglusterfs/src/glusterfs/mem-types.h
+++ b/libglusterfs/src/glusterfs/mem-types.h
@@ -101,11 +101,6 @@ enum gf_common_mem_types_ {
gf_common_mt_dnscache_entry, /* used only in one location */
gf_common_mt_parser_t, /* used only in one location */
gf_common_quota_meta_t,
- /*related to gfdb library*/
- gf_mt_gfdb_link_info_t, /* used only in one location */
- gf_mt_sql_connection_t, /* used only in one location */
- gf_mt_db_conn_node_t, /* used only in one location */
- /*related to gfdb library*/
gf_common_mt_rbuf_t, /* used only in one location */
gf_common_mt_rlist_t, /* used only in one location */
gf_common_mt_rvec_t, /* used only in one location */
@@ -138,6 +133,7 @@ enum gf_common_mem_types_ {
gf_common_volfile_t,
gf_common_mt_mgmt_v3_lock_timer_t, /* used only in one location */
gf_common_mt_server_cmdline_t, /* used only in one location */
+ gf_common_mt_latency_t,
gf_common_mt_end
};
#endif
diff --git a/libglusterfs/src/glusterfs/stack.h b/libglusterfs/src/glusterfs/stack.h
index 17585508a22..536a330d38b 100644
--- a/libglusterfs/src/glusterfs/stack.h
+++ b/libglusterfs/src/glusterfs/stack.h
@@ -45,6 +45,9 @@ typedef int32_t (*ret_fn_t)(call_frame_t *frame, call_frame_t *prev_frame,
xlator_t *this, int32_t op_ret, int32_t op_errno,
...);
+void
+gf_frame_latency_update(call_frame_t *frame);
+
struct call_pool {
union {
struct list_head all_frames;
@@ -149,8 +152,6 @@ struct _call_stack {
} while (0);
struct xlator_fops;
-void
-gf_update_latency(call_frame_t *frame);
static inline void
FRAME_DESTROY(call_frame_t *frame)
@@ -158,7 +159,7 @@ FRAME_DESTROY(call_frame_t *frame)
void *local = NULL;
if (frame->root->ctx->measure_latency)
- gf_update_latency(frame);
+ gf_frame_latency_update(frame);
list_del_init(&frame->frames);
if (frame->local) {
@@ -429,6 +430,7 @@ call_stack_alloc_groups(call_stack_t *stack, int ngrps)
if (ngrps <= SMALL_GROUP_COUNT) {
stack->groups = stack->groups_small;
} else {
+ GF_FREE(stack->groups_large);
stack->groups_large = GF_CALLOC(ngrps, sizeof(gid_t),
gf_common_mt_groups_t);
if (!stack->groups_large)
@@ -442,6 +444,12 @@ call_stack_alloc_groups(call_stack_t *stack, int ngrps)
}
static inline int
+call_stack_groups_capacity(call_stack_t *stack)
+{
+ return max(stack->ngrps, SMALL_GROUP_COUNT);
+}
+
+static inline int
call_frames_count(call_stack_t *call_stack)
{
call_frame_t *pos;
diff --git a/libglusterfs/src/glusterfs/statedump.h b/libglusterfs/src/glusterfs/statedump.h
index 89d04f94587..ce082706bdf 100644
--- a/libglusterfs/src/glusterfs/statedump.h
+++ b/libglusterfs/src/glusterfs/statedump.h
@@ -127,4 +127,6 @@ gf_proc_dump_xlator_meminfo(xlator_t *this, strfd_t *strfd);
void
gf_proc_dump_xlator_profile(xlator_t *this, strfd_t *strfd);
+void
+gf_latency_statedump_and_reset(char *key, gf_latency_t *lat);
#endif /* STATEDUMP_H */
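gf_latency_statedump_and_reset() pairs naturally with the per-fop gf_latency_t slots that xlator.h now carries; a hedged sketch of a dump hook (the function name and key format are illustrative, not part of the patch):

static void
dump_fop_latencies(xlator_t *this)
{
    char key[GF_DUMP_MAX_BUF_LEN];
    int i;

    for (i = 0; i < GF_FOP_MAXVALUE; i++) {
        snprintf(key, sizeof(key), "%s.latency.%s", this->name,
                 (char *)gf_fop_list[i]);
        /* Writes the slot into the statedump and clears it for the next
         * interval (behaviour assumed from the function's name). */
        gf_latency_statedump_and_reset(key,
                                       &this->stats.interval.latencies[i]);
    }
}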
diff --git a/libglusterfs/src/glusterfs/store.h b/libglusterfs/src/glusterfs/store.h
index 6e6e3b9ad6d..a1f70c7b840 100644
--- a/libglusterfs/src/glusterfs/store.h
+++ b/libglusterfs/src/glusterfs/store.h
@@ -59,8 +59,8 @@ int32_t
gf_store_unlink_tmppath(gf_store_handle_t *shandle);
int
-gf_store_read_and_tokenize(FILE *file, char *str, int size, char **iter_key,
- char **iter_val, gf_store_op_errno_t *store_errno);
+gf_store_read_and_tokenize(FILE *file, char **iter_key, char **iter_val,
+ gf_store_op_errno_t *store_errno);
int32_t
gf_store_retrieve_value(gf_store_handle_t *handle, char *key, char **value);
@@ -95,7 +95,7 @@ int32_t
gf_store_iter_get_matching(gf_store_iter_t *iter, char *key, char **value);
int32_t
-gf_store_iter_destroy(gf_store_iter_t *iter);
+gf_store_iter_destroy(gf_store_iter_t **iter);
char *
gf_store_strerror(gf_store_op_errno_t op_errno);
diff --git a/libglusterfs/src/glusterfs/syncop.h b/libglusterfs/src/glusterfs/syncop.h
index 3614d969264..4e9241a32fc 100644
--- a/libglusterfs/src/glusterfs/syncop.h
+++ b/libglusterfs/src/glusterfs/syncop.h
@@ -16,6 +16,8 @@
#include <ucontext.h>
#include "glusterfs/dict.h" // for dict_t
#include "glusterfs/stack.h" // for call_frame_t, STACK_DESTROY, STACK_...
+#include "glusterfs/timer.h"
+
#define SYNCENV_PROC_MAX 16
#define SYNCENV_PROC_MIN 2
#define SYNCPROC_IDLE_TIME 600
@@ -29,9 +31,15 @@
#define SYNCOPCTX_PID 0x00000008
#define SYNCOPCTX_LKOWNER 0x00000010
+#ifdef HAVE_TSAN_API
+/* Currently hardcoded within thread context maintained by the sanitizer. */
+#define TSAN_THREAD_NAMELEN 64
+#endif
+
struct synctask;
struct syncproc;
struct syncenv;
+struct synccond;
typedef int (*synctask_cbk_t)(int ret, call_frame_t *frame, void *opaque);
@@ -55,9 +63,12 @@ struct synctask {
call_frame_t *opframe;
synctask_cbk_t synccbk;
synctask_fn_t syncfn;
- synctask_state_t state;
+ struct timespec *delta;
+ gf_timer_t *timer;
+ struct synccond *synccond;
void *opaque;
void *stack;
+ synctask_state_t state;
int woken;
int slept;
int ret;
@@ -65,6 +76,13 @@ struct synctask {
uid_t uid;
gid_t gid;
+#ifdef HAVE_TSAN_API
+ struct {
+ void *fiber;
+ char name[TSAN_THREAD_NAMELEN];
+ } tsan;
+#endif
+
ucontext_t ctx;
struct syncproc *proc;
@@ -77,6 +95,14 @@ struct synctask {
struct syncproc {
pthread_t processor;
+
+#ifdef HAVE_TSAN_API
+ struct {
+ void *fiber;
+ char name[TSAN_THREAD_NAMELEN];
+ } tsan;
+#endif
+
ucontext_t sched;
struct syncenv *env;
struct synctask *current;
@@ -85,19 +111,21 @@ struct syncproc {
/* hosts the scheduler thread and framework for executing synctasks */
struct syncenv {
struct syncproc proc[SYNCENV_PROC_MAX];
- int procs;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
struct list_head runq;
- int runcount;
struct list_head waitq;
- int waitcount;
+
+ int procs;
+ int procs_idle;
+
+ int runcount;
int procmin;
int procmax;
- pthread_mutex_t mutex;
- pthread_cond_t cond;
-
size_t stacksize;
int destroy; /* FLAG to mark syncenv is in destroy mode
@@ -123,6 +151,13 @@ struct synclock {
};
typedef struct synclock synclock_t;
+struct synccond {
+ pthread_mutex_t pmutex;
+ pthread_cond_t pcond;
+ struct list_head waitq;
+};
+typedef struct synccond synccond_t;
+
struct syncbarrier {
gf_boolean_t initialized; /*Set on successful initialization*/
pthread_mutex_t guard; /* guard the remaining members, pair @cond */
@@ -219,7 +254,7 @@ struct syncopctx {
#define __yield(args) \
do { \
if (args->task) { \
- synctask_yield(args->task); \
+ synctask_yield(args->task, NULL); \
} else { \
pthread_mutex_lock(&args->mutex); \
{ \
@@ -240,7 +275,7 @@ struct syncopctx {
task = synctask_get(); \
stb->task = task; \
if (task) \
- frame = task->opframe; \
+ frame = copy_frame(task->opframe); \
else \
frame = syncop_create_frame(THIS); \
\
@@ -261,10 +296,7 @@ struct syncopctx {
STACK_WIND_COOKIE(frame, cbk, (void *)stb, subvol, fn_op, params); \
\
__yield(stb); \
- if (task) \
- STACK_RESET(frame->root); \
- else \
- STACK_DESTROY(frame->root); \
+ STACK_DESTROY(frame->root); \
} while (0)
/*
@@ -313,7 +345,9 @@ synctask_join(struct synctask *task);
void
synctask_wake(struct synctask *task);
void
-synctask_yield(struct synctask *task);
+synctask_yield(struct synctask *task, struct timespec *delta);
+void
+synctask_sleep(int32_t secs);
void
synctask_waitfor(struct synctask *task, int count);
@@ -411,6 +445,24 @@ synclock_trylock(synclock_t *lock);
int
synclock_unlock(synclock_t *lock);
+int32_t
+synccond_init(synccond_t *cond);
+
+void
+synccond_destroy(synccond_t *cond);
+
+int
+synccond_wait(synccond_t *cond, synclock_t *lock);
+
+int
+synccond_timedwait(synccond_t *cond, synclock_t *lock, struct timespec *delta);
+
+void
+synccond_signal(synccond_t *cond);
+
+void
+synccond_broadcast(synccond_t *cond);
+
int
syncbarrier_init(syncbarrier_t *barrier);
int
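synccond_t looks like a condition variable for synctasks; a hedged waiter/signaller sketch built only from the declarations above, assuming pthread_cond_wait()-style lock handling (initialization via synccond_init()/synclock_init() is taken as done elsewhere):

static synclock_t lock;   /* assumed initialized with synclock_init() */
static synccond_t cond;   /* assumed initialized with synccond_init() */
static int ready;

static int
waiter(void *opaque)
{
    synclock_lock(&lock);
    while (!ready)
        /* Assumed to drop and re-take 'lock' around the wait, as
         * pthread_cond_wait() does. */
        synccond_wait(&cond, &lock);
    synclock_unlock(&lock);
    return 0;
}

static void
signaller(void)
{
    synclock_lock(&lock);
    ready = 1;
    synclock_unlock(&lock);
    synccond_signal(&cond);   /* wake one waiting task */
}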
diff --git a/libglusterfs/src/glusterfs/syscall.h b/libglusterfs/src/glusterfs/syscall.h
index 6b33c141a5e..b6d3ab4f2ad 100644
--- a/libglusterfs/src/glusterfs/syscall.h
+++ b/libglusterfs/src/glusterfs/syscall.h
@@ -96,18 +96,27 @@ int
sys_unlink(const char *pathname);
int
+sys_unlinkat(int dfd, const char *pathname);
+
+int
sys_rmdir(const char *pathname);
int
sys_symlink(const char *oldpath, const char *newpath);
int
+sys_symlinkat(const char *oldpath, int dirfd, const char *newpath);
+
+int
sys_rename(const char *oldpath, const char *newpath);
int
sys_link(const char *oldpath, const char *newpath);
int
+sys_linkat(int oldfd, const char *oldpath, int newfd, const char *newpath);
+
+int
sys_chmod(const char *path, mode_t mode);
int
@@ -257,4 +266,13 @@ ssize_t
sys_copy_file_range(int fd_in, off64_t *off_in, int fd_out, off64_t *off_out,
size_t len, unsigned int flags);
+int
+sys_kill(pid_t pid, int sig);
+
+#ifdef __FreeBSD__
+int
+sys_sysctl(const int *name, u_int namelen, void *oldp, size_t *oldlenp,
+ const void *newp, size_t newlen);
+#endif
+
#endif /* __SYSCALL_H__ */
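The new *at-style wrappers mirror their libc counterparts minus the flags argument; a tiny hedged sketch (the helper name is hypothetical, and the 0/-1 return convention is assumed from the wrapped syscalls):

static int
remove_entry_at(int dirfd, const char *name)
{
    if (sys_unlinkat(dirfd, name) < 0)
        return -1;   /* errno semantics assumed to follow unlinkat(2) */
    return 0;
}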
diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h
index 42cbdc1ac93..4fd3abdaeff 100644
--- a/libglusterfs/src/glusterfs/xlator.h
+++ b/libglusterfs/src/glusterfs/xlator.h
@@ -11,12 +11,12 @@
#ifndef _XLATOR_H
#define _XLATOR_H
-#include <stdint.h> // for int32_t
-#include <sys/types.h> // for off_t, mode_t, off64_t, dev_t
-#include "glusterfs-fops.h" // for GF_FOP_MAXVALUE, entrylk_cmd
-#include "glusterfs/atomic.h" // for gf_atomic_t
-#include "glusterfs/glusterfs.h" // for gf_boolean_t, glusterfs_ctx_t
-#include "glusterfs/compat-uuid.h" // for uuid_t
+#include <stdint.h> // for int32_t
+#include <sys/types.h> // for off_t, mode_t, off64_t, dev_t
+#include "glusterfs/glusterfs-fops.h" // for GF_FOP_MAXVALUE, entrylk_cmd
+#include "glusterfs/atomic.h" // for gf_atomic_t
+#include "glusterfs/glusterfs.h" // for gf_boolean_t, glusterfs_ctx_t
+#include "glusterfs/compat-uuid.h" // for uuid_t
#include "glusterfs/compat.h"
#include "glusterfs/event-history.h"
#include "glusterfs/dict.h"
@@ -32,6 +32,8 @@
#define GF_SET_ATTR_ATIME 0x10
#define GF_SET_ATTR_MTIME 0x20
#define GF_SET_ATTR_CTIME 0x40
+#define GF_ATTR_ATIME_NOW 0x80
+#define GF_ATTR_MTIME_NOW 0x100
#define gf_attr_mode_set(mode) ((mode)&GF_SET_ATTR_MODE)
#define gf_attr_uid_set(mode) ((mode)&GF_SET_ATTR_UID)
@@ -700,6 +702,8 @@ typedef size_t (*cbk_inodectx_size_t)(xlator_t *this, inode_t *inode);
typedef size_t (*cbk_fdctx_size_t)(xlator_t *this, fd_t *fd);
+typedef void (*cbk_fdclose_t)(xlator_t *this, fd_t *fd);
+
struct xlator_cbks {
cbk_forget_t forget;
cbk_release_t release;
@@ -710,6 +714,8 @@ struct xlator_cbks {
cbk_ictxmerge_t ictxmerge;
cbk_inodectx_size_t ictxsize;
cbk_fdctx_size_t fdctxsize;
+ cbk_fdclose_t fdclose;
+ cbk_fdclose_t fdclosedir;
};
typedef int32_t (*dumpop_priv_t)(xlator_t *this);
@@ -799,7 +805,7 @@ struct _xlator {
struct {
/* for latency measurement */
- fop_latency_t latencies[GF_FOP_MAXVALUE];
+ gf_latency_t latencies[GF_FOP_MAXVALUE];
/* for latency measurement */
fop_metrics_t metrics[GF_FOP_MAXVALUE];
@@ -858,6 +864,9 @@ struct _xlator {
/* Flag to notify got CHILD_DOWN event for detach brick */
uint32_t notify_down;
+
+ /* Flag to avoid throwing duplicate PARENT_DOWN events */
+ uint32_t parent_down;
};
/* This would be the only structure which needs to be exported by
@@ -1092,4 +1101,6 @@ mgmt_is_multiplexed_daemon(char *name);
gf_boolean_t
xlator_is_cleanup_starting(xlator_t *this);
+int
+graph_total_client_xlator(glusterfs_graph_t *graph);
#endif /* _XLATOR_H */
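The new fdclose/fdclosedir callbacks slot into struct xlator_cbks alongside the existing release hooks; a hedged sketch of a translator wiring them up (names are hypothetical, and the exact invocation point is assumed from the callback names):

static void
my_xl_fdclose(xlator_t *this, fd_t *fd)
{
    gf_log(this->name, GF_LOG_DEBUG, "fd %p is being closed", fd);
}

struct xlator_cbks cbks = {
    .fdclose = my_xl_fdclose,     /* assumed: called when an fd is closed */
    .fdclosedir = my_xl_fdclose,  /* assumed: directory counterpart */
};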
diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 51704560164..13f298eb3bd 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -21,12 +21,12 @@
#include <sys/stat.h> // for stat
#include <sys/time.h> // for gettimeofday
#include <unistd.h> // for gethostname, getpid
-#include "glusterfs-fops.h" // for GF_EVENT_GRAPH_NEW, GF_...
#include "glusterfs/common-utils.h" // for gf_strncpy, gf_time_fmt
#include "glusterfs/defaults.h"
#include "glusterfs/dict.h" // for dict_foreach, dict_set_...
#include "glusterfs/globals.h" // for xlator_t, xlator_list_t
#include "glusterfs/glusterfs.h" // for glusterfs_graph_t, glus...
+#include "glusterfs/glusterfs-fops.h" // for GF_EVENT_GRAPH_NEW, GF_...
#include "glusterfs/libglusterfs-messages.h" // for LG_MSG_GRAPH_ERROR, LG_...
#include "glusterfs/list.h" // for list_add, list_del_init
#include "glusterfs/logging.h" // for gf_msg, GF_LOG_ERROR
@@ -41,7 +41,7 @@ _gf_dump_details (int argc, char **argv)
{
extern FILE *gf_log_logfile;
int i = 0;
- char timestr[64];
+ char timestr[GF_TIMESTR_SIZE];
time_t utime = 0;
pid_t mypid = 0;
struct utsname uname_buf = {{0, }, };
@@ -469,7 +469,7 @@ _xlator_check_unknown_options(xlator_t *xl, void *data)
dict_foreach(xl->options, _log_if_unknown_option, xl);
}
-int
+static int
glusterfs_graph_unknown_options(glusterfs_graph_t *graph)
{
xlator_foreach(graph->first, _xlator_check_unknown_options, NULL);
@@ -482,7 +482,7 @@ fill_uuid(char *uuid, int size, struct timeval tv)
char hostname[50] = {
0,
};
- char now_str[64];
+ char now_str[GF_TIMESTR_SIZE];
if (gethostname(hostname, sizeof(hostname) - 1) != 0) {
gf_msg("graph", GF_LOG_ERROR, errno, LG_MSG_GETHOSTNAME_FAILED,
@@ -490,9 +490,8 @@ fill_uuid(char *uuid, int size, struct timeval tv)
hostname[sizeof(hostname) - 1] = '\0';
}
- gf_time_fmt(now_str, sizeof now_str, tv.tv_sec, gf_timefmt_dirent);
- snprintf(uuid, size, "%s-%d-%s:%" GF_PRI_SUSECONDS, hostname, getpid(),
- now_str, tv.tv_usec);
+ gf_time_fmt_tv(now_str, sizeof now_str, &tv, gf_timefmt_dirent);
+ snprintf(uuid, size, "%s-%d-%s", hostname, getpid(), now_str);
return;
}
@@ -568,14 +567,13 @@ glusterfs_graph_prepare(glusterfs_graph_t *graph, glusterfs_ctx_t *ctx,
} else {
ret = glusterfs_graph_settop(graph, volume_name, _gf_false);
}
- if (!ret) {
- goto ok;
- }
- gf_msg("graph", GF_LOG_ERROR, 0, LG_MSG_GRAPH_ERROR,
- "glusterfs graph settop failed");
- return -1;
-ok:
+ if (ret) {
+ gf_msg("graph", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_ERROR,
+ "glusterfs graph settop failed");
+ errno = EINVAL;
+ return -1;
+ }
/* XXX: WORM VOLUME */
ret = glusterfs_graph_worm(graph, ctx);
@@ -1379,7 +1377,7 @@ glusterfs_graph_cleanup(void *arg)
ret = pthread_mutex_lock(&graph->mutex);
if (ret != 0) {
gf_msg("glusterfs", GF_LOG_ERROR, EAGAIN, LG_MSG_GRAPH_CLEANUP_FAILED,
- "Failed to aquire a lock");
+ "Failed to acquire a lock");
goto out;
}
/* check and wait for CHILD_DOWN for top xlator*/
@@ -1695,6 +1693,7 @@ glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
"failed to construct the graph");
goto out;
}
+ graph->parent_down = 0;
graph->last_xl = glusterfs_get_last_xlator(graph);
for (xl = graph->first; xl; xl = xl->next) {
@@ -1763,6 +1762,7 @@ glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
if (strcmp(ctx->cmd_args.process_name, "glustershd") == 0) {
ret = glusterfs_update_mux_pid(dict, volfile_obj);
if (ret == -1) {
+ GF_FREE(volfile_obj);
goto out;
}
}
diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
index 3a1f097e1af..dbadf77442d 100644
--- a/libglusterfs/src/inode.c
+++ b/libglusterfs/src/inode.c
@@ -232,15 +232,15 @@ __foreach_ancestor_dentry(dentry_t *dentry,
ret = per_dentry_fn(dentry, data);
if (ret) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PER_DENTRY_FAILED,
- "per dentry fn returned %d", ret);
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PER_DENTRY_FAILED,
+ "ret=%d", ret, NULL);
goto out;
}
parent = dentry->parent;
if (!parent) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PARENT_DENTRY_NOT_FOUND,
- "parent not found");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PARENT_DENTRY_NOT_FOUND,
+ NULL);
goto out;
}
@@ -274,11 +274,9 @@ __is_dentry_cyclic(dentry_t *dentry)
ret = __foreach_ancestor_dentry(dentry, __check_cycle, dentry->inode);
if (ret) {
- gf_msg(dentry->inode->table->name, GF_LOG_CRITICAL, 0,
- LG_MSG_DENTRY_CYCLIC_LOOP,
- "detected cyclic loop formation during inode linkage. "
- "inode (%s) linking under itself as %s",
- uuid_utoa(dentry->inode->gfid), dentry->name);
+ gf_smsg(dentry->inode->table->name, GF_LOG_CRITICAL, 0,
+ LG_MSG_DENTRY_CYCLIC_LOOP, "gfid=%s name=%s",
+ uuid_utoa(dentry->inode->gfid), dentry->name, NULL);
}
return ret;
@@ -337,8 +335,7 @@ __inode_ctx_free(inode_t *inode)
xlator_t *old_THIS = NULL;
if (!inode->_ctx) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_CTX_NULL,
- "_ctx not found");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_CTX_NULL, NULL);
goto noctx;
}
@@ -473,10 +470,45 @@ __inode_unref(inode_t *inode, bool clear)
if (__is_root_gfid(inode->gfid))
return inode;
+ /*
+ * No need to acquire inode table's lock
+ * as __inode_unref is called after acquiring
+ * the inode table's lock.
+ */
+ if (inode->table->cleanup_started && !inode->ref)
+ /*
+ * There is a good chance that, the inode
+ * on which unref came has already been
+ * zero refed and added to the purge list.
+ * This can happen when inode table is
+ * being destroyed (glfs_fini is something
+ * which destroys the inode table).
+ *
+ * Consider a directory 'a' which has a file
+ * 'b'. Now as part of inode table destruction
+ * zero refing of inodes does not happen from
+ * leaf to the root. It happens in the order
+ * inodes are present in the list. So, in this
+ * example, the dentry of 'b' would have its
+ * parent set to the inode of 'a'. So if
+ * 'a' gets zero refed first (as part of
+ * inode table cleanup) and then 'b' has to be
+ * zero refed, then dentry_unset is called on
+ * the dentry of 'b' and it further goes on to
+ * call inode_unref on b's parent which is 'a'.
+ * In this situation, GF_ASSERT would be called
+ * below as the refcount of 'a' has been already set
+ * to zero.
+ *
+ * So return the inode if the inode table cleanup
+ * has already started and inode refcount is 0.
+ */
+ return inode;
+
this = THIS;
- if (clear && inode->invalidate_sent) {
- inode->invalidate_sent = false;
+ if (clear && inode->in_invalidate_list) {
+ inode->in_invalidate_list = false;
inode->table->invalidate_size--;
__inode_activate(inode);
}
@@ -490,7 +522,7 @@ __inode_unref(inode_t *inode, bool clear)
inode->_ctx[index].ref--;
}
- if (!inode->ref && !inode->invalidate_sent) {
+ if (!inode->ref && !inode->in_invalidate_list) {
inode->table->active_size--;
nlookup = GF_ATOMIC_GET(inode->nlookup);
@@ -526,14 +558,14 @@ __inode_ref(inode_t *inode, bool is_invalidate)
return inode;
if (!inode->ref) {
- if (inode->invalidate_sent) {
- inode->invalidate_sent = false;
+ if (inode->in_invalidate_list) {
+ inode->in_invalidate_list = false;
inode->table->invalidate_size--;
} else {
inode->table->lru_size--;
}
if (is_invalidate) {
- inode->invalidate_sent = true;
+ inode->in_invalidate_list = true;
inode->table->invalidate_size++;
list_move_tail(&inode->list, &inode->table->invalidate);
} else {
@@ -922,6 +954,7 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
inode_t *old_inode = NULL;
inode_table_t *table = NULL;
inode_t *link_inode = NULL;
+ char link_uuid_str[64] = {0}, parent_uuid_str[64] = {0};
table = inode->table;
@@ -996,11 +1029,12 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
if (!old_dentry || old_dentry->inode != link_inode) {
dentry = dentry_create(link_inode, parent, name);
if (!dentry) {
- gf_msg_callingfn(
- THIS->name, GF_LOG_ERROR, 0, LG_MSG_DENTRY_CREATE_FAILED,
- "dentry create failed on "
- "inode %s with parent %s",
- uuid_utoa(link_inode->gfid), uuid_utoa(parent->gfid));
+ gf_msg_callingfn(THIS->name, GF_LOG_ERROR, 0,
+ LG_MSG_DENTRY_CREATE_FAILED,
+ "dentry create failed on "
+ "inode %s with parent %s",
+ uuid_utoa_r(link_inode->gfid, link_uuid_str),
+ uuid_utoa_r(parent->gfid, parent_uuid_str));
errno = ENOMEM;
return NULL;
}
@@ -1206,10 +1240,10 @@ __inode_unlink(inode_t *inode, inode_t *parent, const char *name)
if (dentry) {
dentry = __dentry_unset(dentry);
} else {
- gf_msg("inode", GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
- "%s/%s: dentry not found in %s",
- uuid_utoa_r(parent->gfid, pgfid), name,
- uuid_utoa_r(inode->gfid, gfid));
+ gf_smsg("inode", GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
+ "parent-gfid=%s name=%s gfid%s",
+ uuid_utoa_r(parent->gfid, pgfid), name,
+ uuid_utoa_r(inode->gfid, gfid), NULL);
}
return dentry;
@@ -1393,10 +1427,8 @@ __inode_path(inode_t *inode, const char *name, char **bufp)
i++; /* "/" */
i += strlen(trav->name);
if (i > PATH_MAX) {
- gf_msg(table->name, GF_LOG_CRITICAL, 0, LG_MSG_DENTRY_CYCLIC_LOOP,
- "possible infinite "
- "loop detected, forcing break. name=(%s)",
- name);
+ gf_smsg(table->name, GF_LOG_CRITICAL, 0, LG_MSG_DENTRY_CYCLIC_LOOP,
+ "name=%s", name, NULL);
ret = -ENOENT;
goto out;
}
@@ -1509,6 +1541,7 @@ static int
inode_table_prune(inode_table_t *table)
{
int ret = 0;
+ int ret1 = 0;
struct list_head purge = {
0,
};
@@ -1547,6 +1580,10 @@ inode_table_prune(inode_table_t *table)
/* check for valid inode with 'nlookup' */
nlookup = GF_ATOMIC_GET(entry->nlookup);
if (nlookup) {
+ if (entry->invalidate_sent) {
+ list_move_tail(&entry->list, &table->lru);
+ continue;
+ }
__inode_ref(entry, true);
tmp = entry;
break;
@@ -1568,9 +1605,19 @@ inode_table_prune(inode_table_t *table)
if (tmp) {
xlator_t *old_THIS = THIS;
THIS = table->invalidator_xl;
- table->invalidator_fn(table->invalidator_xl, tmp);
+ ret1 = table->invalidator_fn(table->invalidator_xl, tmp);
THIS = old_THIS;
- inode_unref(tmp);
+ pthread_mutex_lock(&table->lock);
+ {
+ if (!ret1) {
+ tmp->invalidate_sent = true;
+ __inode_unref(tmp, false);
+ } else {
+ /* Move this back to the lru list */
+ __inode_unref(tmp, true);
+ }
+ }
+ pthread_mutex_unlock(&table->lock);
}
/* Just so that if purge list is handled too, then clear it off */
@@ -1679,6 +1726,8 @@ inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
;
}
+ new->cleanup_started = _gf_false;
+
__inode_table_init_root(new);
pthread_mutex_init(&new->lock, NULL);
@@ -1829,6 +1878,7 @@ inode_table_destroy(inode_table_t *inode_table)
*/
pthread_mutex_lock(&inode_table->lock);
{
+ inode_table->cleanup_started = _gf_true;
/* Process lru list first as we need to unset their dentry
* entries (the ones which may not be unset during
* '__inode_passivate' as they were hashed) which in turn
diff --git a/libglusterfs/src/iobuf.c b/libglusterfs/src/iobuf.c
index 0e37c4fc6e2..4e7d2958764 100644
--- a/libglusterfs/src/iobuf.c
+++ b/libglusterfs/src/iobuf.c
@@ -21,7 +21,7 @@
(sizeof(gf_iobuf_init_config) / (sizeof(struct iobuf_init_config)))
/* Make sure this array is sorted based on pagesize */
-struct iobuf_init_config gf_iobuf_init_config[] = {
+static const struct iobuf_init_config gf_iobuf_init_config[] = {
/* { pagesize, num_pages }, */
{128, 1024}, {512, 512}, {2 * 1024, 512}, {8 * 1024, 128},
{32 * 1024, 64}, {128 * 1024, 32}, {256 * 1024, 8}, {1 * 1024 * 1024, 2},
@@ -41,32 +41,31 @@ gf_iobuf_get_arena_index(const size_t page_size)
}
static size_t
-gf_iobuf_get_pagesize(const size_t page_size)
+gf_iobuf_get_pagesize(const size_t page_size, int *index)
{
int i;
size_t size = 0;
for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
size = gf_iobuf_init_config[i].pagesize;
- if (page_size <= size)
+ if (page_size <= size) {
+ if (index != NULL)
+ *index = i;
return size;
+ }
}
return -1;
}
-void
+static void
__iobuf_arena_init_iobufs(struct iobuf_arena *iobuf_arena)
{
- int iobuf_cnt = 0;
+ const int iobuf_cnt = iobuf_arena->page_count;
struct iobuf *iobuf = NULL;
int offset = 0;
int i = 0;
- GF_VALIDATE_OR_GOTO("iobuf", iobuf_arena, out);
-
- iobuf_cnt = iobuf_arena->page_count;
-
iobuf_arena->iobufs = GF_CALLOC(sizeof(*iobuf), iobuf_cnt,
gf_common_mt_iobuf);
if (!iobuf_arena->iobufs)
@@ -88,27 +87,23 @@ __iobuf_arena_init_iobufs(struct iobuf_arena *iobuf_arena)
iobuf++;
}
-out:
return;
}
-void
+static void
__iobuf_arena_destroy_iobufs(struct iobuf_arena *iobuf_arena)
{
int iobuf_cnt = 0;
struct iobuf *iobuf = NULL;
int i = 0;
- GF_VALIDATE_OR_GOTO("iobuf", iobuf_arena, out);
-
- iobuf_cnt = iobuf_arena->page_count;
-
if (!iobuf_arena->iobufs) {
gf_msg_callingfn(THIS->name, GF_LOG_ERROR, 0, LG_MSG_IOBUFS_NOT_FOUND,
"iobufs not found");
return;
}
+ iobuf_cnt = iobuf_arena->page_count;
iobuf = iobuf_arena->iobufs;
for (i = 0; i < iobuf_cnt; i++) {
GF_ASSERT(GF_ATOMIC_GET(iobuf->ref) == 0);
@@ -120,11 +115,10 @@ __iobuf_arena_destroy_iobufs(struct iobuf_arena *iobuf_arena)
GF_FREE(iobuf_arena->iobufs);
-out:
return;
}
-void
+static void
__iobuf_arena_destroy(struct iobuf_pool *iobuf_pool,
struct iobuf_arena *iobuf_arena)
{
@@ -143,12 +137,13 @@ out:
return;
}
-struct iobuf_arena *
+static struct iobuf_arena *
__iobuf_arena_alloc(struct iobuf_pool *iobuf_pool, size_t page_size,
int32_t num_iobufs)
{
struct iobuf_arena *iobuf_arena = NULL;
size_t rounded_size = 0;
+ int index = 0; /* unused */
GF_VALIDATE_OR_GOTO("iobuf", iobuf_pool, out);
@@ -162,7 +157,7 @@ __iobuf_arena_alloc(struct iobuf_pool *iobuf_pool, size_t page_size,
INIT_LIST_HEAD(&iobuf_arena->passive.list);
iobuf_arena->iobuf_pool = iobuf_pool;
- rounded_size = gf_iobuf_get_pagesize(page_size);
+ rounded_size = gf_iobuf_get_pagesize(page_size, &index);
iobuf_arena->page_size = rounded_size;
iobuf_arena->page_count = num_iobufs;
@@ -173,8 +168,7 @@ __iobuf_arena_alloc(struct iobuf_pool *iobuf_pool, size_t page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (iobuf_arena->mem_base == MAP_FAILED) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_MAPPING_FAILED,
- "mapping failed");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_MAPPING_FAILED, NULL);
goto err;
}
@@ -186,8 +180,7 @@ __iobuf_arena_alloc(struct iobuf_pool *iobuf_pool, size_t page_size,
__iobuf_arena_init_iobufs(iobuf_arena);
if (!iobuf_arena->iobufs) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INIT_IOBUF_FAILED,
- "init failed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INIT_IOBUF_FAILED, NULL);
goto err;
}
@@ -232,8 +225,8 @@ __iobuf_pool_add_arena(struct iobuf_pool *iobuf_pool, const size_t page_size,
if (!iobuf_arena) {
iobuf_arena = __iobuf_arena_alloc(iobuf_pool, page_size, num_pages);
if (!iobuf_arena) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_ARENA_NOT_FOUND,
- "arena not found");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_ARENA_NOT_FOUND,
+ NULL);
return NULL;
}
}
@@ -332,7 +325,6 @@ iobuf_pool_new(void)
size_t page_size = 0;
size_t arena_size = 0;
int32_t num_pages = 0;
- int index;
iobuf_pool = GF_CALLOC(sizeof(*iobuf_pool), 1, gf_common_mt_iobuf_pool);
if (!iobuf_pool)
@@ -355,28 +347,16 @@ iobuf_pool_new(void)
iobuf_pool->mr_list[i] = NULL;
}
- pthread_mutex_lock(&iobuf_pool->mutex);
- {
- for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
- page_size = gf_iobuf_init_config[i].pagesize;
- num_pages = gf_iobuf_init_config[i].num_pages;
-
- index = gf_iobuf_get_arena_index(page_size);
- if (index == -1) {
- pthread_mutex_unlock(&iobuf_pool->mutex);
- gf_msg("iobuf", GF_LOG_ERROR, 0, LG_MSG_PAGE_SIZE_EXCEEDED,
- "page_size (%zu) of iobufs in arena being added is "
- "greater than max available",
- page_size);
- return NULL;
- }
-
- __iobuf_pool_add_arena(iobuf_pool, page_size, num_pages, index);
+ /* No locking required here
+ * as no one else can use this pool yet
+ */
+ for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
+ page_size = gf_iobuf_init_config[i].pagesize;
+ num_pages = gf_iobuf_init_config[i].num_pages;
+ if (__iobuf_pool_add_arena(iobuf_pool, page_size, num_pages, i) != NULL)
arena_size += page_size * num_pages;
- }
}
- pthread_mutex_unlock(&iobuf_pool->mutex);
/* Need an arena to handle all the bigger iobuf requests */
iobuf_create_stdalloc_arena(iobuf_pool);
@@ -501,8 +481,8 @@ __iobuf_get(struct iobuf_pool *iobuf_pool, const size_t page_size,
return iobuf;
}
-struct iobuf *
-iobuf_get_from_stdalloc(struct iobuf_pool *iobuf_pool, size_t page_size)
+static struct iobuf *
+iobuf_get_from_stdalloc(struct iobuf_pool *iobuf_pool, const size_t page_size)
{
struct iobuf *iobuf = NULL;
struct iobuf_arena *iobuf_arena = NULL;
@@ -555,7 +535,7 @@ iobuf_get2(struct iobuf_pool *iobuf_pool, size_t page_size)
page_size = iobuf_pool->default_page_size;
}
- rounded_size = gf_iobuf_get_pagesize(page_size);
+ rounded_size = gf_iobuf_get_pagesize(page_size, &index);
if (rounded_size == -1) {
/* make sure to provide the requested buffer with standard
memory allocations */
@@ -569,14 +549,9 @@ iobuf_get2(struct iobuf_pool *iobuf_pool, size_t page_size)
iobuf_pool->request_misses++;
return iobuf;
- }
-
- index = gf_iobuf_get_arena_index(page_size);
- if (index == -1) {
- gf_msg("iobuf", GF_LOG_ERROR, 0, LG_MSG_PAGE_SIZE_EXCEEDED,
- "page_size (%zu) of iobufs in arena being added is "
- "greater than max available",
- page_size);
+ } else if (index == -1) {
+ gf_smsg("iobuf", GF_LOG_ERROR, 0, LG_MSG_PAGE_SIZE_EXCEEDED,
+ "page_size=%zu", page_size, NULL);
return NULL;
}
@@ -584,16 +559,16 @@ iobuf_get2(struct iobuf_pool *iobuf_pool, size_t page_size)
{
iobuf = __iobuf_get(iobuf_pool, rounded_size, index);
if (!iobuf) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_IOBUF_NOT_FOUND,
- "iobuf not found");
- goto unlock;
+ pthread_mutex_unlock(&iobuf_pool->mutex);
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_IOBUF_NOT_FOUND,
+ NULL);
+ goto post_unlock;
}
iobuf_ref(iobuf);
}
-unlock:
pthread_mutex_unlock(&iobuf_pool->mutex);
-
+post_unlock:
return iobuf;
}
@@ -639,10 +614,8 @@ iobuf_get(struct iobuf_pool *iobuf_pool)
index = gf_iobuf_get_arena_index(iobuf_pool->default_page_size);
if (index == -1) {
- gf_msg("iobuf", GF_LOG_ERROR, 0, LG_MSG_PAGE_SIZE_EXCEEDED,
- "page_size (%zu) of iobufs in arena being added is "
- "greater than max available",
- iobuf_pool->default_page_size);
+ gf_smsg("iobuf", GF_LOG_ERROR, 0, LG_MSG_PAGE_SIZE_EXCEEDED,
+ "page_size=%zu", iobuf_pool->default_page_size, NULL);
return NULL;
}
@@ -650,29 +623,26 @@ iobuf_get(struct iobuf_pool *iobuf_pool)
{
iobuf = __iobuf_get(iobuf_pool, iobuf_pool->default_page_size, index);
if (!iobuf) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_IOBUF_NOT_FOUND,
- "iobuf not found");
- goto unlock;
+ pthread_mutex_unlock(&iobuf_pool->mutex);
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_IOBUF_NOT_FOUND,
+ NULL);
+ goto out;
}
iobuf_ref(iobuf);
}
-unlock:
pthread_mutex_unlock(&iobuf_pool->mutex);
out:
return iobuf;
}
-void
+static void
__iobuf_put(struct iobuf *iobuf, struct iobuf_arena *iobuf_arena)
{
struct iobuf_pool *iobuf_pool = NULL;
int index = 0;
- GF_VALIDATE_OR_GOTO("iobuf", iobuf_arena, out);
- GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);
-
iobuf_pool = iobuf_arena->iobuf_pool;
index = gf_iobuf_get_arena_index(iobuf_arena->page_size);
@@ -725,15 +695,14 @@ iobuf_put(struct iobuf *iobuf)
iobuf_arena = iobuf->iobuf_arena;
if (!iobuf_arena) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_ARENA_NOT_FOUND,
- "arena not found");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_ARENA_NOT_FOUND, NULL);
return;
}
iobuf_pool = iobuf_arena->iobuf_pool;
if (!iobuf_pool) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_POOL_NOT_FOUND,
- "iobuf pool not found");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_POOL_NOT_FOUND, "iobuf",
+ NULL);
return;
}
@@ -972,14 +941,12 @@ iobuf_size(struct iobuf *iobuf)
GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);
if (!iobuf->iobuf_arena) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_ARENA_NOT_FOUND,
- "arena not found");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_ARENA_NOT_FOUND, NULL);
goto out;
}
if (!iobuf->iobuf_arena->iobuf_pool) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_POOL_NOT_FOUND,
- "pool not found");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_POOL_NOT_FOUND, NULL);
goto out;
}
diff --git a/libglusterfs/src/latency.c b/libglusterfs/src/latency.c
index 06a973e0e7c..ce4b0e8255d 100644
--- a/libglusterfs/src/latency.c
+++ b/libglusterfs/src/latency.c
@@ -16,35 +16,32 @@
#include "glusterfs/glusterfs.h"
#include "glusterfs/statedump.h"
-void
-gf_update_latency(call_frame_t *frame)
+gf_latency_t *
+gf_latency_new(size_t n)
{
- double elapsed;
- struct timespec *begin, *end;
-
- fop_latency_t *lat;
-
- begin = &frame->begin;
- end = &frame->end;
+ int i = 0;
+ gf_latency_t *lat = NULL;
- if (!(begin->tv_sec && end->tv_sec))
- goto out;
+ lat = GF_MALLOC(n * sizeof(*lat), gf_common_mt_latency_t);
+ if (!lat)
+ return NULL;
- elapsed = (end->tv_sec - begin->tv_sec) * 1e9 +
- (end->tv_nsec - begin->tv_nsec);
+ for (i = 0; i < n; i++) {
+ gf_latency_reset(lat + i);
+ }
+ return lat;
+}
- if (frame->op < 0 || frame->op >= GF_FOP_MAXVALUE) {
- gf_log("[core]", GF_LOG_WARNING, "Invalid frame op value: %d",
- frame->op);
+void
+gf_latency_update(gf_latency_t *lat, struct timespec *begin,
+ struct timespec *end)
+{
+ if (!(begin->tv_sec && end->tv_sec)) {
+ /* Latency measurement might have been enabled/disabled during the op */
return;
}
- /* Can happen mostly at initiator xlator, as STACK_WIND/UNWIND macros
- set it right anyways for those frames */
- if (!frame->op)
- frame->op = frame->root->op;
-
- lat = &frame->this->stats.interval.latencies[frame->op];
+ double elapsed = gf_tsdiff(begin, end);
if (lat->max < elapsed)
lat->max = elapsed;
@@ -54,40 +51,34 @@ gf_update_latency(call_frame_t *frame)
lat->total += elapsed;
lat->count++;
-out:
- return;
}
void
-gf_proc_dump_latency_info(xlator_t *xl)
+gf_latency_reset(gf_latency_t *lat)
{
- char key_prefix[GF_DUMP_MAX_BUF_LEN];
- char key[GF_DUMP_MAX_BUF_LEN];
- int i;
-
- snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.latency", xl->name);
- gf_proc_dump_add_section("%s", key_prefix);
-
- for (i = 0; i < GF_FOP_MAXVALUE; i++) {
- gf_proc_dump_build_key(key, key_prefix, "%s", (char *)gf_fop_list[i]);
-
- fop_latency_t *lat = &xl->stats.interval.latencies[i];
+ if (!lat)
+ return;
+ memset(lat, 0, sizeof(*lat));
+ lat->min = ULLONG_MAX;
+ /* make sure 'min' is set to high value, so it would be
+ properly set later */
+}
- /* Doesn't make sense to continue if there are no fops
- came in the given interval */
- if (!lat->count)
- continue;
+void
+gf_frame_latency_update(call_frame_t *frame)
+{
+ gf_latency_t *lat;
+ /* Can happen mostly at initiator xlator, as STACK_WIND/UNWIND macros
+ set it right anyways for those frames */
+ if (!frame->op)
+ frame->op = frame->root->op;
- gf_proc_dump_write(key, "%.03f,%" PRId64 ",%.03f",
- (lat->total / lat->count), lat->count, lat->total);
+ if (frame->op < 0 || frame->op >= GF_FOP_MAXVALUE) {
+ gf_log("[core]", GF_LOG_WARNING, "Invalid frame op value: %d",
+ frame->op);
+ return;
}
- memset(xl->stats.interval.latencies, 0,
- sizeof(xl->stats.interval.latencies));
-
- /* make sure 'min' is set to high value, so it would be
- properly set later */
- for (i = 0; i < GF_FOP_MAXVALUE; i++) {
- xl->stats.interval.latencies[i].min = 0xffffffff;
- }
+ lat = &frame->this->stats.interval.latencies[frame->op];
+ gf_latency_update(lat, &frame->begin, &frame->end);
}
diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
index 0de3836efc5..5f18cd56cbe 100644
--- a/libglusterfs/src/libglusterfs.sym
+++ b/libglusterfs/src/libglusterfs.sym
@@ -451,6 +451,7 @@ gf_event_unregister_close
fd_anonymous
fd_anonymous_with_flags
fd_bind
+fd_close
fd_create
fd_create_uint64
__fd_ctx_del
@@ -547,8 +548,6 @@ get_file_mtime
get_host_name
get_mem_size
get_path_name
-get_struct_variable
-get_vol_type
get_xlator_by_name
get_xlator_by_type
gf_array_insertionsort
@@ -586,6 +585,7 @@ gf_dirent_free
gf_dirent_orig_offset
gf_dm_hashfn
gf_dnscache_init
+gf_dnscache_deinit
gf_errno_to_error
gf_error_to_errno
_gf_event
@@ -939,6 +939,12 @@ syncbarrier_destroy
syncbarrier_init
syncbarrier_wait
syncbarrier_wake
+synccond_init
+synccond_destroy
+synccond_wait
+synccond_timedwait
+synccond_signal
+synccond_broadcast
syncenv_destroy
syncenv_new
synclock_destroy
@@ -1016,6 +1022,7 @@ synctask_new
synctask_new1
synctask_set
synctask_setid
+synctask_sleep
synctask_wake
synctask_yield
sys_access
@@ -1041,6 +1048,7 @@ sys_futimes
sys_lchown
sys_lgetxattr
sys_link
+sys_linkat
sys_llistxattr
sys_lremovexattr
sys_lseek
@@ -1064,13 +1072,17 @@ sys_rmdir
sys_stat
sys_statvfs
sys_symlink
+sys_symlinkat
sys_truncate
sys_unlink
+sys_unlinkat
sys_utimensat
sys_write
sys_writev
sys_socket
sys_accept
+sys_kill
+sys_sysctl
tbf_init
tbf_throttle
timespec_now
@@ -1170,3 +1182,12 @@ glusterfs_mux_volfile_reconfigure
glusterfs_process_svc_detach
mgmt_is_multiplexed_daemon
xlator_is_cleanup_starting
+gf_nanosleep
+gf_syncfs
+graph_total_client_xlator
+get_xattrs_to_heal
+gf_latency_statedump_and_reset
+gf_latency_new
+gf_latency_reset
+gf_latency_update
+gf_frame_latency_update
diff --git a/libglusterfs/src/logging.c b/libglusterfs/src/logging.c
index 0134d237cb4..a930d3e3b63 100644
--- a/libglusterfs/src/logging.c
+++ b/libglusterfs/src/logging.c
@@ -35,7 +35,6 @@
#define GF_LOG_CONTROL_FILE "/etc/glusterfs/logger.conf"
#define GF_LOG_BACKTRACE_DEPTH 5
#define GF_LOG_BACKTRACE_SIZE 4096
-#define GF_LOG_TIMESTR_SIZE 256
#define GF_MAX_SLOG_PAIR_COUNT 100
#include "glusterfs/logging.h"
@@ -48,7 +47,7 @@
#define TEST_LOG(__msg, __args...) \
gf_log("logging-infra", GF_LOG_DEBUG, __msg, ##__args);
-void
+static void
gf_log_flush_timeout_cbk(void *data);
int
@@ -57,26 +56,24 @@ gf_log_inject_timer_event(glusterfs_ctx_t *ctx);
static void
gf_log_flush_extra_msgs(glusterfs_ctx_t *ctx, uint32_t new);
-static char *gf_level_strings[] = {"", /* NONE */
- "M", /* EMERGENCY */
- "A", /* ALERT */
- "C", /* CRITICAL */
- "E", /* ERROR */
- "W", /* WARNING */
- "N", /* NOTICE */
- "I", /* INFO */
- "D", /* DEBUG */
- "T", /* TRACE */
- ""};
-
-/* Ideally this should get moved to logging.h */
-struct _msg_queue {
- struct list_head msgs;
-};
-
-struct _log_msg {
- const char *msg;
- struct list_head queue;
+static int
+log_buf_init(log_buf_t *buf, const char *domain, const char *file,
+ const char *function, int32_t line, gf_loglevel_t level,
+ int errnum, uint64_t msgid, char **appmsgstr, int graph_id);
+static void
+gf_log_rotate(glusterfs_ctx_t *ctx);
+
+static char gf_level_strings[] = {
+ ' ', /* NONE */
+ 'M', /* EMERGENCY */
+ 'A', /* ALERT */
+ 'C', /* CRITICAL */
+ 'E', /* ERROR */
+ 'W', /* WARNING */
+ 'N', /* NOTICE */
+ 'I', /* INFO */
+ 'D', /* DEBUG */
+ 'T', /* TRACE */
};
void
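
To illustrate the effect of this change (a sketch; the timestamp and file/line text below are placeholders, and demo_levels duplicates the table above only so the snippet compiles standalone): log headers now print a single severity character with %c, looked up in the file-scope gf_level_strings[], instead of going through per-call string tables.

#include <stdio.h>

/* Same ordering as gf_level_strings[]: NONE, EMERG, ALERT, CRIT, ERROR,
 * WARNING, NOTICE, INFO, DEBUG, TRACE. */
static const char demo_levels[] = {' ', 'M', 'A', 'C', 'E',
                                   'W', 'N', 'I', 'D', 'T'};

int
main(void)
{
    int level = 5; /* WARNING in the ordering above */

    printf("[<timestamp>] %c [logging.c:42:some_function] 0-demo: example\n",
           demo_levels[level]);
    return 0;
}
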
@@ -314,18 +311,16 @@ gf_log_rotate(glusterfs_ctx_t *ctx)
fd = sys_open(ctx->log.filename, O_CREAT | O_WRONLY | O_APPEND,
S_IRUSR | S_IWUSR);
if (fd < 0) {
- gf_msg("logrotate", GF_LOG_ERROR, errno, LG_MSG_FILE_OP_FAILED,
- "failed to open "
- "logfile");
+ gf_smsg("logrotate", GF_LOG_ERROR, errno,
+ LG_MSG_OPEN_LOGFILE_FAILED, NULL);
return;
}
new_logfile = fdopen(fd, "a");
if (!new_logfile) {
- gf_msg("logrotate", GF_LOG_CRITICAL, errno, LG_MSG_FILE_OP_FAILED,
- "failed to open logfile"
- " %s",
- ctx->log.filename);
+ gf_smsg("logrotate", GF_LOG_CRITICAL, errno,
+ LG_MSG_OPEN_LOGFILE_FAILED, "filename=%s",
+ ctx->log.filename, NULL);
sys_close(fd);
return;
}
@@ -478,7 +473,7 @@ gf_openlog(const char *ident, int option, int facility)
* buf = "I/O error\u001bon /tmp/bar file"
*
*/
-char *
+static char *
_json_escape(const char *str, char *buf, size_t len)
{
static const unsigned char json_exceptions[UCHAR_MAX + 1] = {
@@ -687,9 +682,8 @@ gf_log_init(void *data, const char *file, const char *ident)
}
if (mkdir_p(logdir, 0755, true)) {
/* EEXIST is handled in mkdir_p() itself */
- gf_msg("logging", GF_LOG_ERROR, 0, LG_MSG_STRDUP_ERROR,
- "failed to create metrics dir %s (%s)", logdir,
- strerror(errno));
+ gf_smsg("logging", GF_LOG_ERROR, 0, LG_MSG_STRDUP_ERROR,
+ "logdir=%s", logdir, "errno=%s", strerror(errno), NULL);
GF_FREE(logdir);
return -1;
}
@@ -765,7 +759,7 @@ _gf_log_callingfn(const char *domain, const char *file, const char *function,
xlator_t *this = THIS;
char *logline = NULL;
char *msg = NULL;
- char timestr[GF_LOG_TIMESTR_SIZE] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
char *callstr = NULL;
@@ -782,18 +776,6 @@ _gf_log_callingfn(const char *domain, const char *file, const char *function,
if (skip_logging(this, level))
goto out;
- static const char *level_strings[] = {"", /* NONE */
- "M", /* EMERGENCY */
- "A", /* ALERT */
- "C", /* CRITICAL */
- "E", /* ERROR */
- "W", /* WARNING */
- "N", /* NOTICE */
- "I", /* INFO */
- "D", /* DEBUG */
- "T", /* TRACE */
- ""};
-
if (!domain || !file || !function || !fmt) {
fprintf(stderr, "logging: %s:%s():%d: invalid argument\n", __FILE__,
__PRETTY_FUNCTION__, __LINE__);
@@ -837,12 +819,12 @@ _gf_log_callingfn(const char *domain, const char *file, const char *function,
if (-1 == ret)
goto out;
- gf_time_fmt(timestr, sizeof timestr, tv.tv_sec, gf_timefmt_FT);
+ gf_time_fmt_tv(timestr, sizeof timestr, &tv, gf_timefmt_FT);
- ret = gf_asprintf(
- &logline, "[%s.%" GF_PRI_SUSECONDS "] %s [%s:%d:%s] %s %d-%s: %s\n",
- timestr, tv.tv_usec, level_strings[level], basename, line, function,
- callstr, ((this->graph) ? this->graph->id : 0), domain, msg);
+ ret = gf_asprintf(&logline, "[%s] %c [%s:%d:%s] %s %d-%s: %s\n", timestr,
+ gf_level_strings[level], basename, line, function,
+ callstr, ((this->graph) ? this->graph->id : 0), domain,
+ msg);
if (-1 == ret) {
goto out;
}
@@ -1105,7 +1087,7 @@ _gf_msg_nomem(const char *domain, const char *file, const char *function,
char msg[2048] = {
0,
};
- char timestr[GF_LOG_TIMESTR_SIZE] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
glusterfs_ctx_t *ctx = NULL;
@@ -1133,20 +1115,20 @@ _gf_msg_nomem(const char *domain, const char *file, const char *function,
ret = gettimeofday(&tv, NULL);
if (-1 == ret)
goto out;
- gf_time_fmt(timestr, sizeof timestr, tv.tv_sec, gf_timefmt_FT);
+ gf_time_fmt_tv(timestr, sizeof timestr, &tv, gf_timefmt_FT);
/* TODO: Currently we print in the enhanced format, with a message ID
* of 0. Need to enhance this to support format as configured */
wlen = snprintf(
msg, sizeof msg,
- "[%s.%" GF_PRI_SUSECONDS "] %s [MSGID: %" PRIu64
+ "[%s] %c [MSGID: %" PRIu64
"]"
" [%s:%d:%s] %s: no memory "
"available for size (%" GF_PRI_SIZET
") current memory usage in kilobytes %ld"
" [call stack follows]\n",
- timestr, tv.tv_usec, gf_level_strings[level], (uint64_t)0, basename,
- line, function, domain, size,
+ timestr, gf_level_strings[level], (uint64_t)0, basename, line, function,
+ domain, size,
(!getrusage(RUSAGE_SELF, &r_usage) ? r_usage.ru_maxrss : 0));
if (-1 == wlen) {
ret = -1;
@@ -1292,7 +1274,7 @@ gf_log_glusterlog(glusterfs_ctx_t *ctx, const char *domain, const char *file,
int errnum, uint64_t msgid, char **appmsgstr, char *callstr,
struct timeval tv, int graph_id, gf_log_format_t fmt)
{
- char timestr[GF_LOG_TIMESTR_SIZE] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
char *header = NULL;
@@ -1303,7 +1285,7 @@ gf_log_glusterlog(glusterfs_ctx_t *ctx, const char *domain, const char *file,
gf_log_rotate(ctx);
/* format the time stamp */
- gf_time_fmt(timestr, sizeof timestr, tv.tv_sec, gf_timefmt_FT);
+ gf_time_fmt_tv(timestr, sizeof timestr, &tv, gf_timefmt_FT);
/* generate footer */
if (errnum) {
@@ -1319,40 +1301,35 @@ gf_log_glusterlog(glusterfs_ctx_t *ctx, const char *domain, const char *file,
if (fmt == gf_logformat_traditional) {
if (!callstr) {
ret = gf_asprintf(&header,
- "[%s.%" GF_PRI_SUSECONDS
- "] %s [%s:%d:%s]"
+ "[%s] %c [%s:%d:%s]"
" %d-%s: %s",
- timestr, tv.tv_usec, gf_level_strings[level],
- file, line, function, graph_id, domain,
- *appmsgstr);
+ timestr, gf_level_strings[level], file, line,
+ function, graph_id, domain, *appmsgstr);
} else {
ret = gf_asprintf(&header,
- "[%s.%" GF_PRI_SUSECONDS
- "] %s [%s:%d:%s] %s"
+ "[%s] %c [%s:%d:%s] %s"
" %d-%s: %s",
- timestr, tv.tv_usec, gf_level_strings[level],
- file, line, function, callstr, graph_id, domain,
- *appmsgstr);
+ timestr, gf_level_strings[level], file, line,
+ function, callstr, graph_id, domain, *appmsgstr);
}
} else { /* gf_logformat_withmsgid */
/* CEE log format unsupported in logger_glusterlog, so just
* print enhanced log format */
if (!callstr) {
ret = gf_asprintf(&header,
- "[%s.%" GF_PRI_SUSECONDS "] %s [MSGID: %" PRIu64
+ "[%s] %c [MSGID: %" PRIu64
"]"
" [%s:%d:%s] %d-%s: %s",
- timestr, tv.tv_usec, gf_level_strings[level],
- msgid, file, line, function, graph_id, domain,
- *appmsgstr);
+ timestr, gf_level_strings[level], msgid, file,
+ line, function, graph_id, domain, *appmsgstr);
} else {
ret = gf_asprintf(&header,
- "[%s.%" GF_PRI_SUSECONDS "] %s [MSGID: %" PRIu64
+ "[%s] %c [MSGID: %" PRIu64
"]"
" [%s:%d:%s] %s %d-%s: %s",
- timestr, tv.tv_usec, gf_level_strings[level],
- msgid, file, line, function, callstr, graph_id,
- domain, *appmsgstr);
+ timestr, gf_level_strings[level], msgid, file,
+ line, function, callstr, graph_id, domain,
+ *appmsgstr);
}
}
if (-1 == ret) {
@@ -1403,39 +1380,36 @@ gf_syslog_log_repetitions(const char *domain, const char *file,
int graph_id)
{
int priority;
- char timestr_latest[GF_LOG_TIMESTR_SIZE] = {
+ char timestr_latest[GF_TIMESTR_SIZE] = {
0,
};
- char timestr_oldest[GF_LOG_TIMESTR_SIZE] = {
+ char timestr_oldest[GF_TIMESTR_SIZE] = {
0,
};
SET_LOG_PRIO(level, priority);
- gf_time_fmt(timestr_latest, sizeof timestr_latest, latest.tv_sec,
- gf_timefmt_FT);
- gf_time_fmt(timestr_oldest, sizeof timestr_oldest, oldest.tv_sec,
- gf_timefmt_FT);
+ gf_time_fmt_tv(timestr_latest, sizeof timestr_latest, &latest,
+ gf_timefmt_FT);
+ gf_time_fmt_tv(timestr_oldest, sizeof timestr_oldest, &oldest,
+ gf_timefmt_FT);
if (errnum) {
- syslog(
- priority,
- "The message \"[MSGID: %" PRIu64
- "] [%s:%d:%s] "
- "%d-%s: %s [%s] \" repeated %d times between %s.%" GF_PRI_SUSECONDS
- " and %s.%" GF_PRI_SUSECONDS,
- msgid, file, line, function, graph_id, domain, *appmsgstr,
- strerror(errnum), refcount, timestr_oldest, oldest.tv_usec,
- timestr_latest, latest.tv_usec);
+ syslog(priority,
+ "The message \"[MSGID: %" PRIu64
+ "] [%s:%d:%s] "
+ "%d-%s: %s [%s] \" repeated %d times between %s"
+ " and %s",
+ msgid, file, line, function, graph_id, domain, *appmsgstr,
+ strerror(errnum), refcount, timestr_oldest, timestr_latest);
} else {
syslog(priority,
"The message \"[MSGID: %" PRIu64
"] [%s:%d:%s] "
- "%d-%s: %s \" repeated %d times between %s.%" GF_PRI_SUSECONDS
- " and %s.%" GF_PRI_SUSECONDS,
+ "%d-%s: %s \" repeated %d times between %s"
+ " and %s",
msgid, file, line, function, graph_id, domain, *appmsgstr,
- refcount, timestr_oldest, oldest.tv_usec, timestr_latest,
- latest.tv_usec);
+ refcount, timestr_oldest, timestr_latest);
}
return 0;
}
@@ -1449,10 +1423,10 @@ gf_glusterlog_log_repetitions(glusterfs_ctx_t *ctx, const char *domain,
struct timeval latest, int graph_id)
{
int ret = 0;
- char timestr_latest[GF_LOG_TIMESTR_SIZE] = {
+ char timestr_latest[GF_TIMESTR_SIZE] = {
0,
};
- char timestr_oldest[GF_LOG_TIMESTR_SIZE] = {
+ char timestr_oldest[GF_TIMESTR_SIZE] = {
0,
};
char errstr[256] = {
@@ -1467,7 +1441,7 @@ gf_glusterlog_log_repetitions(glusterfs_ctx_t *ctx, const char *domain,
gf_log_rotate(ctx);
ret = gf_asprintf(&header,
- "The message \"%s [MSGID: %" PRIu64
+ "The message \"%c [MSGID: %" PRIu64
"]"
" [%s:%d:%s] %d-%s: %s",
gf_level_strings[level], msgid, file, line, function,
@@ -1476,21 +1450,17 @@ gf_glusterlog_log_repetitions(glusterfs_ctx_t *ctx, const char *domain,
goto err;
}
- gf_time_fmt(timestr_latest, sizeof timestr_latest, latest.tv_sec,
- gf_timefmt_FT);
+ gf_time_fmt_tv(timestr_latest, sizeof timestr_latest, &latest,
+ gf_timefmt_FT);
- gf_time_fmt(timestr_oldest, sizeof timestr_oldest, oldest.tv_sec,
- gf_timefmt_FT);
+ gf_time_fmt_tv(timestr_oldest, sizeof timestr_oldest, &oldest,
+ gf_timefmt_FT);
if (errnum)
snprintf(errstr, sizeof(errstr) - 1, " [%s]", strerror(errnum));
- ret = gf_asprintf(&footer,
- "%s\" repeated %d times between"
- " [%s.%" GF_PRI_SUSECONDS "] and [%s.%" GF_PRI_SUSECONDS
- "]",
- errstr, refcount, timestr_oldest, oldest.tv_usec,
- timestr_latest, latest.tv_usec);
+ ret = gf_asprintf(&footer, "%s\" repeated %d times between [%s] and [%s]",
+ errstr, refcount, timestr_oldest, timestr_latest);
if (-1 == ret) {
ret = -1;
goto err;
@@ -1547,6 +1517,11 @@ gf_log_print_with_repetitions(glusterfs_ctx_t *ctx, const char *domain,
appmsgstr, callstr, refcount, oldest, latest, graph_id);
break;
}
+        /* NOTE: If the syslog control file is absent, which is another
+ * way to control logging to syslog, then we will fall through
+ * to the gluster log. The ideal way to do things would be to
+ * not have the extra control file check */
+
case gf_logger_glusterlog:
ret = gf_glusterlog_log_repetitions(
ctx, domain, file, function, line, level, errnum, msgid,
@@ -1940,20 +1915,11 @@ _gf_msg(const char *domain, const char *file, const char *function,
int ret = 0;
char *msgstr = NULL;
va_list ap;
- xlator_t *this = NULL;
+ xlator_t *this = THIS;
glusterfs_ctx_t *ctx = NULL;
char *callstr = NULL;
int log_inited = 0;
- /* in args check */
- if (!domain || !file || !function || !fmt) {
- fprintf(stderr, "logging: %s:%s():%d: invalid argument\n", __FILE__,
- __PRETTY_FUNCTION__, __LINE__);
- return -1;
- }
-
- this = THIS;
-
if (this == NULL)
return -1;
@@ -1967,6 +1933,13 @@ _gf_msg(const char *domain, const char *file, const char *function,
if (skip_logging(this, level))
goto out;
+ /* in args check */
+ if (!domain || !file || !function || !fmt) {
+ fprintf(stderr, "logging: %s:%s():%d: invalid argument\n", __FILE__,
+ __PRETTY_FUNCTION__, __LINE__);
+ return -1;
+ }
+
/* form the message */
va_start(ap, fmt);
ret = vasprintf(&msgstr, fmt, ap);
@@ -2027,7 +2000,7 @@ _gf_log(const char *domain, const char *file, const char *function, int line,
const char *basename = NULL;
FILE *new_logfile = NULL;
va_list ap;
- char timestr[GF_LOG_TIMESTR_SIZE] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
struct timeval tv = {
@@ -2046,18 +2019,6 @@ _gf_log(const char *domain, const char *file, const char *function, int line,
if (skip_logging(this, level))
goto out;
- static const char *level_strings[] = {"", /* NONE */
- "M", /* EMERGENCY */
- "A", /* ALERT */
- "C", /* CRITICAL */
- "E", /* ERROR */
- "W", /* WARNING */
- "N", /* NOTICE */
- "I", /* INFO */
- "D", /* DEBUG */
- "T", /* TRACE */
- ""};
-
if (!domain || !file || !function || !fmt) {
fprintf(stderr, "logging: %s:%s():%d: invalid argument\n", __FILE__,
__PRETTY_FUNCTION__, __LINE__);
@@ -2097,16 +2058,17 @@ _gf_log(const char *domain, const char *file, const char *function, int line,
fd = sys_open(ctx->log.filename, O_CREAT | O_RDONLY, S_IRUSR | S_IWUSR);
if (fd < 0) {
- gf_msg("logrotate", GF_LOG_ERROR, errno, LG_MSG_FILE_OP_FAILED,
- "failed to open logfile");
+ gf_smsg("logrotate", GF_LOG_ERROR, errno,
+ LG_MSG_OPEN_LOGFILE_FAILED, NULL);
return -1;
}
sys_close(fd);
new_logfile = fopen(ctx->log.filename, "a");
if (!new_logfile) {
- gf_msg("logrotate", GF_LOG_CRITICAL, errno, LG_MSG_FILE_OP_FAILED,
- "failed to open logfile %s", ctx->log.filename);
+ gf_smsg("logrotate", GF_LOG_CRITICAL, errno,
+ LG_MSG_OPEN_LOGFILE_FAILED, "filename=%s",
+ ctx->log.filename, NULL);
goto log;
}
@@ -2125,12 +2087,11 @@ log:
if (-1 == ret)
goto out;
- gf_time_fmt(timestr, sizeof timestr, tv.tv_sec, gf_timefmt_FT);
+ gf_time_fmt_tv(timestr, sizeof timestr, &tv, gf_timefmt_FT);
- ret = gf_asprintf(
- &logline, "[%s.%" GF_PRI_SUSECONDS "] %s [%s:%d:%s] %d-%s: %s\n",
- timestr, tv.tv_usec, level_strings[level], basename, line, function,
- ((this->graph) ? this->graph->id : 0), domain, msg);
+ ret = gf_asprintf(&logline, "[%s] %c [%s:%d:%s] %d-%s: %s\n", timestr,
+ gf_level_strings[level], basename, line, function,
+ ((this->graph) ? this->graph->id : 0), domain, msg);
if (-1 == ret) {
goto err;
}
@@ -2213,8 +2174,8 @@ gf_cmd_log_init(const char *filename)
return -1;
if (!filename) {
- gf_msg(this->name, GF_LOG_CRITICAL, 0, LG_MSG_INVALID_ENTRY,
- "gf_cmd_log_init: no filename specified\n");
+ gf_smsg(this->name, GF_LOG_CRITICAL, 0, LG_MSG_FILENAME_NOT_SPECIFIED,
+ "gf_cmd_log_init", NULL);
return -1;
}
@@ -2231,17 +2192,15 @@ gf_cmd_log_init(const char *filename)
fd = sys_open(ctx->log.cmd_log_filename, O_CREAT | O_WRONLY | O_APPEND,
S_IRUSR | S_IWUSR);
if (fd < 0) {
- gf_msg(this->name, GF_LOG_CRITICAL, errno, LG_MSG_FILE_OP_FAILED,
- "failed to open cmd_log_file");
+ gf_smsg(this->name, GF_LOG_CRITICAL, errno, LG_MSG_OPEN_LOGFILE_FAILED,
+ "cmd_log_file", NULL);
return -1;
}
ctx->log.cmdlogfile = fdopen(fd, "a");
if (!ctx->log.cmdlogfile) {
- gf_msg(this->name, GF_LOG_CRITICAL, errno, LG_MSG_FILE_OP_FAILED,
- "gf_cmd_log_init: failed to open logfile \"%s\" "
- "\n",
- ctx->log.cmd_log_filename);
+ gf_smsg(this->name, GF_LOG_CRITICAL, errno, LG_MSG_OPEN_LOGFILE_FAILED,
+ "gf_cmd_log_init: %s", ctx->log.cmd_log_filename, NULL);
sys_close(fd);
return -1;
}
@@ -2252,7 +2211,7 @@ int
gf_cmd_log(const char *domain, const char *fmt, ...)
{
va_list ap;
- char timestr[64];
+ char timestr[GF_TIMESTR_SIZE];
struct timeval tv = {
0,
};
@@ -2285,10 +2244,9 @@ gf_cmd_log(const char *domain, const char *fmt, ...)
goto out;
}
- gf_time_fmt(timestr, sizeof timestr, tv.tv_sec, gf_timefmt_FT);
+ gf_time_fmt_tv(timestr, sizeof timestr, &tv, gf_timefmt_FT);
- ret = gf_asprintf(&logline, "[%s.%" GF_PRI_SUSECONDS "] %s : %s\n", timestr,
- tv.tv_usec, domain, msg);
+ ret = gf_asprintf(&logline, "[%s] %s : %s\n", timestr, domain, msg);
if (ret == -1) {
goto out;
}
@@ -2305,20 +2263,18 @@ gf_cmd_log(const char *domain, const char *fmt, ...)
fd = sys_open(ctx->log.cmd_log_filename, O_CREAT | O_WRONLY | O_APPEND,
S_IRUSR | S_IWUSR);
if (fd < 0) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, errno, LG_MSG_FILE_OP_FAILED,
- "failed to open "
- "logfile \"%s\" \n",
- ctx->log.cmd_log_filename);
+ gf_smsg(THIS->name, GF_LOG_CRITICAL, errno,
+ LG_MSG_OPEN_LOGFILE_FAILED, "name=%s",
+ ctx->log.cmd_log_filename, NULL);
ret = -1;
goto out;
}
ctx->log.cmdlogfile = fdopen(fd, "a");
if (!ctx->log.cmdlogfile) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, errno, LG_MSG_FILE_OP_FAILED,
- "failed to open logfile \"%s\""
- " \n",
- ctx->log.cmd_log_filename);
+ gf_smsg(THIS->name, GF_LOG_CRITICAL, errno,
+ LG_MSG_OPEN_LOGFILE_FAILED, "name=%s",
+ ctx->log.cmd_log_filename, NULL);
ret = -1;
sys_close(fd);
goto out;
@@ -2337,7 +2293,7 @@ out:
}
static int
-_do_slog_format(const char *event, va_list inp, char **msg)
+_do_slog_format(int errnum, const char *event, va_list inp, char **msg)
{
va_list valist_tmp;
int i = 0;
@@ -2350,10 +2306,13 @@ _do_slog_format(const char *event, va_list inp, char **msg)
char format_char = '%';
char *tmp1 = NULL;
char *tmp2 = NULL;
+ char temp_sep[3] = "";
- ret = gf_asprintf(&tmp2, "%s", event);
- if (ret == -1)
+ tmp2 = gf_strdup("");
+ if (!tmp2) {
+ ret = -1;
goto out;
+ }
/* Hardcoded value for max key value pairs, exits early */
/* from loop if found NULL */
@@ -2401,22 +2360,45 @@ _do_slog_format(const char *event, va_list inp, char **msg)
(void)va_arg(inp, void *);
}
- ret = gf_asprintf(&tmp2, "%s\t%s", tmp1, buffer);
+ ret = gf_asprintf(&tmp2, "%s%s{%s}", tmp1, temp_sep, buffer);
if (ret < 0)
goto out;
GF_FREE(buffer);
buffer = NULL;
} else {
- ret = gf_asprintf(&tmp2, "%s\t%s", tmp1, fmt);
+ ret = gf_asprintf(&tmp2, "%s%s{%s}", tmp1, temp_sep, fmt);
if (ret < 0)
goto out;
}
+        /* Set separator for next iteration */
+ temp_sep[0] = ',';
+ temp_sep[1] = ' ';
+ temp_sep[2] = 0;
+
GF_FREE(tmp1);
tmp1 = NULL;
}
+ tmp1 = gf_strdup(tmp2);
+ if (!tmp1) {
+ ret = -1;
+ goto out;
+ }
+ GF_FREE(tmp2);
+ tmp2 = NULL;
+
+ if (errnum) {
+ ret = gf_asprintf(&tmp2, "%s [%s%s{errno=%d}, {error=%s}]", event, tmp1,
+ temp_sep, errnum, strerror(errnum));
+ } else {
+ ret = gf_asprintf(&tmp2, "%s [%s]", event, tmp1);
+ }
+
+ if (ret == -1)
+ goto out;
+
*msg = gf_strdup(tmp2);
if (!*msg)
ret = -1;
@@ -2442,36 +2424,19 @@ _gf_smsg(const char *domain, const char *file, const char *function,
va_list valist;
char *msg = NULL;
int ret = 0;
+ xlator_t *this = THIS;
- va_start(valist, event);
- ret = _do_slog_format(event, valist, &msg);
- if (ret == -1)
- goto out;
-
- ret = _gf_msg(domain, file, function, line, level, errnum, trace, msgid,
- "%s", msg);
-
-out:
- va_end(valist);
- if (msg)
- GF_FREE(msg);
- return ret;
-}
-
-int
-_gf_slog(const char *domain, const char *file, const char *function, int line,
- gf_loglevel_t level, const char *event, ...)
-{
- va_list valist;
- char *msg = NULL;
- int ret = 0;
+ if (skip_logging(this, level))
+ return ret;
va_start(valist, event);
- ret = _do_slog_format(event, valist, &msg);
+ ret = _do_slog_format(errnum, event, valist, &msg);
if (ret == -1)
goto out;
- ret = _gf_log(domain, file, function, line, level, "%s", msg);
+    /* Pass errnum as zero since it is already formatted as required */
+ ret = _gf_msg(domain, file, function, line, level, 0, trace, msgid, "%s",
+ msg);
out:
va_end(valist);
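
To make the structured format concrete, here is a standalone sketch (slog_format_demo() is a simplified re-implementation written for this illustration, not the library code): each "key=value" argument passed to gf_smsg() is wrapped in braces, the pairs are joined with ", ", and {errno=...}, {error=...} are appended automatically when errnum is non-zero, which is why _gf_smsg() above passes errnum as zero to _gf_msg().

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Demo only: no truncation handling. */
static void
slog_format_demo(char *out, size_t len, const char *event, int errnum,
                 const char **pairs, int npairs)
{
    size_t off = (size_t)snprintf(out, len, "%s [", event);
    int i;

    for (i = 0; i < npairs; i++)
        off += (size_t)snprintf(out + off, len - off, "%s{%s}",
                                i ? ", " : "", pairs[i]);

    if (errnum)
        snprintf(out + off, len - off, "%s{errno=%d}, {error=%s}]",
                 npairs ? ", " : "", errnum, strerror(errnum));
    else
        snprintf(out + off, len - off, "]");
}

int
main(void)
{
    const char *pairs[] = {"filename=/var/log/glusterfs/demo.log"};
    char buf[256];

    slog_format_demo(buf, sizeof(buf), "failed to open logfile", EACCES,
                     pairs, 1);
    /* prints something like:
     * failed to open logfile [{filename=/var/log/glusterfs/demo.log},
     * {errno=13}, {error=Permission denied}] */
    puts(buf);
    return 0;
}
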
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index d0f8a64d2f7..2d5a12b0a00 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -123,6 +123,15 @@ gf_mem_update_acct_info(struct mem_acct *mem_acct, struct mem_header *header,
return gf_mem_header_prepare(header, size);
}
+static bool
+gf_mem_acct_enabled(void)
+{
+ xlator_t *x = THIS;
+ /* Low-level __gf_xxx() may be called
+ before ctx is initialized. */
+ return x->ctx && x->ctx->mem_acct_enable;
+}
+
void *
__gf_calloc(size_t nmemb, size_t size, uint32_t type, const char *typestr)
{
@@ -131,7 +140,7 @@ __gf_calloc(size_t nmemb, size_t size, uint32_t type, const char *typestr)
void *ptr = NULL;
xlator_t *xl = NULL;
- if (!THIS->ctx->mem_acct_enable)
+ if (!gf_mem_acct_enabled())
return CALLOC(nmemb, size);
xl = THIS;
@@ -156,7 +165,7 @@ __gf_malloc(size_t size, uint32_t type, const char *typestr)
void *ptr = NULL;
xlator_t *xl = NULL;
- if (!THIS->ctx->mem_acct_enable)
+ if (!gf_mem_acct_enabled())
return MALLOC(size);
xl = THIS;
@@ -178,7 +187,7 @@ __gf_realloc(void *ptr, size_t size)
size_t tot_size = 0;
struct mem_header *header = NULL;
- if (!THIS->ctx->mem_acct_enable)
+ if (!gf_mem_acct_enabled())
return REALLOC(ptr, size);
REQUIRE(NULL != ptr);
@@ -301,7 +310,7 @@ __gf_free(void *free_ptr)
struct mem_header *header = NULL;
bool last_ref = false;
- if (!THIS->ctx->mem_acct_enable) {
+ if (!gf_mem_acct_enabled()) {
FREE(free_ptr);
return;
}
@@ -353,6 +362,30 @@ free:
FREE(ptr);
}
+#if defined(GF_DISABLE_MEMPOOL)
+
+struct mem_pool *
+mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type,
+ unsigned long count, char *name)
+{
+ struct mem_pool *new;
+
+ new = GF_MALLOC(sizeof(struct mem_pool), gf_common_mt_mem_pool);
+ if (!new)
+ return NULL;
+
+ new->sizeof_type = sizeof_type;
+ return new;
+}
+
+void
+mem_pool_destroy(struct mem_pool *pool)
+{
+ GF_FREE(pool);
+}
+
+#else /* !GF_DISABLE_MEMPOOL */
+
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_head pool_threads;
static pthread_mutex_t pool_free_lock = PTHREAD_MUTEX_INITIALIZER;
@@ -362,12 +395,10 @@ static size_t pool_list_size;
static __thread per_thread_pool_list_t *thread_pool_list = NULL;
-#if !defined(GF_DISABLE_MEMPOOL)
#define N_COLD_LISTS 1024
#define POOL_SWEEP_SECS 30
typedef struct {
- struct list_head death_row;
pooled_obj_hdr_t *cold_lists[N_COLD_LISTS];
unsigned int n_cold_lists;
} sweep_state_t;
@@ -384,36 +415,33 @@ static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int init_count = 0;
static pthread_t sweeper_tid;
-gf_boolean_t
+static bool
collect_garbage(sweep_state_t *state, per_thread_pool_list_t *pool_list)
{
unsigned int i;
per_thread_pool_t *pt_pool;
- gf_boolean_t poisoned;
(void)pthread_spin_lock(&pool_list->lock);
- poisoned = pool_list->poison != 0;
- if (!poisoned) {
- for (i = 0; i < NPOOLS; ++i) {
- pt_pool = &pool_list->pools[i];
- if (pt_pool->cold_list) {
- if (state->n_cold_lists >= N_COLD_LISTS) {
- break;
- }
- state->cold_lists[state->n_cold_lists++] = pt_pool->cold_list;
+ for (i = 0; i < NPOOLS; ++i) {
+ pt_pool = &pool_list->pools[i];
+ if (pt_pool->cold_list) {
+ if (state->n_cold_lists >= N_COLD_LISTS) {
+ (void)pthread_spin_unlock(&pool_list->lock);
+ return true;
}
- pt_pool->cold_list = pt_pool->hot_list;
- pt_pool->hot_list = NULL;
+ state->cold_lists[state->n_cold_lists++] = pt_pool->cold_list;
}
+ pt_pool->cold_list = pt_pool->hot_list;
+ pt_pool->hot_list = NULL;
}
(void)pthread_spin_unlock(&pool_list->lock);
- return poisoned;
+ return false;
}
-void
+static void
free_obj_list(pooled_obj_hdr_t *victim)
{
pooled_obj_hdr_t *next;
@@ -425,82 +453,96 @@ free_obj_list(pooled_obj_hdr_t *victim)
}
}
-void *
+static void *
pool_sweeper(void *arg)
{
sweep_state_t state;
per_thread_pool_list_t *pool_list;
- per_thread_pool_list_t *next_pl;
- per_thread_pool_t *pt_pool;
- unsigned int i;
- gf_boolean_t poisoned;
+ uint32_t i;
+ bool pending;
/*
* This is all a bit inelegant, but the point is to avoid doing
* expensive things (like freeing thousands of objects) while holding a
- * global lock. Thus, we split each iteration into three passes, with
+ * global lock. Thus, we split each iteration into two passes, with
* only the first and fastest holding the lock.
*/
+ pending = true;
+
for (;;) {
- sleep(POOL_SWEEP_SECS);
+        /* If we know there's pending work to do (or it's the first run),
+         * collect garbage more often. */
+ sleep(pending ? POOL_SWEEP_SECS / 5 : POOL_SWEEP_SECS);
+
(void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
- INIT_LIST_HEAD(&state.death_row);
state.n_cold_lists = 0;
+ pending = false;
/* First pass: collect stuff that needs our attention. */
(void)pthread_mutex_lock(&pool_lock);
- list_for_each_entry_safe(pool_list, next_pl, &pool_threads, thr_list)
+ list_for_each_entry(pool_list, &pool_threads, thr_list)
{
- (void)pthread_mutex_unlock(&pool_lock);
- poisoned = collect_garbage(&state, pool_list);
- (void)pthread_mutex_lock(&pool_lock);
-
- if (poisoned) {
- list_move(&pool_list->thr_list, &state.death_row);
+ if (collect_garbage(&state, pool_list)) {
+ pending = true;
}
}
(void)pthread_mutex_unlock(&pool_lock);
- /* Second pass: free dead pools. */
- (void)pthread_mutex_lock(&pool_free_lock);
- list_for_each_entry_safe(pool_list, next_pl, &state.death_row, thr_list)
- {
- for (i = 0; i < NPOOLS; ++i) {
- pt_pool = &pool_list->pools[i];
- free_obj_list(pt_pool->cold_list);
- free_obj_list(pt_pool->hot_list);
- pt_pool->hot_list = pt_pool->cold_list = NULL;
- }
- list_del(&pool_list->thr_list);
- list_add(&pool_list->thr_list, &pool_free_threads);
- }
- (void)pthread_mutex_unlock(&pool_free_lock);
-
- /* Third pass: free cold objects from live pools. */
+ /* Second pass: free cold objects from live pools. */
for (i = 0; i < state.n_cold_lists; ++i) {
free_obj_list(state.cold_lists[i]);
}
(void)pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
}
+
+ return NULL;
}
void
-mem_pool_thread_destructor(void)
+mem_pool_thread_destructor(per_thread_pool_list_t *pool_list)
{
- per_thread_pool_list_t *pool_list = thread_pool_list;
+ per_thread_pool_t *pt_pool;
+ uint32_t i;
- /* The pool-sweeper thread will take it from here.
- *
- * We can change 'poison' here without taking locks because the change
- * itself doesn't interact with other parts of the code and a simple write
- * is already atomic from the point of view of the processor.
- *
- * This change can modify what mem_put() does, but both possibilities are
- * fine until the sweeper thread kicks in. The real synchronization must be
- * between mem_put() and the sweeper thread. */
+ if (pool_list == NULL) {
+ pool_list = thread_pool_list;
+ }
+
+ /* The current thread is terminating. None of the allocated objects will
+ * be used again. We can directly destroy them here instead of delaying
+ * it until the next sweeper loop. */
if (pool_list != NULL) {
- pool_list->poison = 1;
+        /* Remove pool_list from the global list so that the sweeper
+         * cannot touch it. */
+ pthread_mutex_lock(&pool_lock);
+ list_del(&pool_list->thr_list);
+ pthread_mutex_unlock(&pool_lock);
+
+ /* We need to protect hot/cold changes from potential mem_put() calls
+ * that reference this pool_list. Once poison is set to true, we are
+ * sure that no one else will touch hot/cold lists. The only possible
+         * race is a concurrent mem_put() adding a new item to the hot
+         * list. We protect against that by taking pool_list->lock.
+ * After that we don't need the lock to destroy the hot/cold lists. */
+ pthread_spin_lock(&pool_list->lock);
+ pool_list->poison = true;
+ pthread_spin_unlock(&pool_list->lock);
+
+ for (i = 0; i < NPOOLS; i++) {
+ pt_pool = &pool_list->pools[i];
+
+ free_obj_list(pt_pool->hot_list);
+ pt_pool->hot_list = NULL;
+
+ free_obj_list(pt_pool->cold_list);
+ pt_pool->cold_list = NULL;
+ }
+
+ pthread_mutex_lock(&pool_free_lock);
+ list_add(&pool_list->thr_list, &pool_free_threads);
+ pthread_mutex_unlock(&pool_free_lock);
+
thread_pool_list = NULL;
}
}
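
For context on when mem_pool_thread_destructor() runs, here is a sketch of the general mechanism only: the patch actually wires this up through gf_thread_needs_cleanup(), whose implementation is not part of this diff; the pthread_key_t names below are inventions for this illustration. A per-thread destructor hands the exiting thread's pool_list back to the function above.

#include <pthread.h>

static pthread_key_t demo_pool_key;

/* Runs automatically when a thread that stored a pool_list exits. */
static void
demo_pool_dtor(void *arg)
{
    mem_pool_thread_destructor((per_thread_pool_list_t *)arg);
}

static void
demo_pool_key_init(void)
{
    (void)pthread_key_create(&demo_pool_key, demo_pool_dtor);
}
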
@@ -528,6 +570,22 @@ mem_pools_preinit(void)
init_done = GF_MEMPOOL_INIT_EARLY;
}
+static __attribute__((destructor)) void
+mem_pools_postfini(void)
+{
+ /* TODO: This function should destroy all per thread memory pools that
+ * are still alive, but this is not possible right now because glibc
+ * starts calling destructors as soon as exit() is called, and
+ * gluster doesn't ensure that all threads have been stopped before
+     * calling exit(). If we destroyed things here, existing threads would
+     * crash when they try to use memory or when they terminate.
+ *
+     * When we properly terminate all threads, we can add the needed
+     * code here. Until then we need to leave the memory allocated. Most
+ * probably this function will be executed on process termination,
+ * so the memory will be released anyway by the system. */
+}
+
/* Call mem_pools_init() once threading has been configured completely. This
* prevent the pool_sweeper thread from getting killed once the main() thread
* exits during deamonizing. */
@@ -560,10 +618,6 @@ mem_pools_fini(void)
*/
break;
case 1: {
- per_thread_pool_list_t *pool_list;
- per_thread_pool_list_t *next_pl;
- unsigned int i;
-
/* if mem_pools_init() was not called, sweeper_tid will be invalid
* and the functions will error out. That is not critical. In all
* other cases, the sweeper_tid will be valid and the thread gets
@@ -571,32 +625,11 @@ mem_pools_fini(void)
(void)pthread_cancel(sweeper_tid);
(void)pthread_join(sweeper_tid, NULL);
- /* At this point all threads should have already terminated, so
- * it should be safe to destroy all pending per_thread_pool_list_t
- * structures that are stored for each thread. */
- mem_pool_thread_destructor();
-
- /* free all objects from all pools */
- list_for_each_entry_safe(pool_list, next_pl, &pool_threads,
- thr_list)
- {
- for (i = 0; i < NPOOLS; ++i) {
- free_obj_list(pool_list->pools[i].hot_list);
- free_obj_list(pool_list->pools[i].cold_list);
- pool_list->pools[i].hot_list = NULL;
- pool_list->pools[i].cold_list = NULL;
- }
-
- list_del(&pool_list->thr_list);
- FREE(pool_list);
- }
-
- list_for_each_entry_safe(pool_list, next_pl, &pool_free_threads,
- thr_list)
- {
- list_del(&pool_list->thr_list);
- FREE(pool_list);
- }
+ /* There could be threads still running in some cases, so we can't
+ * destroy pool_lists in use. We can also not destroy unused
+ * pool_lists because some allocated objects may still be pointing
+ * to them. */
+ mem_pool_thread_destructor(NULL);
init_done = GF_MEMPOOL_INIT_DESTROY;
/* Fall through. */
@@ -607,21 +640,29 @@ mem_pools_fini(void)
pthread_mutex_unlock(&init_mutex);
}
-#else
void
-mem_pools_init(void)
-{
-}
-void
-mem_pools_fini(void)
-{
-}
-void
-mem_pool_thread_destructor(void)
+mem_pool_destroy(struct mem_pool *pool)
{
-}
+ if (!pool)
+ return;
-#endif
+ /* remove this pool from the owner (glusterfs_ctx_t) */
+ LOCK(&pool->ctx->lock);
+ {
+ list_del(&pool->owner);
+ }
+ UNLOCK(&pool->ctx->lock);
+
+ /* free this pool, but keep the mem_pool_shared */
+ GF_FREE(pool);
+
+ /*
+ * Pools are now permanent, so the mem_pool->pool is kept around. All
+ * of the objects *in* the pool will eventually be freed via the
+ * pool-sweeper thread, and this way we don't have to add a lot of
+ * reference-counting complexity.
+ */
+}
struct mem_pool *
mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type,
@@ -672,6 +713,7 @@ mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type,
new->sizeof_type = sizeof_type;
new->count = count;
new->name = name;
+ new->xl_name = THIS->name;
new->pool = pool;
GF_ATOMIC_INIT(new->active, 0);
#ifdef DEBUG
@@ -689,21 +731,6 @@ mem_pool_new_fn(glusterfs_ctx_t *ctx, unsigned long sizeof_type,
return new;
}
-void *
-mem_get0(struct mem_pool *mem_pool)
-{
- void *ptr = mem_get(mem_pool);
- if (ptr) {
-#if defined(GF_DISABLE_MEMPOOL)
- memset(ptr, 0, mem_pool->sizeof_type);
-#else
- memset(ptr, 0, AVAILABLE_SIZE(mem_pool->pool->power_of_two));
-#endif
- }
-
- return ptr;
-}
-
per_thread_pool_list_t *
mem_get_pool_list(void)
{
@@ -738,13 +765,21 @@ mem_get_pool_list(void)
}
}
+ /* There's no need to take pool_list->lock, because this is already an
+ * atomic operation and we don't need to synchronize it with any change
+ * in hot/cold lists. */
+ pool_list->poison = false;
+
(void)pthread_mutex_lock(&pool_lock);
- pool_list->poison = 0;
list_add(&pool_list->thr_list, &pool_threads);
(void)pthread_mutex_unlock(&pool_lock);
thread_pool_list = pool_list;
+ /* Ensure that all memory objects associated to the new pool_list are
+ * destroyed when the thread terminates. */
+ gf_thread_needs_cleanup();
+
return pool_list;
}
@@ -804,6 +839,23 @@ mem_get_from_pool(struct mem_pool *mem_pool)
return retval;
}
+#endif /* GF_DISABLE_MEMPOOL */
+
+void *
+mem_get0(struct mem_pool *mem_pool)
+{
+ void *ptr = mem_get(mem_pool);
+ if (ptr) {
+#if defined(GF_DISABLE_MEMPOOL)
+ memset(ptr, 0, mem_pool->sizeof_type);
+#else
+ memset(ptr, 0, AVAILABLE_SIZE(mem_pool->pool->power_of_two));
+#endif
+ }
+
+ return ptr;
+}
+
void *
mem_get(struct mem_pool *mem_pool)
{
@@ -848,6 +900,14 @@ mem_put(void *ptr)
/* Not one of ours; don't touch it. */
return;
}
+
+ if (!hdr->pool_list) {
+ gf_msg_callingfn("mem-pool", GF_LOG_CRITICAL, EINVAL,
+ LG_MSG_INVALID_ARG,
+ "invalid argument hdr->pool_list NULL");
+ return;
+ }
+
pool_list = hdr->pool_list;
pt_pool = &pool_list->pools[hdr->power_of_two - POOL_SMALLEST];
@@ -870,27 +930,3 @@ mem_put(void *ptr)
}
#endif /* GF_DISABLE_MEMPOOL */
}
-
-void
-mem_pool_destroy(struct mem_pool *pool)
-{
- if (!pool)
- return;
-
- /* remove this pool from the owner (glusterfs_ctx_t) */
- LOCK(&pool->ctx->lock);
- {
- list_del(&pool->owner);
- }
- UNLOCK(&pool->ctx->lock);
-
- /* free this pool, but keep the mem_pool_shared */
- GF_FREE(pool);
-
- /*
- * Pools are now permanent, so the mem_pool->pool is kept around. All
- * of the objects *in* the pool will eventually be freed via the
- * pool-sweeper thread, and this way we don't have to add a lot of
- * reference-counting complexity.
- */
-}
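
A usage sketch of the pool API reorganized in this file (demo_obj and demo_pool_roundtrip are made up for this illustration; the signatures are the ones visible in the hunks above, and the same calls work in both normal and --disable-mempool builds):

struct demo_obj {
    int id;
    char name[32];
};

static void
demo_pool_roundtrip(glusterfs_ctx_t *ctx)
{
    struct mem_pool *pool;
    struct demo_obj *obj;

    pool = mem_pool_new_fn(ctx, sizeof(struct demo_obj), 64, "demo_obj");
    if (!pool)
        return;

    obj = mem_get0(pool); /* zero-filled object from the pool */
    if (obj) {
        obj->id = 1;
        mem_put(obj); /* returned to the pool (or freed when mempools are disabled) */
    }

    mem_pool_destroy(pool); /* pool metadata freed; the shared pool is kept */
}
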
diff --git a/libglusterfs/src/monitoring.c b/libglusterfs/src/monitoring.c
index 45e3d776903..fbb68dc8622 100644
--- a/libglusterfs/src/monitoring.c
+++ b/libglusterfs/src/monitoring.c
@@ -113,15 +113,15 @@ dump_latency_and_count(xlator_t *xl, int fd)
dprintf(fd, "%s.interval.%s.fail_count %" PRIu64 "\n", xl->name,
gf_fop_list[index], cbk);
}
- if (xl->stats.interval.latencies[index].count != 0.0) {
+ if (xl->stats.interval.latencies[index].count != 0) {
dprintf(fd, "%s.interval.%s.latency %lf\n", xl->name,
gf_fop_list[index],
- (xl->stats.interval.latencies[index].total /
+ (((double)xl->stats.interval.latencies[index].total) /
xl->stats.interval.latencies[index].count));
- dprintf(fd, "%s.interval.%s.max %lf\n", xl->name,
+ dprintf(fd, "%s.interval.%s.max %" PRIu64 "\n", xl->name,
gf_fop_list[index],
xl->stats.interval.latencies[index].max);
- dprintf(fd, "%s.interval.%s.min %lf\n", xl->name,
+ dprintf(fd, "%s.interval.%s.min %" PRIu64 "\n", xl->name,
gf_fop_list[index],
xl->stats.interval.latencies[index].min);
}
diff --git a/libglusterfs/src/options.c b/libglusterfs/src/options.c
index 400a3901689..f6b5aa0ea23 100644
--- a/libglusterfs/src/options.c
+++ b/libglusterfs/src/options.c
@@ -25,7 +25,8 @@ xlator_option_validate_path(xlator_t *xl, const char *key, const char *value,
if (strstr(value, "../")) {
snprintf(errstr, 256, "invalid path given '%s'", value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -35,7 +36,8 @@ xlator_option_validate_path(xlator_t *xl, const char *key, const char *value,
"option %s %s: '%s' is not an "
"absolute path name",
key, value, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -59,7 +61,8 @@ xlator_option_validate_int(xlator_t *xl, const char *key, const char *value,
if (gf_string2longlong(value, &inputll) != 0) {
snprintf(errstr, 256, "invalid number format \"%s\" in option \"%s\"",
value, key);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -67,7 +70,8 @@ xlator_option_validate_int(xlator_t *xl, const char *key, const char *value,
if ((inputll == 0) && (gf_string2ulonglong(value, &uinputll) != 0)) {
snprintf(errstr, 256, "invalid number format \"%s\" in option \"%s\"",
value, key);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -87,8 +91,8 @@ xlator_option_validate_int(xlator_t *xl, const char *key, const char *value,
"'%lld' in 'option %s %s' is smaller than "
"minimum value '%.0f'",
inputll, key, value, opt->min);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s",
- errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
} else if (opt->validate == GF_OPT_VALIDATE_MAX) {
@@ -97,8 +101,8 @@ xlator_option_validate_int(xlator_t *xl, const char *key, const char *value,
"'%lld' in 'option %s %s' is greater than "
"maximum value '%.0f'",
inputll, key, value, opt->max);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s",
- errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
} else if ((inputll < opt->min) || (inputll > opt->max)) {
@@ -106,7 +110,8 @@ xlator_option_validate_int(xlator_t *xl, const char *key, const char *value,
"'%lld' in 'option %s %s' is out of range "
"[%.0f - %.0f]",
inputll, key, value, opt->min, opt->max);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -129,7 +134,8 @@ xlator_option_validate_sizet(xlator_t *xl, const char *key, const char *value,
if (gf_string2bytesize_uint64(value, &size) != 0) {
snprintf(errstr, 256, "invalid number format \"%s\" in option \"%s\"",
value, key);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
ret = -1;
goto out;
}
@@ -147,7 +153,8 @@ xlator_option_validate_sizet(xlator_t *xl, const char *key, const char *value,
"'%" PRIu64
"' in 'option %s %s' is out of range [%.0f - %.0f]",
size, key, value, opt->min, opt->max);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "error=%s",
+ errstr, NULL);
ret = -1;
}
@@ -171,7 +178,8 @@ xlator_option_validate_bool(xlator_t *xl, const char *key, const char *value,
if (gf_string2boolean(value, &is_valid) != 0) {
snprintf(errstr, 256, "option %s %s: '%s' is not a valid boolean value",
key, value, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -206,7 +214,8 @@ xlator_option_validate_xlator(xlator_t *xl, const char *key, const char *value,
if (!xlopt) {
snprintf(errstr, 256, "option %s %s: '%s' is not a valid volume name",
key, value, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -305,7 +314,8 @@ out:
char errstr[4096];
set_error_str(errstr, sizeof(errstr), opt, key, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
if (op_errstr)
*op_errstr = gf_strdup(errstr);
}
@@ -324,7 +334,8 @@ xlator_option_validate_percent(xlator_t *xl, const char *key, const char *value,
if (gf_string2percent(value, &percent) != 0) {
snprintf(errstr, 256, "invalid percent format \"%s\" in \"option %s\"",
value, key);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -332,7 +343,8 @@ xlator_option_validate_percent(xlator_t *xl, const char *key, const char *value,
snprintf(errstr, 256,
"'%lf' in 'option %s %s' is out of range [0 - 100]", percent,
key, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -378,8 +390,8 @@ xlator_option_validate_percent_or_sizet(xlator_t *xl, const char *key,
"'%lf' in 'option %s %s' is out"
" of range [0 - 100]",
size, key, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "%s",
- errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE,
+ "error=%s", errstr, NULL);
goto out;
}
ret = 0;
@@ -394,8 +406,8 @@ xlator_option_validate_percent_or_sizet(xlator_t *xl, const char *key,
" %s' should not be fractional value. Use "
"valid unsigned integer value.",
size, key, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s",
- errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -413,8 +425,8 @@ xlator_option_validate_percent_or_sizet(xlator_t *xl, const char *key,
"'%lf' in 'option %s %s'"
" is out of range [%.0f - %.0f]",
size, key, value, opt->min, opt->max);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "%s",
- errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "error=%s",
+ errstr, NULL);
goto out;
}
ret = 0;
@@ -425,7 +437,8 @@ xlator_option_validate_percent_or_sizet(xlator_t *xl, const char *key,
snprintf(errstr, 256, "invalid number format \"%s\" in \"option %s\"",
value, key);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s", errstr,
+ NULL);
out:
if (ret && op_errstr)
@@ -447,7 +460,8 @@ xlator_option_validate_time(xlator_t *xl, const char *key, const char *value,
"invalid time format \"%s\" in "
"\"option %s\"",
value, key);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -466,7 +480,8 @@ xlator_option_validate_time(xlator_t *xl, const char *key, const char *value,
"' in 'option %s %s' is "
"out of range [%.0f - %.0f]",
input_time, key, value, opt->min, opt->max);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -489,7 +504,8 @@ xlator_option_validate_double(xlator_t *xl, const char *key, const char *value,
if (gf_string2double(value, &input) != 0) {
snprintf(errstr, 256, "invalid number format \"%s\" in option \"%s\"",
value, key);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -509,8 +525,8 @@ xlator_option_validate_double(xlator_t *xl, const char *key, const char *value,
"'%f' in 'option %s %s' is smaller than "
"minimum value '%f'",
input, key, value, opt->min);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s",
- errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
} else if (opt->validate == GF_OPT_VALIDATE_MAX) {
@@ -519,8 +535,8 @@ xlator_option_validate_double(xlator_t *xl, const char *key, const char *value,
"'%f' in 'option %s %s' is greater than "
"maximum value '%f'",
input, key, value, opt->max);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s",
- errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
goto out;
}
} else if ((input < opt->min) || (input > opt->max)) {
@@ -528,7 +544,8 @@ xlator_option_validate_double(xlator_t *xl, const char *key, const char *value,
"'%f' in 'option %s %s' is out of range "
"[%f - %f]",
input, key, value, opt->min, opt->max);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_OUT_OF_RANGE, "error=%s",
+ errstr, NULL);
goto out;
}
@@ -549,7 +566,8 @@ xlator_option_validate_addr(xlator_t *xl, const char *key, const char *value,
if (!valid_internet_address((char *)value, _gf_false, _gf_false)) {
snprintf(errstr, 256, "option %s %s: Can not parse %s address", key,
value, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
if (op_errstr)
*op_errstr = gf_strdup(errstr);
}
@@ -640,7 +658,8 @@ out:
"option %s %s: '%s' is not "
"a valid internet-address-list",
key, value, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
if (op_errstr)
*op_errstr = gf_strdup(errstr);
}
@@ -681,7 +700,8 @@ out:
"option %s %s: '%s' is not "
"a valid mount-auth-address",
key, value, value);
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", errstr);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "error=%s",
+ errstr, NULL);
if (op_errstr)
*op_errstr = gf_strdup(errstr);
}
@@ -747,20 +767,16 @@ validate_list_elements(const char *string, volume_option_t *opt,
key = strtok_r(str_ptr, ":", &substr_sav);
if (!key || (key_validator && key_validator(key))) {
ret = -1;
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INVALID_ENTRY,
- "invalid list '%s', key "
- "'%s' not valid.",
- string, key ? key : "");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INVALID_ENTRY,
+ "list=%s", string, "key=%s", key ? key : "", NULL);
goto out;
}
value = strtok_r(NULL, ":", &substr_sav);
if (!value || (value_validator && value_validator(value, opt))) {
ret = -1;
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INVALID_ENTRY,
- "invalid list '%s', "
- "value '%s' not valid.",
- string, key);
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INVALID_ENTRY,
+ "list=%s", string, "value=%s", key, NULL);
goto out;
}
@@ -865,8 +881,8 @@ xlator_option_validate(xlator_t *xl, char *key, char *value,
};
if (opt->type > GF_OPTION_TYPE_MAX) {
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
- "unknown option type '%d'", opt->type);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_UNKNOWN_OPTION_TYPE,
+ "type=%d", opt->type, NULL);
goto out;
}
@@ -947,18 +963,16 @@ xl_opt_validate(dict_t *dict, char *key, data_t *value, void *data)
ret = xlator_option_validate(xl, key, value->data, opt, &errstr);
if (ret)
- gf_msg(xl->name, GF_LOG_WARNING, 0, LG_MSG_VALIDATE_RETURNS,
- "validate of %s returned %d", key, ret);
+ gf_smsg(xl->name, GF_LOG_WARNING, 0, LG_MSG_VALIDATE_RETURNS, "key=%s",
+ key, "ret=%d", ret, NULL);
if (errstr)
/* possible small leak of previously set stub->errstr */
stub->errstr = errstr;
if (fnmatch(opt->key[0], key, FNM_NOESCAPE) != 0) {
- gf_msg(xl->name, GF_LOG_DEBUG, 0, LG_MSG_INVALID_ENTRY,
- "option '%s' is deprecated, preferred is '%s', "
- "continuing with correction",
- key, opt->key[0]);
+ gf_smsg(xl->name, GF_LOG_DEBUG, 0, LG_MSG_OPTION_DEPRECATED, "key=%s",
+ key, "preferred=%s", opt->key[0], NULL);
dict_set(dict, opt->key[0], value);
dict_del(dict, key);
}
@@ -1026,9 +1040,8 @@ xlator_validate_rec(xlator_t *xlator, char **op_errstr)
while (trav) {
if (xlator_validate_rec(trav->xlator, op_errstr)) {
- gf_msg("xlator", GF_LOG_WARNING, 0, LG_MSG_VALIDATE_REC_FAILED,
- "validate_rec "
- "failed");
+ gf_smsg("xlator", GF_LOG_WARNING, 0, LG_MSG_VALIDATE_REC_FAILED,
+ NULL);
goto out;
}
@@ -1052,8 +1065,8 @@ xlator_validate_rec(xlator_t *xlator, char **op_errstr)
THIS = old_THIS;
if (ret) {
- gf_msg(xlator->name, GF_LOG_INFO, 0, LG_MSG_INVALID_ENTRY, "%s",
- *op_errstr);
+ gf_smsg(xlator->name, GF_LOG_INFO, 0, LG_MSG_INVALID_ENTRY, "%s",
+ *op_errstr, NULL);
goto out;
}
diff --git a/libglusterfs/src/rbthash.c b/libglusterfs/src/rbthash.c
index ae2e158d61e..c90b5a21f44 100644
--- a/libglusterfs/src/rbthash.c
+++ b/libglusterfs/src/rbthash.c
@@ -56,9 +56,8 @@ __rbthash_init_buckets(rbthash_table_t *tbl, int buckets)
tbl->buckets[i].bucket = rb_create(
(rb_comparison_func *)rbthash_comparator, tbl, NULL);
if (!tbl->buckets[i].bucket) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RB_TABLE_CREATE_FAILED,
- "Failed to "
- "create rb table bucket");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RB_TABLE_CREATE_FAILED,
+ NULL);
ret = -1;
goto err;
}
@@ -88,20 +87,17 @@ rbthash_table_init(glusterfs_ctx_t *ctx, int buckets, rbt_hasher_t hfunc,
int ret = -1;
if (!hfunc) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_HASH_FUNC_ERROR,
- "Hash function not given");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_HASH_FUNC_ERROR, NULL);
return NULL;
}
if (!entrypool && !expected_entries) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
- "Both mem-pool and expected entries not provided");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_ENTRIES_NOT_PROVIDED, NULL);
return NULL;
}
if (entrypool && expected_entries) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
- "Both mem-pool and expected entries are provided");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_ENTRIES_PROVIDED, NULL);
return NULL;
}
@@ -132,8 +128,8 @@ rbthash_table_init(glusterfs_ctx_t *ctx, int buckets, rbt_hasher_t hfunc,
ret = __rbthash_init_buckets(newtab, buckets);
if (ret == -1) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_INIT_BUCKET_FAILED,
- "Failed to init buckets");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_INIT_BUCKET_FAILED,
+ NULL);
if (newtab->pool_alloced)
mem_pool_destroy(newtab->entrypool);
} else {
@@ -170,8 +166,8 @@ rbthash_init_entry(rbthash_table_t *tbl, void *data, void *key, int keylen)
entry = mem_get(tbl->entrypool);
if (!entry) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_GET_ENTRY_FAILED,
- "Failed to get entry from mem-pool");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_GET_ENTRY_FAILED,
+ NULL);
goto ret;
}
@@ -243,8 +239,8 @@ rbthash_insert_entry(rbthash_table_t *tbl, rbthash_entry_t *entry)
bucket = rbthash_entry_bucket(tbl, entry);
if (!bucket) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_GET_BUCKET_FAILED,
- "Failed to get bucket");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_GET_BUCKET_FAILED,
+ NULL);
goto err;
}
@@ -253,8 +249,8 @@ rbthash_insert_entry(rbthash_table_t *tbl, rbthash_entry_t *entry)
{
if (!rb_probe(bucket->bucket, (void *)entry)) {
UNLOCK(&bucket->bucketlock);
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_INSERT_FAILED,
- "Failed to insert entry");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_INSERT_FAILED,
+ NULL);
ret = -1;
goto err;
}
@@ -276,16 +272,16 @@ rbthash_insert(rbthash_table_t *tbl, void *data, void *key, int keylen)
entry = rbthash_init_entry(tbl, data, key, keylen);
if (!entry) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_INIT_ENTRY_FAILED,
- "Failed to init entry");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_INIT_ENTRY_FAILED,
+ NULL);
goto err;
}
ret = rbthash_insert_entry(tbl, entry);
if (ret == -1) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_INSERT_FAILED,
- "Failed to insert entry");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_INSERT_FAILED,
+ NULL);
rbthash_deinit_entry(tbl, entry);
goto err;
}
@@ -331,8 +327,8 @@ rbthash_get(rbthash_table_t *tbl, void *key, int keylen)
bucket = rbthash_key_bucket(tbl, key, keylen);
if (!bucket) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_GET_BUCKET_FAILED,
- "Failed to get bucket");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_GET_BUCKET_FAILED,
+ NULL);
return NULL;
}
@@ -365,8 +361,8 @@ rbthash_remove(rbthash_table_t *tbl, void *key, int keylen)
bucket = rbthash_key_bucket(tbl, key, keylen);
if (!bucket) {
- gf_msg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_GET_BUCKET_FAILED,
- "Failed to get bucket");
+ gf_smsg(GF_RBTHASH, GF_LOG_ERROR, 0, LG_MSG_RBTHASH_GET_BUCKET_FAILED,
+ NULL);
return NULL;
}
diff --git a/libglusterfs/src/stack.c b/libglusterfs/src/stack.c
index 371f60c4436..1531f0da43f 100644
--- a/libglusterfs/src/stack.c
+++ b/libglusterfs/src/stack.c
@@ -92,7 +92,7 @@ gf_proc_dump_call_frame(call_frame_t *call_frame, const char *key_buf, ...)
};
int ret = -1;
- char timestr[256] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
int len;
@@ -162,7 +162,7 @@ gf_proc_dump_call_stack(call_stack_t *call_stack, const char *key_buf, ...)
va_list ap;
call_frame_t *trav;
int32_t i = 1, cnt = 0;
- char timestr[256] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
int len;
diff --git a/libglusterfs/src/statedump.c b/libglusterfs/src/statedump.c
index 5e0f04f3217..65f0eb5c7f3 100644
--- a/libglusterfs/src/statedump.c
+++ b/libglusterfs/src/statedump.c
@@ -199,6 +199,40 @@ gf_proc_dump_write(char *key, char *value, ...)
return ret;
}
+void
+gf_latency_statedump_and_reset(char *key, gf_latency_t *lat)
+{
+    /* Doesn't make sense to continue if no fops came in
+       during the given interval */
+ if (!lat || !lat->count)
+ return;
+ gf_proc_dump_write(key,
+ "AVG:%lf CNT:%" PRIu64 " TOTAL:%" PRIu64 " MIN:%" PRIu64
+ " MAX:%" PRIu64,
+ (((double)lat->total) / lat->count), lat->count,
+ lat->total, lat->min, lat->max);
+ gf_latency_reset(lat);
+}
+
+void
+gf_proc_dump_xl_latency_info(xlator_t *xl)
+{
+ char key_prefix[GF_DUMP_MAX_BUF_LEN];
+ char key[GF_DUMP_MAX_BUF_LEN];
+ int i;
+
+ snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.latency", xl->name);
+ gf_proc_dump_add_section("%s", key_prefix);
+
+ for (i = 0; i < GF_FOP_MAXVALUE; i++) {
+ gf_proc_dump_build_key(key, key_prefix, "%s", (char *)gf_fop_list[i]);
+
+ gf_latency_t *lat = &xl->stats.interval.latencies[i];
+
+ gf_latency_statedump_and_reset(key, lat);
+ }
+}
+
static void
gf_proc_dump_xlator_mem_info(xlator_t *xl)
{
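
One note on interpreting these statedump lines (print_latency_ms() below is a throwaway helper written for this note, not library code): the inline computation removed from latency.c accumulated nanoseconds and gf_tsdiff() replaces it, so the AVG/CNT/TOTAL/MIN/MAX values written by gf_latency_statedump_and_reset() are presumably nanosecond figures.

#include <inttypes.h>
#include <stdio.h>

/* Convert one statedump latency entry from nanoseconds to milliseconds. */
static void
print_latency_ms(const char *fop, double avg_ns, uint64_t count)
{
    printf("%s: avg %.3f ms over %" PRIu64 " calls\n", fop, avg_ns / 1e6,
           count);
}

int
main(void)
{
    print_latency_ms("LOOKUP", 2500000.0, 42); /* LOOKUP: avg 2.500 ms over 42 calls */
    return 0;
}
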
@@ -270,7 +304,7 @@ gf_proc_dump_xlator_mem_info_only_in_use(xlator_t *xl)
void
gf_proc_dump_mem_info()
{
-#ifdef HAVE_MALLOC_STATS
+#ifdef HAVE_MALLINFO
struct mallinfo info;
memset(&info, 0, sizeof(struct mallinfo));
@@ -296,7 +330,7 @@ gf_proc_dump_mem_info_to_dict(dict_t *dict)
{
if (!dict)
return;
-#ifdef HAVE_MALLOC_STATS
+#ifdef HAVE_MALLINFO
struct mallinfo info;
int ret = -1;
@@ -349,26 +383,13 @@ gf_proc_dump_mem_info_to_dict(dict_t *dict)
void
gf_proc_dump_mempool_info(glusterfs_ctx_t *ctx)
{
+#ifdef GF_DISABLE_MEMPOOL
+ gf_proc_dump_write("built with --disable-mempool", " so no memory pools");
+#else
struct mem_pool *pool = NULL;
gf_proc_dump_add_section("mempool");
-#if defined(OLD_MEM_POOLS)
- list_for_each_entry(pool, &ctx->mempool_list, global_list)
- {
- gf_proc_dump_write("-----", "-----");
- gf_proc_dump_write("pool-name", "%s", pool->name);
- gf_proc_dump_write("hot-count", "%d", pool->hot_count);
- gf_proc_dump_write("cold-count", "%d", pool->cold_count);
- gf_proc_dump_write("padded_sizeof", "%lu", pool->padded_sizeof_type);
- gf_proc_dump_write("alloc-count", "%" PRIu64, pool->alloc_count);
- gf_proc_dump_write("max-alloc", "%d", pool->max_alloc);
-
- gf_proc_dump_write("pool-misses", "%" PRIu64, pool->pool_misses);
- gf_proc_dump_write("cur-stdalloc", "%d", pool->curr_stdalloc);
- gf_proc_dump_write("max-stdalloc", "%d", pool->max_stdalloc);
- }
-#else
LOCK(&ctx->lock);
{
list_for_each_entry(pool, &ctx->mempool_list, owner)
@@ -377,6 +398,7 @@ gf_proc_dump_mempool_info(glusterfs_ctx_t *ctx)
gf_proc_dump_write("-----", "-----");
gf_proc_dump_write("pool-name", "%s", pool->name);
+ gf_proc_dump_write("xlator-name", "%s", pool->xl_name);
gf_proc_dump_write("active-count", "%" GF_PRI_ATOMIC, active);
gf_proc_dump_write("sizeof-type", "%lu", pool->sizeof_type);
gf_proc_dump_write("padded-sizeof", "%d",
@@ -387,15 +409,13 @@ gf_proc_dump_mempool_info(glusterfs_ctx_t *ctx)
}
}
UNLOCK(&ctx->lock);
-
- /* TODO: details of (struct mem_pool_shared) pool->pool */
-#endif
+#endif /* GF_DISABLE_MEMPOOL */
}
void
gf_proc_dump_mempool_info_to_dict(glusterfs_ctx_t *ctx, dict_t *dict)
{
-#if defined(OLD_MEM_POOLS)
+#ifndef GF_DISABLE_MEMPOOL
struct mem_pool *pool = NULL;
char key[GF_DUMP_MAX_BUF_LEN] = {
0,
@@ -406,51 +426,47 @@ gf_proc_dump_mempool_info_to_dict(glusterfs_ctx_t *ctx, dict_t *dict)
if (!ctx || !dict)
return;
- list_for_each_entry(pool, &ctx->mempool_list, global_list)
+ LOCK(&ctx->lock);
{
- snprintf(key, sizeof(key), "pool%d.name", count);
- ret = dict_set_str(dict, key, pool->name);
- if (ret)
- return;
-
- snprintf(key, sizeof(key), "pool%d.hotcount", count);
- ret = dict_set_int32(dict, key, pool->hot_count);
- if (ret)
- return;
-
- snprintf(key, sizeof(key), "pool%d.coldcount", count);
- ret = dict_set_int32(dict, key, pool->cold_count);
- if (ret)
- return;
-
- snprintf(key, sizeof(key), "pool%d.paddedsizeof", count);
- ret = dict_set_uint64(dict, key, pool->padded_sizeof_type);
- if (ret)
- return;
-
- snprintf(key, sizeof(key), "pool%d.alloccount", count);
- ret = dict_set_uint64(dict, key, pool->alloc_count);
- if (ret)
- return;
-
- snprintf(key, sizeof(key), "pool%d.max_alloc", count);
- ret = dict_set_int32(dict, key, pool->max_alloc);
- if (ret)
- return;
-
- snprintf(key, sizeof(key), "pool%d.max-stdalloc", count);
- ret = dict_set_int32(dict, key, pool->max_stdalloc);
- if (ret)
- return;
-
- snprintf(key, sizeof(key), "pool%d.pool-misses", count);
- ret = dict_set_uint64(dict, key, pool->pool_misses);
- if (ret)
- return;
- count++;
+ list_for_each_entry(pool, &ctx->mempool_list, owner)
+ {
+ int64_t active = GF_ATOMIC_GET(pool->active);
+
+ snprintf(key, sizeof(key), "pool%d.name", count);
+ ret = dict_set_str(dict, key, pool->name);
+ if (ret)
+ goto out;
+
+ snprintf(key, sizeof(key), "pool%d.active-count", count);
+ ret = dict_set_uint64(dict, key, active);
+ if (ret)
+ goto out;
+
+ snprintf(key, sizeof(key), "pool%d.sizeof-type", count);
+ ret = dict_set_uint64(dict, key, pool->sizeof_type);
+ if (ret)
+ goto out;
+
+ snprintf(key, sizeof(key), "pool%d.padded-sizeof", count);
+ ret = dict_set_uint64(dict, key, 1 << pool->pool->power_of_two);
+ if (ret)
+ goto out;
+
+ snprintf(key, sizeof(key), "pool%d.size", count);
+ ret = dict_set_uint64(dict, key,
+ (1 << pool->pool->power_of_two) * active);
+ if (ret)
+ goto out;
+
+ snprintf(key, sizeof(key), "pool%d.shared-pool", count);
+ ret = dict_set_static_ptr(dict, key, pool->pool);
+ if (ret)
+ goto out;
+ }
}
- ret = dict_set_int32(dict, "mempool-count", count);
-#endif
+out:
+ UNLOCK(&ctx->lock);
+#endif /* !GF_DISABLE_MEMPOOL */
}
void
@@ -485,7 +501,7 @@ gf_proc_dump_single_xlator_info(xlator_t *trav)
return;
if (ctx->measure_latency)
- gf_proc_dump_latency_info(trav);
+ gf_proc_dump_xl_latency_info(trav);
gf_proc_dump_xlator_mem_info(trav);
@@ -782,7 +798,7 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
char brick_name[PATH_MAX] = {
0,
};
- char timestr[256] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
char sign_string[512] = {
@@ -842,7 +858,7 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
? dump_options.dump_path
: ((ctx->statedump_path != NULL) ? ctx->statedump_path
: DEFAULT_VAR_RUN_DIRECTORY)),
- brick_name, getpid(), (uint64_t)time(NULL));
+ brick_name, getpid(), (uint64_t)gf_time());
if ((ret < 0) || (ret >= sizeof(path))) {
goto out;
}
@@ -861,10 +877,7 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
// continue even though gettimeofday() has failed
ret = gettimeofday(&tv, NULL);
if (0 == ret) {
- gf_time_fmt(timestr, sizeof timestr, tv.tv_sec, gf_timefmt_FT);
- len = strlen(timestr);
- snprintf(timestr + len, sizeof timestr - len, ".%" GF_PRI_SUSECONDS,
- tv.tv_usec);
+ gf_time_fmt_tv(timestr, sizeof timestr, &tv, gf_timefmt_FT);
}
len = snprintf(sign_string, sizeof(sign_string), "DUMP-START-TIME: %s\n",
@@ -913,10 +926,7 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
ret = gettimeofday(&tv, NULL);
if (0 == ret) {
- gf_time_fmt(timestr, sizeof timestr, tv.tv_sec, gf_timefmt_FT);
- len = strlen(timestr);
- snprintf(timestr + len, sizeof timestr - len, ".%" GF_PRI_SUSECONDS,
- tv.tv_usec);
+ gf_time_fmt_tv(timestr, sizeof timestr, &tv, gf_timefmt_FT);
}
len = snprintf(sign_string, sizeof(sign_string), "\nDUMP-END-TIME: %s",
@@ -1035,7 +1045,7 @@ gf_proc_dump_xlator_profile(xlator_t *this, strfd_t *strfd)
{
gf_dump_strfd = strfd;
- gf_proc_dump_latency_info(this);
+ gf_proc_dump_xl_latency_info(this);
gf_dump_strfd = NULL;
}
diff --git a/libglusterfs/src/store.c b/libglusterfs/src/store.c
index 06ef75e3d35..5c316b9291a 100644
--- a/libglusterfs/src/store.c
+++ b/libglusterfs/src/store.c
@@ -22,7 +22,7 @@ gf_store_mkdir(char *path)
{
int32_t ret = -1;
- ret = mkdir_p(path, 0777, _gf_true);
+ ret = mkdir_p(path, 0755, _gf_true);
if ((-1 == ret) && (EEXIST != errno)) {
gf_msg("", GF_LOG_ERROR, errno, LG_MSG_DIR_OP_FAILED,
@@ -183,8 +183,8 @@ out:
}
int
-gf_store_read_and_tokenize(FILE *file, char *str, int size, char **iter_key,
- char **iter_val, gf_store_op_errno_t *store_errno)
+gf_store_read_and_tokenize(FILE *file, char **iter_key, char **iter_val,
+ gf_store_op_errno_t *store_errno)
{
int32_t ret = -1;
char *savetok = NULL;
@@ -192,15 +192,15 @@ gf_store_read_and_tokenize(FILE *file, char *str, int size, char **iter_key,
char *value = NULL;
char *temp = NULL;
size_t str_len = 0;
+ char str[8192];
GF_ASSERT(file);
- GF_ASSERT(str);
GF_ASSERT(iter_key);
GF_ASSERT(iter_val);
GF_ASSERT(store_errno);
retry:
- temp = fgets(str, size, file);
+ temp = fgets(str, 8192, file);
if (temp == NULL || feof(file)) {
ret = -1;
*store_errno = GD_STORE_EOF;
@@ -240,13 +240,8 @@ int32_t
gf_store_retrieve_value(gf_store_handle_t *handle, char *key, char **value)
{
int32_t ret = -1;
- char *scan_str = NULL;
char *iter_key = NULL;
char *iter_val = NULL;
- char *free_str = NULL;
- struct stat st = {
- 0,
- };
gf_store_op_errno_t store_errno = GD_STORE_SUCCESS;
GF_ASSERT(handle);
@@ -278,32 +273,9 @@ gf_store_retrieve_value(gf_store_handle_t *handle, char *key, char **value)
} else {
fseek(handle->read, 0, SEEK_SET);
}
- ret = sys_fstat(handle->fd, &st);
- if (ret < 0) {
- gf_msg("", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
- "stat on file %s failed", handle->path);
- ret = -1;
- store_errno = GD_STORE_STAT_FAILED;
- goto out;
- }
-
- /* "st.st_size + 1" is used as we are fetching each
- * line of a file using fgets, fgets will append "\0"
- * to the end of the string
- */
- scan_str = GF_CALLOC(1, st.st_size + 1, gf_common_mt_char);
-
- if (scan_str == NULL) {
- ret = -1;
- store_errno = GD_STORE_ENOMEM;
- goto out;
- }
-
- free_str = scan_str;
-
do {
- ret = gf_store_read_and_tokenize(handle->read, scan_str, st.st_size + 1,
- &iter_key, &iter_val, &store_errno);
+ ret = gf_store_read_and_tokenize(handle->read, &iter_key, &iter_val,
+ &store_errno);
if (ret < 0) {
gf_msg_trace("", 0,
"error while reading key '%s': "
@@ -333,8 +305,6 @@ out:
sys_close(handle->fd);
}
- GF_FREE(free_str);
-
return ret;
}
@@ -607,40 +577,16 @@ gf_store_iter_get_next(gf_store_iter_t *iter, char **key, char **value,
gf_store_op_errno_t *op_errno)
{
int32_t ret = -1;
- char *scan_str = NULL;
char *iter_key = NULL;
char *iter_val = NULL;
- struct stat st = {
- 0,
- };
gf_store_op_errno_t store_errno = GD_STORE_SUCCESS;
GF_ASSERT(iter);
GF_ASSERT(key);
GF_ASSERT(value);
- ret = sys_stat(iter->filepath, &st);
- if (ret < 0) {
- gf_msg("", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
- "stat on file failed");
- ret = -1;
- store_errno = GD_STORE_STAT_FAILED;
- goto out;
- }
-
- /* "st.st_size + 1" is used as we are fetching each
- * line of a file using fgets, fgets will append "\0"
- * to the end of the string
- */
- scan_str = GF_CALLOC(1, st.st_size + 1, gf_common_mt_char);
- if (!scan_str) {
- ret = -1;
- store_errno = GD_STORE_ENOMEM;
- goto out;
- }
-
- ret = gf_store_read_and_tokenize(iter->file, scan_str, st.st_size + 1,
- &iter_key, &iter_val, &store_errno);
+ ret = gf_store_read_and_tokenize(iter->file, &iter_key, &iter_val,
+ &store_errno);
if (ret < 0) {
goto out;
}
@@ -665,7 +611,6 @@ gf_store_iter_get_next(gf_store_iter_t *iter, char **key, char **value,
ret = 0;
out:
- GF_FREE(scan_str);
if (ret) {
GF_FREE(*key);
GF_FREE(*value);
@@ -704,23 +649,25 @@ out:
}
int32_t
-gf_store_iter_destroy(gf_store_iter_t *iter)
+gf_store_iter_destroy(gf_store_iter_t **iter)
{
int32_t ret = -1;
- if (!iter)
+ if (!(*iter))
return 0;
/* gf_store_iter_new will not return a valid iter object with iter->file
* being NULL*/
- ret = fclose(iter->file);
+ ret = fclose((*iter)->file);
if (ret)
gf_msg("", GF_LOG_ERROR, errno, LG_MSG_FILE_OP_FAILED,
"Unable"
" to close file: %s, ret: %d",
- iter->filepath, ret);
+ (*iter)->filepath, ret);
+
+ GF_FREE(*iter);
+ *iter = NULL;
- GF_FREE(iter);
return ret;
}
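With gf_store_iter_destroy() now taking a double pointer, the helper can clear the caller's handle itself, and calling it with an already-NULL iterator stays safe. A hedged caller-side sketch (gf_store_iter_new() is assumed to keep its usual (handle, &iter) shape; error handling trimmed):

    static int
    walk_store(gf_store_handle_t *shandle)
    {
        gf_store_iter_t *iter = NULL;
        char *key = NULL, *value = NULL;
        gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
        int32_t ret;

        ret = gf_store_iter_new(shandle, &iter);
        if (ret)
            return ret;

        while (gf_store_iter_get_next(iter, &key, &value, &op_errno) == 0) {
            /* consume key/value here */
            GF_FREE(key);
            GF_FREE(value);
            key = value = NULL;
        }

        (void)gf_store_iter_destroy(&iter); /* frees *iter and NULLs it */
        return 0;
    }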
diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c
index 4167db465f4..d9f1723856d 100644
--- a/libglusterfs/src/syncop-utils.c
+++ b/libglusterfs/src/syncop-utils.c
@@ -345,10 +345,8 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
gf_dirent_t *tmp = NULL;
uint32_t jobs_running = 0;
uint32_t qlen = 0;
- pthread_cond_t cond;
- pthread_mutex_t mut;
- gf_boolean_t cond_init = _gf_false;
- gf_boolean_t mut_init = _gf_false;
+ pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+ pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;
gf_dirent_t entries;
xlator_t *this = NULL;
@@ -378,15 +376,6 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
INIT_LIST_HEAD(&entries.list);
INIT_LIST_HEAD(&q.list);
- ret = pthread_mutex_init(&mut, NULL);
- if (ret)
- goto out;
- mut_init = _gf_true;
-
- ret = pthread_cond_init(&cond, NULL);
- if (ret)
- goto out;
- cond_init = _gf_true;
while ((ret = syncop_readdir(subvol, fd, 131072, offset, &entries, xdata,
NULL))) {
@@ -452,21 +441,17 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
out:
if (fd)
fd_unref(fd);
- if (mut_init && cond_init) {
- pthread_mutex_lock(&mut);
- {
- while (jobs_running)
- pthread_cond_wait(&cond, &mut);
- }
- pthread_mutex_unlock(&mut);
- gf_dirent_free(&q);
- gf_dirent_free(&entries);
+
+ pthread_mutex_lock(&mut);
+ {
+ while (jobs_running)
+ pthread_cond_wait(&cond, &mut);
}
+ pthread_mutex_unlock(&mut);
+
+ gf_dirent_free(&q);
+ gf_dirent_free(&entries);
- if (mut_init)
- pthread_mutex_destroy(&mut);
- if (cond_init)
- pthread_cond_destroy(&cond);
return ret | retval;
}
diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c
index 2eb7b49fc4c..df20cec559f 100644
--- a/libglusterfs/src/syncop.c
+++ b/libglusterfs/src/syncop.c
@@ -11,6 +11,10 @@
#include "glusterfs/syncop.h"
#include "glusterfs/libglusterfs-messages.h"
+#ifdef HAVE_TSAN_API
+#include <sanitizer/tsan_interface.h>
+#endif
+
int
syncopctx_setfsuid(void *uid)
{
@@ -97,6 +101,13 @@ syncopctx_setfsgroups(int count, const void *groups)
/* set/reset the ngrps, this is where reset of groups is handled */
opctx->ngrps = count;
+
+ if ((opctx->valid & SYNCOPCTX_GROUPS) == 0) {
+ /* This is the first time we are storing groups into the TLS structure
+ * so we mark the current thread so that it will be properly cleaned
+ * up when the thread terminates. */
+ gf_thread_needs_cleanup();
+ }
opctx->valid |= SYNCOPCTX_GROUPS;
out:
@@ -147,10 +158,14 @@ out:
return ret;
}
+void *
+syncenv_processor(void *thdata);
+
static void
__run(struct synctask *task)
{
struct syncenv *env = NULL;
+ int32_t total, ret, i;
env = task->env;
@@ -166,7 +181,6 @@ __run(struct synctask *task)
env->runcount--;
break;
case SYNCTASK_WAIT:
- env->waitcount--;
break;
case SYNCTASK_DONE:
gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_COMPLETED_TASK,
@@ -180,8 +194,27 @@ __run(struct synctask *task)
}
list_add_tail(&task->all_tasks, &env->runq);
- env->runcount++;
task->state = SYNCTASK_RUN;
+
+ env->runcount++;
+
+ total = env->procs + env->runcount - env->procs_idle;
+ if (total > env->procmax) {
+ total = env->procmax;
+ }
+ if (total > env->procs) {
+ for (i = 0; i < env->procmax; i++) {
+ if (env->proc[i].env == NULL) {
+ env->proc[i].env = env;
+ ret = gf_thread_create(&env->proc[i].processor, NULL,
+ syncenv_processor, &env->proc[i],
+ "sproc%d", i);
+ if ((ret < 0) || (++env->procs >= total)) {
+ break;
+ }
+ }
+ }
+ }
}
static void
@@ -203,7 +236,6 @@ __wait(struct synctask *task)
gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_REWAITING_TASK,
"re-waiting already waiting "
"task");
- env->waitcount--;
break;
case SYNCTASK_DONE:
gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_COMPLETED_TASK,
@@ -216,12 +248,11 @@ __wait(struct synctask *task)
}
list_add_tail(&task->all_tasks, &env->waitq);
- env->waitcount++;
task->state = SYNCTASK_WAIT;
}
void
-synctask_yield(struct synctask *task)
+synctask_yield(struct synctask *task, struct timespec *delta)
{
xlator_t *oldTHIS = THIS;
@@ -230,9 +261,16 @@ synctask_yield(struct synctask *task)
task->proc->sched.uc_flags &= ~_UC_TLSBASE;
#endif
+ task->delta = delta;
+
if (task->state != SYNCTASK_DONE) {
task->state = SYNCTASK_SUSPEND;
}
+
+#ifdef HAVE_TSAN_API
+ __tsan_switch_to_fiber(task->proc->tsan.fiber, 0);
+#endif
+
if (swapcontext(&task->ctx, &task->proc->sched) < 0) {
gf_msg("syncop", GF_LOG_ERROR, errno, LG_MSG_SWAPCONTEXT_FAILED,
"swapcontext failed");
@@ -242,6 +280,35 @@ synctask_yield(struct synctask *task)
}
void
+synctask_sleep(int32_t secs)
+{
+ struct timespec delta;
+ struct synctask *task;
+
+ task = synctask_get();
+
+ if (task == NULL) {
+ sleep(secs);
+ } else {
+ delta.tv_sec = secs;
+ delta.tv_nsec = 0;
+
+ synctask_yield(task, &delta);
+ }
+}
+
+static void
+__synctask_wake(struct synctask *task)
+{
+ task->woken = 1;
+
+ if (task->slept)
+ __run(task);
+
+ pthread_cond_broadcast(&task->env->cond);
+}
+
+void
synctask_wake(struct synctask *task)
{
struct syncenv *env = NULL;
@@ -250,13 +317,18 @@ synctask_wake(struct synctask *task)
pthread_mutex_lock(&env->mutex);
{
- task->woken = 1;
+ if (task->timer != NULL) {
+ if (gf_timer_call_cancel(task->xl->ctx, task->timer) != 0) {
+ goto unlock;
+ }
- if (task->slept)
- __run(task);
+ task->timer = NULL;
+ task->synccond = NULL;
+ }
- pthread_cond_broadcast(&env->cond);
+ __synctask_wake(task);
}
+unlock:
pthread_mutex_unlock(&env->mutex);
}
@@ -275,7 +347,7 @@ synctask_wrap(void)
task->state = SYNCTASK_DONE;
- synctask_yield(task);
+ synctask_yield(task, NULL);
}
void
@@ -294,6 +366,10 @@ synctask_destroy(struct synctask *task)
pthread_cond_destroy(&task->cond);
}
+#ifdef HAVE_TSAN_API
+ __tsan_destroy_fiber(task->tsan.fiber);
+#endif
+
GF_FREE(task);
}
@@ -404,6 +480,13 @@ synctask_create(struct syncenv *env, size_t stacksize, synctask_fn_t fn,
makecontext(&newtask->ctx, (void (*)(void))synctask_wrap, 0);
+#ifdef HAVE_TSAN_API
+ newtask->tsan.fiber = __tsan_create_fiber(0);
+ snprintf(newtask->tsan.name, TSAN_THREAD_NAMELEN, "<synctask of %s>",
+ this->name);
+ __tsan_set_fiber_name(newtask->tsan.fiber, newtask->tsan.name);
+#endif
+
newtask->state = SYNCTASK_INIT;
newtask->slept = 1;
@@ -415,11 +498,6 @@ synctask_create(struct syncenv *env, size_t stacksize, synctask_fn_t fn,
}
synctask_wake(newtask);
- /*
- * Make sure someone's there to execute anything we just put on the
- * run queue.
- */
- syncenv_scale(env);
return newtask;
err:
@@ -513,8 +591,12 @@ syncenv_task(struct syncproc *proc)
goto unlock;
}
- sleep_till.tv_sec = time(NULL) + SYNCPROC_IDLE_TIME;
+ env->procs_idle++;
+
+ sleep_till.tv_sec = gf_time() + SYNCPROC_IDLE_TIME;
ret = pthread_cond_timedwait(&env->cond, &env->mutex, &sleep_till);
+
+ env->procs_idle--;
}
task = list_entry(env->runq.next, struct synctask, all_tasks);
@@ -533,6 +615,34 @@ unlock:
return task;
}
+static void
+synctask_timer(void *data)
+{
+ struct synctask *task = data;
+ struct synccond *cond;
+
+ cond = task->synccond;
+ if (cond != NULL) {
+ pthread_mutex_lock(&cond->pmutex);
+
+ list_del_init(&task->waitq);
+ task->synccond = NULL;
+
+ pthread_mutex_unlock(&cond->pmutex);
+
+ task->ret = -ETIMEDOUT;
+ }
+
+ pthread_mutex_lock(&task->env->mutex);
+
+ gf_timer_call_cancel(task->xl->ctx, task->timer);
+ task->timer = NULL;
+
+ __synctask_wake(task);
+
+ pthread_mutex_unlock(&task->env->mutex);
+}
+
void
synctask_switchto(struct synctask *task)
{
@@ -548,6 +658,10 @@ synctask_switchto(struct synctask *task)
task->ctx.uc_flags &= ~_UC_TLSBASE;
#endif
+#ifdef HAVE_TSAN_API
+ __tsan_switch_to_fiber(task->tsan.fiber, 0);
+#endif
+
if (swapcontext(&task->proc->sched, &task->ctx) < 0) {
gf_msg("syncop", GF_LOG_ERROR, errno, LG_MSG_SWAPCONTEXT_FAILED,
"swapcontext failed");
@@ -565,7 +679,14 @@ synctask_switchto(struct synctask *task)
} else {
task->slept = 1;
__wait(task);
+
+ if (task->delta != NULL) {
+ task->timer = gf_timer_call_after(task->xl->ctx, *task->delta,
+ synctask_timer, task);
+ }
}
+
+ task->delta = NULL;
}
pthread_mutex_unlock(&env->mutex);
}
@@ -573,63 +694,27 @@ synctask_switchto(struct synctask *task)
void *
syncenv_processor(void *thdata)
{
- struct syncenv *env = NULL;
struct syncproc *proc = NULL;
struct synctask *task = NULL;
proc = thdata;
- env = proc->env;
- for (;;) {
- task = syncenv_task(proc);
- if (!task)
- break;
+#ifdef HAVE_TSAN_API
+ proc->tsan.fiber = __tsan_create_fiber(0);
+ snprintf(proc->tsan.name, TSAN_THREAD_NAMELEN, "<sched of syncenv@%p>",
+ proc);
+ __tsan_set_fiber_name(proc->tsan.fiber, proc->tsan.name);
+#endif
+ while ((task = syncenv_task(proc)) != NULL) {
synctask_switchto(task);
-
- syncenv_scale(env);
}
- return NULL;
-}
-
-void
-syncenv_scale(struct syncenv *env)
-{
- int diff = 0;
- int scale = 0;
- int i = 0;
- int ret = 0;
-
- pthread_mutex_lock(&env->mutex);
- {
- if (env->procs > env->runcount)
- goto unlock;
-
- scale = env->runcount;
- if (scale > env->procmax)
- scale = env->procmax;
- if (scale > env->procs)
- diff = scale - env->procs;
- while (diff) {
- diff--;
- for (; (i < env->procmax); i++) {
- if (env->proc[i].processor == 0)
- break;
- }
+#ifdef HAVE_TSAN_API
+ __tsan_destroy_fiber(proc->tsan.fiber);
+#endif
- env->proc[i].env = env;
- ret = gf_thread_create(&env->proc[i].processor, NULL,
- syncenv_processor, &env->proc[i],
- "sproc%03hx", env->procs & 0x3ff);
- if (ret)
- break;
- env->procs++;
- i++;
- }
- }
-unlock:
- pthread_mutex_unlock(&env->mutex);
+ return NULL;
}
/* The syncenv threads are cleaned up in this routine.
@@ -708,12 +793,13 @@ syncenv_new(size_t stacksize, int procmin, int procmax)
newenv->stacksize = stacksize;
newenv->procmin = procmin;
newenv->procmax = procmax;
+ newenv->procs_idle = 0;
for (i = 0; i < newenv->procmin; i++) {
newenv->proc[i].env = newenv;
ret = gf_thread_create(&newenv->proc[i].processor, NULL,
syncenv_processor, &newenv->proc[i], "sproc%d",
- newenv->procs);
+ i);
if (ret)
break;
newenv->procs++;
@@ -803,7 +889,7 @@ __synclock_lock(struct synclock *lock)
task->woken = 0;
list_add_tail(&task->waitq, &lock->waitq);
pthread_mutex_unlock(&lock->guard);
- synctask_yield(task);
+ synctask_yield(task, NULL);
/* task is removed from waitq in unlock,
* under lock->guard.*/
pthread_mutex_lock(&lock->guard);
@@ -956,6 +1042,136 @@ synclock_unlock(synclock_t *lock)
return ret;
}
+/* Condition variables */
+
+int32_t
+synccond_init(synccond_t *cond)
+{
+ int32_t ret;
+
+ INIT_LIST_HEAD(&cond->waitq);
+
+ ret = pthread_mutex_init(&cond->pmutex, NULL);
+ if (ret != 0) {
+ return -ret;
+ }
+
+ ret = pthread_cond_init(&cond->pcond, NULL);
+ if (ret != 0) {
+ pthread_mutex_destroy(&cond->pmutex);
+ }
+
+ return -ret;
+}
+
+void
+synccond_destroy(synccond_t *cond)
+{
+ pthread_cond_destroy(&cond->pcond);
+ pthread_mutex_destroy(&cond->pmutex);
+}
+
+int
+synccond_timedwait(synccond_t *cond, synclock_t *lock, struct timespec *delta)
+{
+ struct timespec now;
+ struct synctask *task = NULL;
+ int ret;
+
+ task = synctask_get();
+
+ if (task == NULL) {
+ if (delta != NULL) {
+ timespec_now_realtime(&now);
+ timespec_adjust_delta(&now, *delta);
+ }
+
+ pthread_mutex_lock(&cond->pmutex);
+
+ if (delta == NULL) {
+ ret = -pthread_cond_wait(&cond->pcond, &cond->pmutex);
+ } else {
+ ret = -pthread_cond_timedwait(&cond->pcond, &cond->pmutex, &now);
+ }
+ } else {
+ pthread_mutex_lock(&cond->pmutex);
+
+ list_add_tail(&task->waitq, &cond->waitq);
+ task->synccond = cond;
+
+ ret = synclock_unlock(lock);
+ if (ret == 0) {
+ pthread_mutex_unlock(&cond->pmutex);
+
+ synctask_yield(task, delta);
+
+ ret = synclock_lock(lock);
+ if (ret == 0) {
+ ret = task->ret;
+ }
+ task->ret = 0;
+
+ return ret;
+ }
+
+ list_del_init(&task->waitq);
+ }
+
+ pthread_mutex_unlock(&cond->pmutex);
+
+ return ret;
+}
+
+int
+synccond_wait(synccond_t *cond, synclock_t *lock)
+{
+ return synccond_timedwait(cond, lock, NULL);
+}
+
+void
+synccond_signal(synccond_t *cond)
+{
+ struct synctask *task;
+
+ pthread_mutex_lock(&cond->pmutex);
+
+ if (!list_empty(&cond->waitq)) {
+ task = list_first_entry(&cond->waitq, struct synctask, waitq);
+ list_del_init(&task->waitq);
+
+ pthread_mutex_unlock(&cond->pmutex);
+
+ synctask_wake(task);
+ } else {
+ pthread_cond_signal(&cond->pcond);
+
+ pthread_mutex_unlock(&cond->pmutex);
+ }
+}
+
+void
+synccond_broadcast(synccond_t *cond)
+{
+ struct list_head list;
+ struct synctask *task;
+
+ INIT_LIST_HEAD(&list);
+
+ pthread_mutex_lock(&cond->pmutex);
+
+ list_splice_init(&cond->waitq, &list);
+ pthread_cond_broadcast(&cond->pcond);
+
+ pthread_mutex_unlock(&cond->pmutex);
+
+ while (!list_empty(&list)) {
+ task = list_first_entry(&list, struct synctask, waitq);
+ list_del_init(&task->waitq);
+
+ synctask_wake(task);
+ }
+}
+
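A rough usage sketch for the synccond API added above (the lock, condition and predicate are hypothetical and assumed to be initialised elsewhere with synclock_init()/synccond_init()). Inside a synctask the wait yields the task; outside one it falls back to a plain pthread condition wait:

    static synclock_t lock;
    static synccond_t cond;
    static gf_boolean_t ready = _gf_false;

    static int
    wait_for_ready(struct timespec *delta)
    {
        int ret = 0;

        synclock_lock(&lock);
        while (!ready && (ret == 0)) {
            /* NULL delta waits forever; with a delta, -ETIMEDOUT is
             * returned once the timer armed in synctask_switchto() fires. */
            ret = synccond_timedwait(&cond, &lock, delta);
        }
        synclock_unlock(&lock);

        return ret;
    }

    /* The waker sets 'ready' under the same synclock and then calls
     * synccond_signal(&cond) or synccond_broadcast(&cond). */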
/* Barriers */
int
@@ -1025,7 +1241,7 @@ __syncbarrier_wait(struct syncbarrier *barrier, int waitfor)
/* called within a synctask */
list_add_tail(&task->waitq, &barrier->waitq);
pthread_mutex_unlock(&barrier->guard);
- synctask_yield(task);
+ synctask_yield(task, NULL);
pthread_mutex_lock(&barrier->guard);
} else {
/* called by a non-synctask */
@@ -2874,12 +3090,13 @@ syncop_seek(xlator_t *subvol, fd_t *fd, off_t offset, gf_seek_what_t what,
SYNCOP(subvol, (&args), syncop_seek_cbk, subvol->fops->seek, fd, offset,
what, xdata_in);
- if (*off)
- *off = args.offset;
-
- if (args.op_ret == -1)
+ if (args.op_ret < 0) {
return -args.op_errno;
- return args.op_ret;
+ } else {
+ if (off)
+ *off = args.offset;
+ return args.op_ret;
+ }
}
int
diff --git a/libglusterfs/src/syscall.c b/libglusterfs/src/syscall.c
index 1d88c8adac1..04400f98b6c 100644
--- a/libglusterfs/src/syscall.c
+++ b/libglusterfs/src/syscall.c
@@ -13,6 +13,10 @@
#include "glusterfs/mem-pool.h"
#include "glusterfs/libglusterfs-messages.h"
+#ifdef __FreeBSD__
+#include <sys/sysctl.h>
+#include <signal.h>
+#endif
#include <sys/types.h>
#include <utime.h>
#include <sys/time.h>
@@ -214,6 +218,15 @@ sys_unlink(const char *pathname)
}
int
+sys_unlinkat(int dfd, const char *pathname)
+{
+#ifdef GF_SOLARIS_HOST_OS
+ return FS_RET_CHECK0(solaris_unlinkat(dfd, pathname, 0), errno);
+#endif
+ return FS_RET_CHECK0(unlinkat(dfd, pathname, 0), errno);
+}
+
+int
sys_rmdir(const char *pathname)
{
return FS_RET_CHECK0(rmdir(pathname), errno);
@@ -226,6 +239,12 @@ sys_symlink(const char *oldpath, const char *newpath)
}
int
+sys_symlinkat(const char *oldpath, int dirfd, const char *newpath)
+{
+ return FS_RET_CHECK0(symlinkat(oldpath, dirfd, newpath), errno);
+}
+
+int
sys_rename(const char *oldpath, const char *newpath)
{
#ifdef GF_SOLARIS_HOST_OS
@@ -253,6 +272,12 @@ sys_link(const char *oldpath, const char *newpath)
}
int
+sys_linkat(int oldfd, const char *oldpath, int newfd, const char *newpath)
+{
+ return FS_RET_CHECK0(linkat(oldfd, oldpath, newfd, newpath, 0), errno);
+}
+
+int
sys_chmod(const char *path, mode_t mode)
{
return FS_RET_CHECK0(chmod(path, mode), errno);
@@ -485,7 +510,7 @@ sys_lsetxattr(const char *path, const char *name, const void *value,
#endif
#ifdef GF_BSD_HOST_OS
- return FS_RET_CHECK0(
+ return FS_RET_CHECK(
extattr_set_link(path, EXTATTR_NAMESPACE_USER, name, value, size),
errno);
#endif
@@ -603,7 +628,7 @@ sys_fsetxattr(int filedes, const char *name, const void *value, size_t size,
#endif
#ifdef GF_BSD_HOST_OS
- return FS_RET_CHECK0(
+ return FS_RET_CHECK(
extattr_set_fd(filedes, EXTATTR_NAMESPACE_USER, name, value, size),
errno);
#endif
@@ -828,7 +853,24 @@ sys_copy_file_range(int fd_in, off64_t *off_in, int fd_out, off64_t *off_out,
return syscall(SYS_copy_file_range, fd_in, off_in, fd_out, off_out, len,
flags);
#else
- return -ENOSYS;
+ errno = ENOSYS;
+ return -1;
#endif /* HAVE_COPY_FILE_RANGE_SYS */
#endif /* HAVE_COPY_FILE_RANGE */
}
+
+#ifdef __FreeBSD__
+int
+sys_kill(pid_t pid, int sig)
+{
+ return FS_RET_CHECK0(kill(pid, sig), errno);
+}
+
+int
+sys_sysctl(const int *name, u_int namelen, void *oldp, size_t *oldlenp,
+ const void *newp, size_t newlen)
+{
+ return FS_RET_CHECK0(sysctl(name, namelen, oldp, oldlenp, newp, newlen),
+ errno);
+}
+#endif
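After this hunk, the no-support fallback follows the same convention as the other sys_*() wrappers: return -1 and set errno, rather than returning a negative errno directly. A hypothetical caller (the wrapper's ssize_t return type and the fd/offset names are assumptions):

    static ssize_t
    do_copy(int in_fd, off64_t *off_in, int out_fd, off64_t *off_out, size_t len)
    {
        ssize_t copied;

        copied = sys_copy_file_range(in_fd, off_in, out_fd, off_out, len, 0);
        if (copied < 0) {
            if (errno == ENOSYS) {
                /* no copy_file_range() available; a real caller would fall
                 * back to an ordinary read/write loop here */
            }
            return -errno;
        }
        return copied;
    }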
diff --git a/libglusterfs/src/throttle-tbf.c b/libglusterfs/src/throttle-tbf.c
index 81efebd7efe..e11ca4f9d35 100644
--- a/libglusterfs/src/throttle-tbf.c
+++ b/libglusterfs/src/throttle-tbf.c
@@ -99,7 +99,7 @@ tbf_tokengenerator(void *arg)
token_gen_interval = bucket->token_gen_interval;
while (1) {
- usleep(token_gen_interval);
+ gf_nanosleep(token_gen_interval * GF_US_IN_NS);
LOCK(&bucket->lock);
{
diff --git a/libglusterfs/src/tier-ctr-interface.h b/libglusterfs/src/tier-ctr-interface.h
deleted file mode 100644
index 19680aa3bea..00000000000
--- a/libglusterfs/src/tier-ctr-interface.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _TIER_CTR_INTERFACE_H_
-#define _TIER_CTR_INTERFACE_H_
-
-#include "glusterfs/common-utils.h"
-#include "gfdb_data_store_types.h"
-
-#define GFDB_IPC_CTR_KEY "gfdb.ipc-ctr-op"
-
-/*
- * CTR IPC OPERATIONS
- *
- *
- */
-#define GFDB_IPC_CTR_QUERY_OPS "gfdb.ipc-ctr-query-op"
-#define GFDB_IPC_CTR_CLEAR_OPS "gfdb.ipc-ctr-clear-op"
-#define GFDB_IPC_CTR_GET_DB_PARAM_OPS "gfdb.ipc-ctr-get-db-parm"
-#define GFDB_IPC_CTR_GET_DB_VERSION_OPS "gfdb.ipc-ctr-get-db-version"
-#define GFDB_IPC_CTR_SET_COMPACT_PRAGMA "gfdb.ipc-ctr-set-compact-pragma"
-/*
- * CTR IPC INPUT/OUTPUT
- *
- *
- */
-#define GFDB_IPC_CTR_GET_QFILE_PATH "gfdb.ipc-ctr-get-qfile-path"
-#define GFDB_IPC_CTR_GET_QUERY_PARAMS "gfdb.ipc-ctr-get-query-parms"
-#define GFDB_IPC_CTR_RET_QUERY_COUNT "gfdb.ipc-ctr-ret-rec-count"
-#define GFDB_IPC_CTR_GET_DB_KEY "gfdb.ipc-ctr-get-params-key"
-#define GFDB_IPC_CTR_RET_DB_VERSION "gfdb.ipc-ctr-ret-db-version"
-
-/*
- * gfdb ipc ctr params for query
- *
- *
- */
-typedef struct gfdb_ipc_ctr_params {
- gf_boolean_t is_promote;
- int write_freq_threshold;
- int read_freq_threshold;
- gfdb_time_t time_stamp;
- int query_limit;
- gf_boolean_t emergency_demote;
-} gfdb_ipc_ctr_params_t;
-
-#endif
diff --git a/libglusterfs/src/timer.c b/libglusterfs/src/timer.c
index 1e19ffdff22..66c861b04cd 100644
--- a/libglusterfs/src/timer.c
+++ b/libglusterfs/src/timer.c
@@ -137,7 +137,8 @@ gf_timer_proc(void *data)
timespec_now(&now);
event = list_first_entry(&reg->active, gf_timer_t, list);
if (TS(now) < TS(event->at)) {
- pthread_cond_timedwait(&reg->cond, &reg->lock, &event->at);
+ now = event->at;
+ pthread_cond_timedwait(&reg->cond, &reg->lock, &now);
} else {
event->fired = _gf_true;
list_del_init(&event->list);
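The timer fix above waits on a private copy of the event's absolute expiry rather than on &event->at; the registry lock is released for the duration of pthread_cond_timedwait(), so the list entry may presumably be cancelled or freed while the wait is in progress. A minimal sketch of the pattern (names are illustrative):

    #include <pthread.h>
    #include <time.h>

    static void
    wait_until(pthread_cond_t *cond, pthread_mutex_t *lock,
               const struct timespec *at)
    {
        struct timespec deadline = *at; /* stable local copy for the wait */

        pthread_cond_timedwait(cond, lock, &deadline);
    }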
diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
index 9906809f7aa..9a2582d45d5 100644
--- a/libglusterfs/src/xlator.c
+++ b/libglusterfs/src/xlator.c
@@ -184,9 +184,11 @@ xlator_volopt_dynload(char *xlator_type, void **dl_handle,
volume_opt_list_t *opt_list)
{
int ret = -1;
+ int flag = 0;
char *name = NULL;
void *handle = NULL;
xlator_api_t *xlapi = NULL;
+ volume_option_t *opt = NULL;
GF_VALIDATE_OR_GOTO("xlator", xlator_type, out);
@@ -194,8 +196,10 @@ xlator_volopt_dynload(char *xlator_type, void **dl_handle,
* need this check */
if (!strstr(xlator_type, "rpc-transport"))
ret = gf_asprintf(&name, "%s/%s.so", XLATORDIR, xlator_type);
- else
+ else {
+ flag = 1;
ret = gf_asprintf(&name, "%s/%s.so", XLATORPARENTDIR, xlator_type);
+ }
if (-1 == ret) {
goto out;
}
@@ -206,24 +210,34 @@ xlator_volopt_dynload(char *xlator_type, void **dl_handle,
handle = dlopen(name, RTLD_NOW);
if (!handle) {
- gf_msg("xlator", GF_LOG_WARNING, 0, LG_MSG_DLOPEN_FAILED, "%s",
- dlerror());
+ gf_smsg("xlator", GF_LOG_WARNING, 0, LG_MSG_DLOPEN_FAILED, "error=%s",
+ dlerror(), NULL);
goto out;
}
- /* check new struct first, and then check this */
- xlapi = dlsym(handle, "xlator_api");
- if (!xlapi) {
- gf_msg("xlator", GF_LOG_ERROR, 0, LG_MSG_DLSYM_ERROR,
- "dlsym(xlator_api) missing: %s", dlerror());
- goto out;
- }
+ if (flag == 0) {
+ /* check new struct first, and then check this */
+ xlapi = dlsym(handle, "xlator_api");
+ if (!xlapi) {
+ gf_smsg("xlator", GF_LOG_ERROR, 0, LG_MSG_DLSYM_ERROR, "error=%s",
+ dlerror(), NULL);
+ goto out;
+ }
- opt_list->given_opt = xlapi->options;
- if (!opt_list->given_opt) {
- gf_msg("xlator", GF_LOG_ERROR, 0, LG_MSG_LOAD_FAILED,
- "Failed to load xlator options table");
- goto out;
+ opt_list->given_opt = xlapi->options;
+ if (!opt_list->given_opt) {
+ gf_smsg("xlator", GF_LOG_ERROR, 0, LG_MSG_LOAD_FAILED, NULL);
+ goto out;
+ }
+ } else {
+ opt = dlsym(handle, "options");
+ if (!opt) {
+ gf_smsg("xlator", GF_LOG_ERROR, 0, LG_MSG_DLSYM_ERROR, "error=%s",
+ dlerror(), NULL);
+ goto out;
+ }
+
+ opt_list->given_opt = opt;
}
*dl_handle = handle;
@@ -246,21 +260,22 @@ xlator_dynload_apis(xlator_t *xl)
void *handle = NULL;
volume_opt_list_t *vol_opt = NULL;
xlator_api_t *xlapi = NULL;
+ int i = 0;
handle = xl->dlhandle;
xlapi = dlsym(handle, "xlator_api");
if (!xlapi) {
- gf_msg("xlator", GF_LOG_ERROR, 0, LG_MSG_DLSYM_ERROR,
- "dlsym(xlator_api) missing: %s", dlerror());
+ gf_smsg("xlator", GF_LOG_ERROR, 0, LG_MSG_DLSYM_ERROR, "dlsym=%s",
+ dlerror(), NULL);
ret = -1;
goto out;
}
xl->fops = xlapi->fops;
if (!xl->fops) {
- gf_msg("xlator", GF_LOG_WARNING, 0, LG_MSG_DLSYM_ERROR,
- "%s: struct missing (fops)", xl->name);
+ gf_smsg("xlator", GF_LOG_WARNING, 0, LG_MSG_STRUCT_MISS, "name=%s",
+ xl->name, NULL);
goto out;
}
@@ -271,8 +286,8 @@ xlator_dynload_apis(xlator_t *xl)
xl->init = xlapi->init;
if (!xl->init) {
- gf_msg("xlator", GF_LOG_WARNING, 0, LG_MSG_DLSYM_ERROR,
- "%s: method missing (init)", xl->name);
+ gf_smsg("xlator", GF_LOG_WARNING, 0, LG_MSG_METHOD_MISS, "name=%s",
+ xl->name, NULL);
goto out;
}
@@ -343,6 +358,10 @@ xlator_dynload_apis(xlator_t *xl)
memcpy(xl->op_version, xlapi->op_version,
sizeof(uint32_t) * GF_MAX_RELEASES);
+ for (i = 0; i < GF_FOP_MAXVALUE; i++) {
+ gf_latency_reset(&xl->stats.interval.latencies[i]);
+ }
+
ret = 0;
out:
return ret;
@@ -370,8 +389,8 @@ xlator_dynload(xlator_t *xl)
handle = dlopen(name, RTLD_NOW);
if (!handle) {
- gf_msg("xlator", GF_LOG_WARNING, 0, LG_MSG_DLOPEN_FAILED, "%s",
- dlerror());
+ gf_smsg("xlator", GF_LOG_WARNING, 0, LG_MSG_DLOPEN_FAILED, "error=%s",
+ dlerror(), NULL);
goto out;
}
xl->dlhandle = handle;
@@ -438,10 +457,8 @@ xlator_set_inode_lru_limit(xlator_t *this, void *data)
if (this->itable) {
if (!data) {
- gf_msg(this->name, GF_LOG_WARNING, 0, LG_MSG_INVALID_ENTRY,
- "input data is NULL. "
- "Cannot update the lru limit of the inode"
- " table. Continuing with older value");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, LG_MSG_INPUT_DATA_NULL,
+ NULL);
goto out;
}
inode_lru_limit = *(int *)data;
@@ -615,18 +632,15 @@ xlator_init(xlator_t *xl)
xl->instance_name = NULL;
GF_ATOMIC_INIT(xl->xprtrefcnt, 0);
if (!xl->init) {
- gf_msg(xl->name, GF_LOG_WARNING, 0, LG_MSG_INIT_FAILED,
- "No init() found");
+ gf_smsg(xl->name, GF_LOG_WARNING, 0, LG_MSG_INIT_FAILED, NULL);
goto out;
}
ret = __xlator_init(xl);
if (ret) {
- gf_msg(xl->name, GF_LOG_ERROR, 0, LG_MSG_VOLUME_ERROR,
- "Initialization of volume '%s' failed,"
- " review your volfile again",
- xl->name);
+ gf_smsg(xl->name, GF_LOG_ERROR, 0, LG_MSG_VOLUME_ERROR, "name=%s",
+ xl->name, NULL);
goto out;
}
@@ -812,7 +826,7 @@ xlator_members_free(xlator_t *xl)
GF_FREE(xl->name);
GF_FREE(xl->type);
- if (!(xl->ctx && xl->ctx->cmd_args.valgrind) && xl->dlhandle)
+ if (!(xl->ctx && xl->ctx->cmd_args.vgtool != _gf_none) && xl->dlhandle)
dlclose(xl->dlhandle);
if (xl->options)
dict_unref(xl->options);
@@ -862,8 +876,7 @@ xlator_tree_free_members(xlator_t *tree)
xlator_t *prev = tree;
if (!tree) {
- gf_msg("parser", GF_LOG_ERROR, 0, LG_MSG_TREE_NOT_FOUND,
- "Translator tree not found");
+ gf_smsg("parser", GF_LOG_ERROR, 0, LG_MSG_TREE_NOT_FOUND, NULL);
return -1;
}
@@ -883,8 +896,7 @@ xlator_tree_free_memacct(xlator_t *tree)
xlator_t *prev = tree;
if (!tree) {
- gf_msg("parser", GF_LOG_ERROR, 0, LG_MSG_TREE_NOT_FOUND,
- "Translator tree not found");
+ gf_smsg("parser", GF_LOG_ERROR, 0, LG_MSG_TREE_NOT_FOUND, NULL);
return -1;
}
@@ -945,6 +957,8 @@ xlator_mem_cleanup(xlator_t *this)
xlator_list_t **trav_p = NULL;
xlator_t *top = NULL;
xlator_t *victim = NULL;
+ glusterfs_graph_t *graph = NULL;
+ gf_boolean_t graph_cleanup = _gf_false;
if (this->call_cleanup || !this->ctx)
return;
@@ -952,6 +966,12 @@ xlator_mem_cleanup(xlator_t *this)
this->call_cleanup = 1;
ctx = this->ctx;
+ inode_table = this->itable;
+ if (inode_table) {
+ inode_table_destroy(inode_table);
+ this->itable = NULL;
+ }
+
xlator_call_fini(trav);
while (prev) {
@@ -960,12 +980,6 @@ xlator_mem_cleanup(xlator_t *this)
prev = trav;
}
- inode_table = this->itable;
- if (inode_table) {
- inode_table_destroy(inode_table);
- this->itable = NULL;
- }
-
if (this->fini) {
this->fini(this);
}
@@ -975,17 +989,28 @@ xlator_mem_cleanup(xlator_t *this)
if (ctx->active) {
top = ctx->active->first;
LOCK(&ctx->volfile_lock);
- /* TODO here we have leak for xlator node in a graph */
- /* Need to move only top xlator from a graph */
for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
victim = (*trav_p)->xlator;
if (victim->call_cleanup && !strcmp(victim->name, this->name)) {
+ graph_cleanup = _gf_true;
(*trav_p) = (*trav_p)->next;
break;
}
}
UNLOCK(&ctx->volfile_lock);
}
+
+ if (graph_cleanup) {
+ prev = this;
+ graph = ctx->active;
+ pthread_mutex_lock(&graph->mutex);
+ while (prev) {
+ trav = prev->next;
+ GF_FREE(prev);
+ prev = trav;
+ }
+ pthread_mutex_unlock(&graph->mutex);
+ }
}
void
@@ -1344,9 +1369,9 @@ is_gf_log_command(xlator_t *this, const char *name, char *value, size_t size)
/* Some crude way to change the log-level of process */
if (!strcmp(name, "trusted.glusterfs.set-log-level")) {
- gf_msg("glusterfs", gf_log_get_loglevel(), 0, LG_MSG_SET_LOG_LEVEL,
- "setting log level to %d (old-value=%d)", log_level,
- gf_log_get_loglevel());
+ gf_smsg("glusterfs", gf_log_get_loglevel(), 0, LG_MSG_SET_LOG_LEVEL,
+ "new-value=%d", log_level, "old-value=%d",
+ gf_log_get_loglevel(), NULL);
gf_log_set_loglevel(this->ctx, log_level);
ret = 0;
goto out;
@@ -1354,9 +1379,9 @@ is_gf_log_command(xlator_t *this, const char *name, char *value, size_t size)
if (!strcmp(name, "trusted.glusterfs.fuse.set-log-level")) {
/* */
- gf_msg(this->name, gf_log_get_xl_loglevel(this), 0,
- LG_MSG_SET_LOG_LEVEL, "setting log level to %d (old-value=%d)",
- log_level, gf_log_get_xl_loglevel(this));
+ gf_smsg(this->name, gf_log_get_xl_loglevel(this), 0,
+ LG_MSG_SET_LOG_LEVEL, "new-value=%d", log_level, "old-value=%d",
+ gf_log_get_xl_loglevel(this), NULL);
gf_log_set_xl_loglevel(this, log_level);
ret = 0;
goto out;
@@ -1372,10 +1397,9 @@ is_gf_log_command(xlator_t *this, const char *name, char *value, size_t size)
while (trav) {
snprintf(key, 1024, "trusted.glusterfs.%s.set-log-level", trav->name);
if (fnmatch(name, key, FNM_NOESCAPE) == 0) {
- gf_msg(trav->name, gf_log_get_xl_loglevel(trav), 0,
- LG_MSG_SET_LOG_LEVEL,
- "setting log level to %d (old-value=%d)", log_level,
- gf_log_get_xl_loglevel(trav));
+ gf_smsg(trav->name, gf_log_get_xl_loglevel(trav), 0,
+ LG_MSG_SET_LOG_LEVEL, "new-value=%d", log_level,
+ "old-value=%d", gf_log_get_xl_loglevel(trav), NULL);
gf_log_set_xl_loglevel(trav, log_level);
ret = 0;
}
@@ -1407,9 +1431,7 @@ glusterd_check_log_level(const char *value)
}
if (log_level == -1)
- gf_msg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INIT_FAILED,
- "Invalid log-level. possible values are "
- "DEBUG|WARNING|ERROR|CRITICAL|NONE|TRACE");
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_INIT, NULL);
return log_level;
}
@@ -1486,8 +1508,7 @@ gluster_graph_take_reference(xlator_t *tree)
xlator_t *prev = tree;
if (!tree) {
- gf_msg("parser", GF_LOG_ERROR, 0, LG_MSG_TREE_NOT_FOUND,
- "Translator tree not found");
+ gf_smsg("parser", GF_LOG_ERROR, 0, LG_MSG_TREE_NOT_FOUND, NULL);
return;
}
@@ -1524,15 +1545,15 @@ xlator_is_cleanup_starting(xlator_t *this)
xlator_t *xl = NULL;
if (!this) {
- gf_msg("xlator", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "xlator object is null, returning false");
+ gf_smsg("xlator", GF_LOG_WARNING, EINVAL, LG_MSG_OBJECT_NULL, "xlator",
+ NULL);
goto out;
}
graph = this->graph;
if (!graph) {
- gf_msg("xlator", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "Graph is not set for xlator %s", this->name);
+ gf_smsg("xlator", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_NOT_SET,
+ "name=%s", this->name, NULL);
goto out;
}
@@ -1542,3 +1563,31 @@ xlator_is_cleanup_starting(xlator_t *this)
out:
return cleanup;
}
+
+int
+graph_total_client_xlator(glusterfs_graph_t *graph)
+{
+ xlator_t *xl = NULL;
+ int count = 0;
+
+ if (!graph) {
+ gf_smsg("xlator", GF_LOG_WARNING, EINVAL, LG_MSG_OBJECT_NULL, "graph",
+ NULL);
+ goto out;
+ }
+
+ xl = graph->first;
+ if (!strcmp(xl->type, "protocol/server")) {
+ gf_msg_debug(xl->name, 0, "Return because it is a server graph");
+ return 0;
+ }
+
+ while (xl) {
+ if (strcmp(xl->type, "protocol/client") == 0) {
+ count++;
+ }
+ xl = xl->next;
+ }
+out:
+ return count;
+}
diff --git a/rfc.sh b/rfc.sh
index 019bee113d8..e7faec9ea0f 100755
--- a/rfc.sh
+++ b/rfc.sh
@@ -4,7 +4,29 @@
# i.e. where we are interested in the result of a command,
# we have to run the command in an if-statement.
-ORIGIN=${GLUSTER_ORIGIN:-origin}
+UPSTREAM=${GLUSTER_UPSTREAM}
+if [ "x$UPSTREAM" -eq "x" ]; then
+ for rmt in $(git remote); do
+ rmt_repo=$(git remote show $rmt -n | grep Fetch | awk '{ print $3 }');
+ if [ "$rmt_repo" = "git@github.com:gluster/glusterfs" ]; then
+ UPSTREAM=$rmt
+ echo "Picked $rmt as upstream remote"
+ break
+ fi
+ done
+fi
+
+USER_REPO=${GLUSTER_USER_REPO:-origin}
+if [ "x${USER_REPO}" -eq "x${UPSTREAM}" ] ; then
+ echo "When you submit patches, it should get submitted to your fork, not to upstream directly"
+ echo "If you are not sure, check `for rmt in $(git remote); do git remote show $rmt -n; done`"
+ echo "And pick the correct remote you would like to push to and do `export GLUSTER_USER_REPO=$rmt`"
+ echo ""
+ echo "Exiting..."
+ exit 1
+fi
+
+
while getopts "v" opt; do
case $opt in
@@ -18,7 +40,7 @@ done
shift $((OPTIND-1))
-branch="release-7";
+branch="devel";
set_hooks_commit_msg()
{
@@ -50,21 +72,21 @@ is_num()
backport_id_message()
{
echo ""
- echo "This commit is to a non-master branch, and hence is treated as a backport."
+ echo "This commit is to a non-devel branch, and hence is treated as a backport."
echo ""
echo "For backports we would like to retain the same gerrit Change-Id across"
echo "branches. On auto inspection it is found that a gerrit Change-Id is"
- echo "missing, or the Change-Id is not found on your local master"
+ echo "missing, or the Change-Id is not found on your local devel branch"
echo ""
echo "This could mean a few things:"
echo " 1. This is not a backport, hence choose Y on the prompt to proceed"
- echo " 2. Your $ORIGIN master is not up to date, hence the script is unable"
- echo " to find the corresponding Change-Id on master. Either choose N,"
+ echo " 2. Your $USER_REPO/devel is not up to date, hence the script is unable"
+ echo " to find the corresponding Change-Id on devel. Either choose N,"
echo " 'git fetch', and try again, OR if you are sure you used the"
echo " same Change-Id, choose Y at the prompt to proceed"
echo " 3. You commented or removed the Change-Id in your commit message after"
echo " cherry picking the commit. Choose N, fix the commit message to"
- echo " use the same Change-Id as master (git commit --amend), resubmit"
+ echo " use the same Change-Id as 'devel' (git commit --amend), resubmit"
echo ""
}
@@ -72,8 +94,8 @@ check_backport()
{
moveon='N'
- # Backports are never made to master
- if [ $branch = "master" ]; then
+ # Backports are never made to 'devel'
+ if [ $branch = "devel" ]; then
return;
fi
@@ -86,22 +108,22 @@ check_backport()
echo -n "Did not find a Change-Id for a possible backport. Continue (y/N): "
read moveon
else
- # Search master for the same change ID (rebase_changes has run, so we
+ # Search 'devel' for the same change ID (rebase_changes has run, so we
# should never not find a Change-Id)
- mchangeid=$(git log $ORIGIN/master --format='%b' --grep="^Change-Id: ${changeid}" | grep ${changeid} | awk '{print $2}')
+ mchangeid=$(git log $UPSTREAM/devel --format='%b' --grep="^Change-Id: ${changeid}" | grep ${changeid} | awk '{print $2}')
- # Check if we found the change ID on master, else throw a message to
+ # Check if we found the change ID on 'devel', else throw a message to
# decide if we should continue.
- # NOTE: If master was not rebased, we will not find the Change-ID and
+ # NOTE: If 'devel' was not rebased, we will not find the Change-ID and
# could hit a false positive case here (or if someone checks out some
- # other branch as master).
+ # other branch as 'devel').
if [ "${mchangeid}" = "${changeid}" ]; then
moveon="Y"
else
backport_id_message;
echo "Change-Id of commit: $changeid"
- echo "Change-Id on master: $mchangeid"
- echo -n "Did not find mentioned Change-Id on master for a possible backport. Continue (y/N): "
+ echo "Change-Id on devel: $mchangeid"
+ echo -n "Did not find mentioned Change-Id on 'devel' for a possible backport. Continue (y/N): "
read moveon
fi
fi
@@ -116,10 +138,51 @@ check_backport()
rebase_changes()
{
- GIT_EDITOR=$0 git rebase -i $ORIGIN/$branch;
+ GIT_EDITOR=$0 git rebase -i $UPSTREAM/$branch;
}
+# Regex elaborated:
+# grep options:
+# -w -> --word-regexp (from the man page)
+# Select only those lines containing matches that form whole words.
+# The test is that the matching substring must either be at the
+# beginning of the line, or preceded by a non-word constituent
+# character. Similarly, it must be either at the end of the line or
+# followed by a non-word constituent character. Word-constituent
+# characters are letters, digits, and the underscore.
+#
+# IOW, the above helps us find the pattern with leading or trailing
+# spaces or non-word constituents like , or ;
+#
+# -i -> --ignore-case (case insensitive search)
+#
+# -o -> --only-matching (only print matching portion of the line)
+#
+# -E -> --extended-regexp (use extended regular expression)
+#
+# ^
+# The search begins at the start of each line
+#
+# [[:space:]]*
+# Any number of spaces is accepted
+#
+# (Fixes|Updates)
+# Finds 'Fixes' OR 'Updates' in any case combination
+#
+# (:)?
+# Followed by an optional : (colon)
+#
+# [[:space:]]+
+# Followed by 1 or more spaces
+#
+# #
+# Followed by #
+#
+# [[:digit:]]+
+# Followed by 1 or more digits
+REFRE="^[[:space:]]*(Fixes|Updates)(:)?[[:space:]]+#[[:digit:]]+"
+
editor_mode()
{
if [ $(basename "$1") = "git-rebase-todo" ]; then
@@ -130,42 +193,32 @@ editor_mode()
if [ $(basename "$1") = "COMMIT_EDITMSG" ]; then
# see note above function warn_reference_missing for regex elaboration
# Lets first check for github issues
- ref=$(git log -n1 --format='%b' | grep -ow -E "([fF][iI][xX][eE][sS]|[uU][pP][dD][aA][tT][eE][sS])(:)?[[:space:]]+(gluster\/glusterfs)?#[[:digit:]]+" | awk -F '#' '{print $2}');
- if [ "x${ref}" = "x" ]; then
- # if not found, check for bugs
- ref=$(git log -n1 --format='%b' | grep -ow -E "([fF][iI][xX][eE][sS]|[uU][pP][dD][aA][tT][eE][sS])(:)?[[:space:]]+bz#[[:digit:]]+" | awk -F '#' '{print $2}');
- fi
-
+ ref=$(git log -n1 --format='%b' | grep -iow -E "${REFRE}" | awk -F '#' '{print $2}');
if [ "x${ref}" != "x" ]; then
return;
fi
while true; do
echo Commit: "\"$(head -n 1 $1)\""
- echo -n "Reference (Bugzilla ID or Github Issue ID): "
- read bug
- if [ -z "$bug" ]; then
+ echo -n "Github Issue ID: "
+ read issue
+ if [ -z "$issue" ]; then
return;
fi
- if ! is_num "$bug"; then
- echo "Invalid reference ID ($bug)!!!";
+ if ! is_num "$issue"; then
+ echo "Invalid Github Issue ID!!!";
continue;
fi
- bz_string="bz"
- if [ $bug -lt 742000 ]; then
- bz_string=""
- fi
-
- echo "Select yes '(y)' if this patch fixes the bug/feature completely,"
+ echo "Select yes '(y)' if this patch fixes the issue/feature completely,"
echo -n "or is the last of the patchset which brings feature (Y/n): "
read fixes
- fixes_string="fixes"
+ fixes_string="Fixes"
if [ "${fixes}" = 'N' ] || [ "${fixes}" = 'n' ]; then
- fixes_string="updates"
+ fixes_string="Updates"
fi
- sed "/^Change-Id:/{p; s/^.*$/${fixes_string}: ${bz_string}#${bug}/;}" $1 > $1.new && \
+ sed "/^Change-Id:/{p; s/^.*$/${fixes_string}: #${issue}/;}" $1 > $1.new && \
mv $1.new $1;
return;
done
@@ -181,72 +234,33 @@ EOF
assert_diverge()
{
- git diff $ORIGIN/$branch..HEAD | grep -q .;
+ git diff $UPSTREAM/$branch..HEAD | grep -q .;
}
-
-# Regex elaborated:
-# grep -w -> --word-regexp (from the man page)
-# Select only those lines containing matches that form whole words.
-# The test is that the matching substring must either be at the
-# beginning of the line, or preceded by a non-word constituent
-# character. Similarly, it must be either at the end of the line or
-# followed by a non-word constituent character. Word-constituent
-# characters are letters, digits, and the underscore.
-# IOW, the above helps us find the pattern with leading or training spaces
-# or non word consituents like , or ;
-#
-# [fF][iI][xX][eE][sS]|[uU][pP][dD][aA][tT][eE][sS])
-# Finds 'fixes' OR 'updates' in any case combination
-#
-# (:)?
-# Followed by an optional : (colon)
-#
-# [[:space:]]+
-# followed by 1 or more spaces
-#
-# (gluster\/glusterfs)?
-# Followed by 0 or more gluster/glusterfs
-#
-# #
-# Followed by #
-#
-# [[:digit:]]+
-# Followed by 1 or more digits
warn_reference_missing()
{
echo ""
echo "=== Missing a reference in commit! ==="
echo ""
- echo "Gluster commits are made with a reference to a bug or a github issue"
- echo ""
- echo "Submissions that are enhancements (IOW, not functional"
- echo "bug fixes, but improvements of any nature to the code) are tracked"
- echo "using github issues [1]."
+ echo "Gluster commits are made with a reference to a github issue"
echo ""
- echo "Submissions that are bug fixes are tracked using Bugzilla [2]."
+ echo "A check on the commit message, reveals that there is no "
+ echo "github issue referenced in the commit message."
echo ""
- echo "A check on the commit message, reveals that there is no bug or"
- echo "github issue referenced in the commit message"
+ echo "https://github.com/gluster/glusterfs/issues/new"
echo ""
- echo "[1] https://github.com/gluster/glusterfs/issues/new"
- echo "[2] https://bugzilla.redhat.com/enter_bug.cgi?product=GlusterFS"
+ echo "Please open an issue and reference the same in the commit message "
+ echo "using the following tags:"
echo ""
- echo "Please file an issue or a bug report and reference the same in the"
- echo "commit message using the following tags:"
- echo "GitHub Issues:"
- echo "\"Fixes: gluster/glusterfs#n\" OR \"Updates: gluster/glusterfs#n\","
- echo "\"Fixes: #n\" OR \"Updates: #n\","
- echo "Bugzilla ID:"
- echo "\"Fixes: bz#n\" OR \"Updates: bz#n\","
- echo "where n is the issue or bug number"
+ echo "\"Fixes: #NNNN\" OR \"Updates: #NNNN\","
+ echo "where NNNN is the issue id"
echo ""
echo "You may abort the submission choosing 'N' below and use"
echo "'git commit --amend' to add the issue reference before posting"
echo "to gerrit."
echo ""
- echo -n "Missing reference to a bug or a github issue. Continue (y/N): "
+ echo -n "Missing reference to a github issue. Continue (y/N): "
read moveon
if [ "${moveon}" = 'Y' ] || [ "${moveon}" = 'y' ]; then
return;
@@ -266,7 +280,7 @@ main()
return;
fi
- git fetch $ORIGIN;
+ git fetch $UPSTREAM;
rebase_changes;
@@ -274,12 +288,12 @@ main()
assert_diverge;
- # see note above function warn_reference_missing for regex elaboration
- reference=$(git log -n1 --format='%b' | grep -ow -E "([fF][iI][xX][eE][sS]|[uU][pP][dD][aA][tT][eE][sS])(:)?[[:space:]]+(gluster\/glusterfs)?(bz)?#[[:digit:]]+" | awk -F '#' '{print $2}');
+ # see note above variable REFRE for regex elaboration
+ reference=$(git log -n1 --format='%b' | grep -iow -E "${REFRE}" | awk -F '#' '{print $2}');
- # If this is a commit against master and does not have a bug ID or a github
+ # If this is a commit against 'devel' and does not have a github
# issue reference. Warn the contributor that one of the 2 is required
- if [ -z "${reference}" ] && [ $branch = "master" ]; then
+ if [ -z "${reference}" ] && [ $branch = "devel" ]; then
warn_reference_missing;
fi
@@ -307,9 +321,9 @@ main()
fi
if [ -z "${reference}" ]; then
- $drier git push $ORIGIN HEAD:refs/for/$branch/rfc;
+ $drier git push $USER_REPO HEAD:temp_${branch}/$(date +%Y-%m-%d_%s);
else
- $drier git push $ORIGIN HEAD:refs/for/$branch/ref-${reference};
+ $drier git push $USER_REPO HEAD:issue${reference}_${branch};
fi
}
diff --git a/rpc/rpc-lib/src/Makefile.am b/rpc/rpc-lib/src/Makefile.am
index 81a96476883..35c9db07e7f 100644
--- a/rpc/rpc-lib/src/Makefile.am
+++ b/rpc/rpc-lib/src/Makefile.am
@@ -2,7 +2,7 @@ lib_LTLIBRARIES = libgfrpc.la
libgfrpc_la_SOURCES = auth-unix.c rpcsvc-auth.c rpcsvc.c auth-null.c \
rpc-transport.c xdr-rpc.c xdr-rpcclnt.c rpc-clnt.c auth-glusterfs.c \
- rpc-drc.c $(CONTRIBDIR)/sunrpc/xdr_sizeof.c rpc-clnt-ping.c \
+ rpc-drc.c rpc-clnt-ping.c \
autoscale-threads.c mgmt-pmap.c
EXTRA_DIST = libgfrpc.sym
diff --git a/rpc/rpc-lib/src/libgfrpc.sym b/rpc/rpc-lib/src/libgfrpc.sym
index 54d1be1112f..e026d80259b 100644
--- a/rpc/rpc-lib/src/libgfrpc.sym
+++ b/rpc/rpc-lib/src/libgfrpc.sym
@@ -65,3 +65,4 @@ rpc_transport_unix_options_build
rpc_transport_unref
rpc_clnt_mgmt_pmap_signout
rpcsvc_autoscale_threads
+rpcsvc_statedump
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index 7275d7568b6..0cb5862e9a9 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -309,6 +309,7 @@ enum glusterd_mgmt_v3_procnum {
GLUSTERD_MGMT_V3_PRE_VALIDATE,
GLUSTERD_MGMT_V3_BRICK_OP,
GLUSTERD_MGMT_V3_COMMIT,
+ GLUSTERD_MGMT_V3_POST_COMMIT,
GLUSTERD_MGMT_V3_POST_VALIDATE,
GLUSTERD_MGMT_V3_UNLOCK,
GLUSTERD_MGMT_V3_MAXVALUE,
diff --git a/rpc/rpc-lib/src/rpc-clnt-ping.c b/rpc/rpc-lib/src/rpc-clnt-ping.c
index 2298ef6394f..31f17841bea 100644
--- a/rpc/rpc-lib/src/rpc-clnt-ping.c
+++ b/rpc/rpc-lib/src/rpc-clnt-ping.c
@@ -122,7 +122,7 @@ rpc_clnt_ping_timer_expired(void *rpc_ptr)
goto out;
}
- clock_gettime(CLOCK_REALTIME, &current);
+ timespec_now_realtime(&current);
pthread_mutex_lock(&conn->lock);
{
unref = rpc_clnt_remove_ping_timer_locked(rpc);
diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c
index 8ef05378351..517037c4a5d 100644
--- a/rpc/rpc-lib/src/rpc-clnt.c
+++ b/rpc/rpc-lib/src/rpc-clnt.c
@@ -97,7 +97,7 @@ call_bail(void *data)
struct saved_frame *saved_frame = NULL;
struct saved_frame *trav = NULL;
struct saved_frame *tmp = NULL;
- char frame_sent[256] = {
+ char frame_sent[GF_TIMESTR_SIZE] = {
0,
};
struct timespec timeout = {
@@ -105,7 +105,6 @@ call_bail(void *data)
};
char peerid[UNIX_PATH_MAX] = {0};
gf_boolean_t need_unref = _gf_false;
- int len;
GF_VALIDATE_OR_GOTO("client", data, out);
@@ -165,11 +164,8 @@ call_bail(void *data)
list_for_each_entry_safe(trav, tmp, &list, list)
{
- gf_time_fmt(frame_sent, sizeof frame_sent, trav->saved_at.tv_sec,
- gf_timefmt_FT);
- len = strlen(frame_sent);
- snprintf(frame_sent + len, sizeof(frame_sent) - len,
- ".%" GF_PRI_SUSECONDS, trav->saved_at.tv_usec);
+ gf_time_fmt_tv(frame_sent, sizeof frame_sent, &trav->saved_at,
+ gf_timefmt_FT);
gf_log(conn->name, GF_LOG_ERROR,
"bailing out frame type(%s), op(%s(%d)), xid = 0x%x, "
@@ -317,20 +313,15 @@ saved_frames_unwind(struct saved_frames *saved_frames)
{
struct saved_frame *trav = NULL;
struct saved_frame *tmp = NULL;
- char timestr[1024] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
- int len;
list_splice_init(&saved_frames->lk_sf.list, &saved_frames->sf.list);
list_for_each_entry_safe(trav, tmp, &saved_frames->sf.list, list)
{
- gf_time_fmt(timestr, sizeof timestr, trav->saved_at.tv_sec,
- gf_timefmt_FT);
- len = strlen(timestr);
- snprintf(timestr + len, sizeof(timestr) - len, ".%" GF_PRI_SUSECONDS,
- trav->saved_at.tv_usec);
+ gf_time_fmt_tv(timestr, sizeof timestr, &trav->saved_at, gf_timefmt_FT);
if (!trav->rpcreq || !trav->rpcreq->prog)
continue;
@@ -376,19 +367,20 @@ rpc_clnt_reconnect(void *conn_ptr)
struct timespec ts = {0, 0};
struct rpc_clnt *clnt = NULL;
gf_boolean_t need_unref = _gf_false;
+ gf_boolean_t canceled_unref = _gf_false;
conn = conn_ptr;
clnt = conn->rpc_clnt;
-
pthread_mutex_lock(&conn->lock);
{
trans = conn->trans;
- if (!trans) {
- pthread_mutex_unlock(&conn->lock);
- return;
+ if (!trans)
+ goto out_unlock;
+
+ if (conn->reconnect) {
+ if (!gf_timer_call_cancel(clnt->ctx, conn->reconnect))
+ canceled_unref = _gf_true;
}
- if (conn->reconnect)
- gf_timer_call_cancel(clnt->ctx, conn->reconnect);
conn->reconnect = 0;
if ((conn->connected == 0) && !clnt->disabled) {
@@ -409,11 +401,14 @@ rpc_clnt_reconnect(void *conn_ptr)
gf_log(conn->name, GF_LOG_TRACE, "breaking reconnect chain");
}
}
+out_unlock:
pthread_mutex_unlock(&conn->lock);
rpc_clnt_unref(clnt);
if (need_unref)
rpc_clnt_unref(clnt);
+ if (canceled_unref)
+ rpc_clnt_unref(clnt);
return;
}
@@ -924,7 +919,7 @@ rpc_clnt_notify(rpc_transport_t *trans, void *mydata,
}
case RPC_TRANSPORT_MSG_RECEIVED: {
- clock_gettime(CLOCK_REALTIME, &conn->last_received);
+ timespec_now_realtime(&conn->last_received);
pollin = data;
if (pollin->is_reply)
@@ -938,8 +933,7 @@ rpc_clnt_notify(rpc_transport_t *trans, void *mydata,
}
case RPC_TRANSPORT_MSG_SENT: {
- clock_gettime(CLOCK_REALTIME, &conn->last_sent);
-
+ timespec_now_realtime(&conn->last_sent);
ret = 0;
break;
}
@@ -956,6 +950,7 @@ rpc_clnt_notify(rpc_transport_t *trans, void *mydata,
conn->config.remote_port = 0;
conn->connected = 1;
conn->disconnected = 0;
+ pthread_cond_broadcast(&conn->cond);
}
pthread_mutex_unlock(&conn->lock);
@@ -1001,6 +996,7 @@ rpc_clnt_connection_init(struct rpc_clnt *clnt, glusterfs_ctx_t *ctx,
conn = &clnt->conn;
pthread_mutex_init(&clnt->conn.lock, NULL);
+ pthread_cond_init(&clnt->conn.cond, NULL);
conn->name = gf_strdup(name);
if (!conn->name) {
@@ -1826,6 +1822,7 @@ rpc_clnt_destroy(struct rpc_clnt *rpc)
saved_frames_destroy(saved_frames);
pthread_mutex_destroy(&rpc->lock);
pthread_mutex_destroy(&rpc->conn.lock);
+ pthread_cond_destroy(&rpc->conn.cond);
/* mem-pool should be destroyed, otherwise,
it will cause huge memory leaks */
@@ -1858,7 +1855,7 @@ rpc_clnt_unref(struct rpc_clnt *rpc)
return rpc;
}
-void
+int
rpc_clnt_disable(struct rpc_clnt *rpc)
{
rpc_clnt_connection_t *conn = NULL;
@@ -1902,8 +1899,9 @@ rpc_clnt_disable(struct rpc_clnt *rpc)
}
pthread_mutex_unlock(&conn->lock);
+ ret = -1;
if (trans) {
- rpc_transport_disconnect(trans, _gf_true);
+ ret = rpc_transport_disconnect(trans, _gf_true);
/* The auth_value was being reset to AUTH_GLUSTERFS_v2.
* if (clnt->auth_value)
* clnt->auth_value = AUTH_GLUSTERFS_v2;
@@ -1919,7 +1917,6 @@ rpc_clnt_disable(struct rpc_clnt *rpc)
* on a connected transport and hence its strictly serialized.
*/
}
-
if (unref)
rpc_clnt_unref(rpc);
@@ -1930,7 +1927,7 @@ rpc_clnt_disable(struct rpc_clnt *rpc)
rpc_clnt_unref(rpc);
out:
- return;
+ return ret;
}
void
diff --git a/rpc/rpc-lib/src/rpc-clnt.h b/rpc/rpc-lib/src/rpc-clnt.h
index b46feed50c8..2945265200b 100644
--- a/rpc/rpc-lib/src/rpc-clnt.h
+++ b/rpc/rpc-lib/src/rpc-clnt.h
@@ -85,8 +85,8 @@ typedef int (*rpcclnt_cb_fn)(struct rpc_clnt *rpc, void *mydata, void *data);
*/
typedef struct rpcclnt_actor_desc {
char procname[32];
- int procnum;
rpcclnt_cb_fn actor;
+ int procnum;
} rpcclnt_cb_actor_t;
/* Describes a program and its version along with the function pointers
@@ -98,8 +98,6 @@ typedef struct rpcclnt_cb_program {
int prognum;
int progver;
rpcclnt_cb_actor_t *actors; /* All procedure handlers */
- int numactors; /* Num actors in actor array */
-
/* Program specific state handed to actors */
void *private;
@@ -108,6 +106,8 @@ typedef struct rpcclnt_cb_program {
/* Needed for passing back in cb_actor */
void *mydata;
+ int numactors; /* Num actors in actor array */
+
} rpcclnt_cb_program_t;
typedef struct rpc_auth_data {
@@ -127,6 +127,7 @@ struct rpc_clnt_config {
struct rpc_clnt_connection {
pthread_mutex_t lock;
+ pthread_cond_t cond;
rpc_transport_t *trans;
struct rpc_clnt_config config;
gf_timer_t *reconnect;
@@ -151,17 +152,17 @@ typedef struct rpc_clnt_connection rpc_clnt_connection_t;
struct rpc_req {
rpc_clnt_connection_t *conn;
struct iovec req[2];
- int reqcnt;
struct iobref *req_iobref;
struct iovec rsp[2];
int rspcnt;
+ int reqcnt;
struct iobref *rsp_iobref;
- int rpc_status;
- rpc_auth_data_t verf;
rpc_clnt_prog_t *prog;
- int procnum;
+ rpc_auth_data_t verf;
fop_cbk_fn_t cbkfn;
void *conn_private;
+ int procnum;
+ int rpc_status;
uint32_t xid;
};
@@ -182,8 +183,8 @@ typedef struct rpc_clnt {
glusterfs_ctx_t *ctx;
gf_atomic_t refcount;
- int auth_value;
xlator_t *owner;
+ int auth_value;
char disabled;
} rpc_clnt_t;
@@ -250,7 +251,7 @@ int
rpcclnt_cbk_program_register(struct rpc_clnt *svc,
rpcclnt_cb_program_t *program, void *mydata);
-void
+int
rpc_clnt_disable(struct rpc_clnt *rpc);
int
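
With rpc_clnt_disable() now declared to return int, callers can propagate the status of the underlying rpc_transport_disconnect(). A hedged caller-side sketch (the clnt handle and log domain are placeholders, not taken from this patch):

/* Illustration only: react to a failed transport disconnect. */
if (rpc_clnt_disable(clnt) < 0)
    gf_log("rpc-example", GF_LOG_WARNING,
           "transport disconnect could not be issued");
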
diff --git a/rpc/rpc-lib/src/rpc-drc.c b/rpc/rpc-lib/src/rpc-drc.c
index d083db24fc5..de8dc630626 100644
--- a/rpc/rpc-lib/src/rpc-drc.c
+++ b/rpc/rpc-lib/src/rpc-drc.c
@@ -190,7 +190,7 @@ rpcsvc_get_drc_client(rpcsvc_drc_globals_t *drc,
if (!client)
goto out;
- client->ref = 0;
+ GF_ATOMIC_INIT(client->ref, 0);
client->sock_union = (union gf_sock_union) * sockaddr;
client->op_count = 0;
INIT_LIST_HEAD(&client->client_list);
@@ -246,7 +246,7 @@ static drc_client_t *
rpcsvc_drc_client_ref(drc_client_t *client)
{
GF_ASSERT(client);
- client->ref++;
+ GF_ATOMIC_INC(client->ref);
return client;
}
@@ -261,11 +261,12 @@ rpcsvc_drc_client_ref(drc_client_t *client)
static drc_client_t *
rpcsvc_drc_client_unref(rpcsvc_drc_globals_t *drc, drc_client_t *client)
{
+ uint32_t refcount;
+
GF_ASSERT(drc);
- GF_ASSERT(client->ref);
- client->ref--;
- if (!client->ref) {
+ refcount = GF_ATOMIC_DEC(client->ref);
+ if (!refcount) {
drc->client_count--;
rpcsvc_remove_drc_client(client);
client = NULL;
@@ -589,7 +590,7 @@ rpcsvc_drc_priv(rpcsvc_drc_globals_t *drc)
}
gf_proc_dump_build_key(key, "client", "%d.ref_count", i);
- gf_proc_dump_write(key, "%d", client->ref);
+ gf_proc_dump_write(key, "%" PRIu32, GF_ATOMIC_GET(client->ref));
gf_proc_dump_build_key(key, "client", "%d.op_count", i);
gf_proc_dump_write(key, "%d", client->op_count);
i++;
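
The DRC client reference count moves from a plain integer to a gf_atomic_uint32_t. Because GF_ATOMIC_DEC returns the post-decrement value, the unref path can test that return instead of re-reading the counter, which is what the hunk above does. A condensed sketch of the pattern (drc bookkeeping omitted; names as used in rpc-drc.c):

GF_ATOMIC_INC(client->ref);              /* take a reference             */
if (GF_ATOMIC_DEC(client->ref) == 0) {   /* drop it; 0 => last holder    */
    rpcsvc_remove_drc_client(client);    /* safe to tear the client down */
}
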
diff --git a/rpc/rpc-lib/src/rpc-drc.h b/rpc/rpc-lib/src/rpc-drc.h
index 6aaede0828a..ce66430809b 100644
--- a/rpc/rpc-lib/src/rpc-drc.h
+++ b/rpc/rpc-lib/src/rpc-drc.h
@@ -24,7 +24,7 @@ struct drc_client {
struct rb_table *rbtree;
/* no. of ops currently cached */
uint32_t op_count;
- uint32_t ref;
+ gf_atomic_uint32_t ref;
struct list_head client_list;
};
diff --git a/rpc/rpc-lib/src/rpc-transport.c b/rpc/rpc-lib/src/rpc-transport.c
index 2b648717635..a6e201a9b36 100644
--- a/rpc/rpc-lib/src/rpc-transport.c
+++ b/rpc/rpc-lib/src/rpc-transport.c
@@ -12,13 +12,9 @@
#include <stdlib.h>
#include <stdio.h>
#include <sys/poll.h>
-#include <fnmatch.h>
#include <stdint.h>
-#include <glusterfs/logging.h>
#include "rpc-transport.h"
-#include <glusterfs/glusterfs.h>
-#include <glusterfs/list.h>
#ifndef GF_OPTION_LIST_EMPTY
#define GF_OPTION_LIST_EMPTY(_opt) (_opt->value[0] == NULL)
@@ -64,17 +60,6 @@ out:
}
int32_t
-rpc_transport_get_myname(rpc_transport_t *this, char *hostname, int hostlen)
-{
- int32_t ret = -1;
- GF_VALIDATE_OR_GOTO("rpc", this, out);
-
- ret = this->ops->get_myname(this, hostname, hostlen);
-out:
- return ret;
-}
-
-int32_t
rpc_transport_get_peername(rpc_transport_t *this, char *hostname, int hostlen)
{
int32_t ret = -1;
@@ -88,14 +73,10 @@ out:
int
rpc_transport_throttle(rpc_transport_t *this, gf_boolean_t onoff)
{
- int ret = 0;
-
if (!this->ops->throttle)
return -ENOSYS;
- ret = this->ops->throttle(this, onoff);
-
- return ret;
+ return this->ops->throttle(this, onoff);
}
int32_t
@@ -189,7 +170,7 @@ rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
char *name = NULL;
void *handle = NULL;
char *type = NULL;
- char str[] = "ERROR";
+ static char str[] = "ERROR";
int32_t ret = -1;
int is_tcp = 0, is_unix = 0, is_ibsdp = 0;
volume_opt_list_t *vol_opt = NULL;
@@ -214,9 +195,9 @@ rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
type = str;
/* Backward compatibility */
- ret = dict_get_str(options, "transport-type", &type);
+ ret = dict_get_str_sizen(options, "transport-type", &type);
if (ret < 0) {
- ret = dict_set_str(options, "transport-type", "socket");
+ ret = dict_set_str_sizen(options, "transport-type", "socket");
if (ret < 0)
gf_log("dict", GF_LOG_DEBUG, "setting transport-type failed");
else
@@ -238,15 +219,16 @@ rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
is_ibsdp = strcmp(type, "ib-sdp");
if ((is_tcp == 0) || (is_unix == 0) || (is_ibsdp == 0)) {
if (is_unix == 0)
- ret = dict_set_str(options, "transport.address-family", "unix");
+ ret = dict_set_str_sizen(options, "transport.address-family",
+ "unix");
if (is_ibsdp == 0)
- ret = dict_set_str(options, "transport.address-family",
- "inet-sdp");
+ ret = dict_set_str_sizen(options, "transport.address-family",
+ "inet-sdp");
if (ret < 0)
gf_log("dict", GF_LOG_DEBUG, "setting address-family failed");
- ret = dict_set_str(options, "transport-type", "socket");
+ ret = dict_set_str_sizen(options, "transport-type", "socket");
if (ret < 0)
gf_log("dict", GF_LOG_DEBUG, "setting transport-type failed");
}
@@ -255,9 +237,9 @@ rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
/* client-bind-insecure is for clients protocol, and
* bind-insecure for glusterd. Both mutually exclusive
*/
- ret = dict_get_str(options, "client-bind-insecure", &type);
+ ret = dict_get_str_sizen(options, "client-bind-insecure", &type);
if (ret)
- ret = dict_get_str(options, "bind-insecure", &type);
+ ret = dict_get_str_sizen(options, "bind-insecure", &type);
if (ret == 0) {
ret = gf_string2boolean(type, &bind_insecure);
if (ret < 0) {
@@ -276,7 +258,7 @@ rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
trans->bind_insecure = 1;
}
- ret = dict_get_str(options, "transport-type", &type);
+ ret = dict_get_str_sizen(options, "transport-type", &type);
if (ret < 0) {
gf_log("rpc-transport", GF_LOG_ERROR,
"'option transport-type <xx>' missing in volume '%s'",
@@ -289,7 +271,7 @@ rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
goto fail;
}
- if (dict_get(options, "notify-poller-death")) {
+ if (dict_get_sizen(options, "notify-poller-death")) {
trans->notify_poller_death = 1;
}
@@ -457,13 +439,10 @@ fail:
return ret;
}
-int32_t
+static void
rpc_transport_destroy(rpc_transport_t *this)
{
struct dnscache6 *cache = NULL;
- int32_t ret = -1;
-
- GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
if (this->clnt_options)
dict_unref(this->clnt_options);
@@ -491,10 +470,6 @@ rpc_transport_destroy(rpc_transport_t *this)
}
GF_FREE(this);
-
- ret = 0;
-fail:
- return ret;
}
rpc_transport_t *
@@ -577,16 +552,17 @@ rpc_transport_keepalive_options_set(dict_t *options, int32_t interval,
GF_ASSERT(options);
GF_ASSERT((interval > 0) || (time > 0));
- ret = dict_set_int32(options, "transport.socket.keepalive-interval",
- interval);
+ ret = dict_set_int32_sizen(options, "transport.socket.keepalive-interval",
+ interval);
if (ret)
goto out;
- ret = dict_set_int32(options, "transport.socket.keepalive-time", time);
+ ret = dict_set_int32_sizen(options, "transport.socket.keepalive-time",
+ time);
if (ret)
goto out;
- ret = dict_set_int32(options, "transport.tcp-user-timeout", timeout);
+ ret = dict_set_int32_sizen(options, "transport.tcp-user-timeout", timeout);
if (ret)
goto out;
out:
@@ -609,30 +585,30 @@ rpc_transport_unix_options_build(dict_t *dict, char *filepath,
goto out;
}
- ret = dict_set_dynstr(dict, "transport.socket.connect-path", fpath);
+ ret = dict_set_dynstr_sizen(dict, "transport.socket.connect-path", fpath);
if (ret) {
GF_FREE(fpath);
goto out;
}
- ret = dict_set_str(dict, "transport.address-family", "unix");
+ ret = dict_set_str_sizen(dict, "transport.address-family", "unix");
if (ret)
goto out;
- ret = dict_set_str(dict, "transport.socket.nodelay", "off");
+ ret = dict_set_str_sizen(dict, "transport.socket.nodelay", "off");
if (ret)
goto out;
- ret = dict_set_str(dict, "transport-type", "socket");
+ ret = dict_set_str_sizen(dict, "transport-type", "socket");
if (ret)
goto out;
- ret = dict_set_str(dict, "transport.socket.keepalive", "off");
+ ret = dict_set_str_sizen(dict, "transport.socket.keepalive", "off");
if (ret)
goto out;
if (frame_timeout > 0) {
- ret = dict_set_int32(dict, "frame-timeout", frame_timeout);
+ ret = dict_set_int32_sizen(dict, "frame-timeout", frame_timeout);
if (ret)
goto out;
}
@@ -647,9 +623,9 @@ rpc_transport_inet_options_build(dict_t *dict, const char *hostname, int port,
char *host = NULL;
int ret = -1;
#ifdef IPV6_DEFAULT
- char *addr_family = "inet6";
+ static char *addr_family = "inet6";
#else
- char *addr_family = "inet";
+ static char *addr_family = "inet";
#endif
GF_ASSERT(hostname);
@@ -662,7 +638,7 @@ rpc_transport_inet_options_build(dict_t *dict, const char *hostname, int port,
goto out;
}
- ret = dict_set_dynstr(dict, "remote-host", host);
+ ret = dict_set_dynstr_sizen(dict, "remote-host", host);
if (ret) {
gf_log(THIS->name, GF_LOG_WARNING, "failed to set remote-host with %s",
host);
@@ -670,21 +646,22 @@ rpc_transport_inet_options_build(dict_t *dict, const char *hostname, int port,
goto out;
}
- ret = dict_set_int32(dict, "remote-port", port);
+ ret = dict_set_int32_sizen(dict, "remote-port", port);
if (ret) {
gf_log(THIS->name, GF_LOG_WARNING, "failed to set remote-port with %d",
port);
goto out;
}
- ret = dict_set_str(dict, "address-family", (af != NULL ? af : addr_family));
+ ret = dict_set_str_sizen(dict, "address-family",
+ (af != NULL ? af : addr_family));
if (ret) {
gf_log(THIS->name, GF_LOG_WARNING, "failed to set address-family to %s",
addr_family);
goto out;
}
- ret = dict_set_str(dict, "transport-type", "socket");
+ ret = dict_set_str_sizen(dict, "transport-type", "socket");
if (ret) {
gf_log(THIS->name, GF_LOG_WARNING,
"failed to set trans-type with socket");
diff --git a/rpc/rpc-lib/src/rpc-transport.h b/rpc/rpc-lib/src/rpc-transport.h
index 5b88be5a3de..c499f0bb955 100644
--- a/rpc/rpc-lib/src/rpc-transport.h
+++ b/rpc/rpc-lib/src/rpc-transport.h
@@ -103,19 +103,19 @@ typedef enum {
struct rpc_transport_msg {
struct iovec *rpchdr;
- int rpchdrcount;
struct iovec *proghdr;
+ int rpchdrcount;
int proghdrcount;
struct iovec *progpayload;
- int progpayloadcount;
struct iobref *iobref;
+ int progpayloadcount;
};
typedef struct rpc_transport_msg rpc_transport_msg_t;
struct rpc_transport_rsp {
struct iovec *rsphdr;
- int rsphdr_count;
struct iovec *rsp_payload;
+ int rsphdr_count;
int rsp_payload_count;
struct iobref *rsp_iobref;
};
@@ -149,9 +149,9 @@ typedef struct rpc_transport_data rpc_transport_data_t;
struct rpc_request_info {
int prognum;
int progver;
- int procnum;
void *rpc_req; /* struct rpc_req */
rpc_transport_rsp_t rsp;
+ int procnum;
uint32_t xid;
};
typedef struct rpc_request_info rpc_request_info_t;
@@ -193,11 +193,12 @@ struct rpc_transport {
int32_t outstanding_rpc_count;
struct list_head list;
- int bind_insecure;
void *dl_handle; /* handle of dlopen() */
char *ssl_name;
dict_t *clnt_options; /* store options received from
* client */
+ gf_atomic_t disconnect_progress;
+ int bind_insecure;
/* connect_failed: saves the connect() syscall status as socket_t
* member holding connect() status can't be accessed by higher gfapi
* layer or in client management notification handler functions
@@ -205,18 +206,17 @@ struct rpc_transport {
gf_boolean_t connect_failed;
char notify_poller_death;
char poller_death_accept;
- gf_atomic_t disconnect_progress;
};
struct rpc_transport_pollin {
struct rpc_transport *trans;
- int count;
void *private;
struct iobref *iobref;
struct iovec vector[MAX_IOVEC];
+ gf_async_t async;
+ int count;
char is_reply;
char vectored;
- gf_async_t async;
};
typedef struct rpc_transport_pollin rpc_transport_pollin_t;
@@ -252,9 +252,6 @@ int32_t
rpc_transport_disconnect(rpc_transport_t *this, gf_boolean_t wait);
int32_t
-rpc_transport_destroy(rpc_transport_t *this);
-
-int32_t
rpc_transport_notify(rpc_transport_t *this, rpc_transport_event_t event,
void *data, ...);
@@ -285,9 +282,6 @@ rpc_transport_get_peeraddr(rpc_transport_t *this, char *peeraddr, int addrlen,
struct sockaddr_storage *sa, size_t salen);
int32_t
-rpc_transport_get_myname(rpc_transport_t *this, char *hostname, int hostlen);
-
-int32_t
rpc_transport_get_myaddr(rpc_transport_t *this, char *peeraddr, int addrlen,
struct sockaddr_storage *sa, size_t salen);
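
Most of the header churn above is field reordering so that pointer-sized and int-sized members pack without holes. A toy example of the effect on a typical LP64 target (not taken from the patch):

/* sizeof(struct before) == 32: a 4-byte hole follows each int. */
struct before { void *p1; int a; void *p2; int b; };
/* sizeof(struct after)  == 24: the two ints share one 8-byte slot. */
struct after  { void *p1; void *p2; int a; int b; };
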
diff --git a/rpc/rpc-lib/src/rpcsvc-common.h b/rpc/rpc-lib/src/rpcsvc-common.h
index 734601eef0b..6c4ec49a6ef 100644
--- a/rpc/rpc-lib/src/rpcsvc-common.h
+++ b/rpc/rpc-lib/src/rpcsvc-common.h
@@ -39,8 +39,6 @@ typedef struct rpcsvc_state {
pthread_rwlock_t rpclock;
- unsigned int memfactor;
-
/* List of the authentication schemes available. */
struct list_head authschemes;
@@ -61,6 +59,8 @@ typedef struct rpcsvc_state {
struct list_head notify;
int notify_count;
+ unsigned int memfactor;
+
xlator_t *xl; /* xlator */
void *mydata;
rpcsvc_notify_t notifyfn;
diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
index 0daa54d536d..39910d481bf 100644
--- a/rpc/rpc-lib/src/rpcsvc.c
+++ b/rpc/rpc-lib/src/rpcsvc.c
@@ -13,6 +13,7 @@
#include <glusterfs/dict.h>
#include <glusterfs/byte-order.h>
#include <glusterfs/compat-errno.h>
+#include <glusterfs/statedump.h>
#include "xdr-rpc.h"
#include <glusterfs/iobuf.h>
#include "xdr-common.h"
@@ -41,7 +42,11 @@
#include "xdr-rpcclnt.h"
#include <glusterfs/glusterfs-acl.h>
-struct rpcsvc_program gluster_dump_prog;
+#ifndef PTHREAD_MUTEX_ADAPTIVE_NP
+#define PTHREAD_MUTEX_ADAPTIVE_NP PTHREAD_MUTEX_DEFAULT
+#endif
+
+static struct rpcsvc_program gluster_dump_prog;
#define rpcsvc_alloc_request(svc, request) \
do { \
@@ -66,59 +71,33 @@ rpcsvc_request_handler(void *arg);
static int
rpcsvc_match_subnet_v4(const char *addrtok, const char *ipaddr);
-void
+static void
rpcsvc_toggle_queue_status(rpcsvc_program_t *prog,
- rpcsvc_request_queue_t *queue, char status[])
+ rpcsvc_request_queue_t *queue,
+ unsigned long status[])
{
- int queue_index = 0, status_index = 0, set_bit = 0;
-
- if (queue != &prog->request_queue[0]) {
- queue_index = (queue - &prog->request_queue[0]);
- }
-
- status_index = queue_index / 8;
- set_bit = queue_index % 8;
-
- status[status_index] ^= (1 << set_bit);
+ unsigned queue_index = queue - prog->request_queue;
- return;
+ status[queue_index / __BITS_PER_LONG] ^= (1UL << (queue_index %
+ __BITS_PER_LONG));
}
int
rpcsvc_get_free_queue_index(rpcsvc_program_t *prog)
{
- int queue_index = 0, max_index = 0, i = 0;
- unsigned int right_most_unset_bit = 0;
-
- right_most_unset_bit = 8;
+ unsigned i, j = 0;
- max_index = gf_roof(EVENT_MAX_THREADS, 8) / 8;
- for (i = 0; i < max_index; i++) {
- if (prog->request_queue_status[i] == 0) {
- right_most_unset_bit = 0;
+ for (i = 0; i < EVENT_MAX_THREADS / __BITS_PER_LONG; i++)
+ if (prog->request_queue_status[i] != ULONG_MAX) {
+ j = __builtin_ctzl(~prog->request_queue_status[i]);
break;
- } else {
- /* get_rightmost_set_bit (sic)*/
- right_most_unset_bit = __builtin_ctz(
- ~prog->request_queue_status[i]);
- if (right_most_unset_bit < 8) {
- break;
- }
}
- }
-
- if (right_most_unset_bit > 7) {
- queue_index = -1;
- } else {
- queue_index = i * 8;
- queue_index += right_most_unset_bit;
- }
- if (queue_index != -1) {
- prog->request_queue_status[i] |= (0x1 << right_most_unset_bit);
- }
+ if (i == EVENT_MAX_THREADS / __BITS_PER_LONG)
+ return -1;
- return queue_index;
+ prog->request_queue_status[i] |= (1UL << j);
+ return i * __BITS_PER_LONG + j;
}
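
The rewritten rpcsvc_get_free_queue_index() treats request_queue_status as an array of unsigned long bitmaps and locates the first free slot with __builtin_ctzl() on the complemented word. A small worked example of the bit trick (illustration only):

unsigned long status = 0x7UL;             /* slots 0,1,2 already in use  */
unsigned slot = __builtin_ctzl(~status);  /* == 3, the first zero bit    */
status |= 1UL << slot;                    /* mark slot 3 busy, as above  */
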
rpcsvc_notify_wrapper_t *
@@ -363,6 +342,10 @@ rpcsvc_program_actor(rpcsvc_request_t *req)
goto err;
}
+ if (svc->xl->ctx->measure_latency) {
+ timespec_now(&req->begin);
+ }
+
req->ownthread = program->ownthread;
req->synctask = program->synctask;
@@ -1512,10 +1495,18 @@ rpcsvc_submit_generic(rpcsvc_request_t *req, struct iovec *proghdr,
size_t hdrlen = 0;
char new_iobref = 0;
rpcsvc_drc_globals_t *drc = NULL;
+ gf_latency_t *lat = NULL;
if ((!req) || (!req->trans))
return -1;
+ if (req->prog && req->begin.tv_sec) {
+ if ((req->procnum >= 0) && (req->procnum < req->prog->numactors)) {
+ timespec_now(&req->end);
+ lat = &req->prog->latencies[req->procnum];
+ gf_latency_update(lat, &req->begin, &req->end);
+ }
+ }
trans = req->trans;
for (i = 0; i < hdrcount; i++) {
@@ -1846,6 +1837,15 @@ rpcsvc_submit_message(rpcsvc_request_t *req, struct iovec *proghdr,
iobref);
}
+void
+rpcsvc_program_destroy(rpcsvc_program_t *program)
+{
+ if (program) {
+ GF_FREE(program->latencies);
+ GF_FREE(program);
+ }
+}
+
int
rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
{
@@ -1855,6 +1855,18 @@ rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
goto out;
}
+ pthread_rwlock_rdlock(&svc->rpclock);
+ {
+ list_for_each_entry(prog, &svc->programs, program)
+ {
+ if ((prog->prognum == program->prognum) &&
+ (prog->progver == program->progver)) {
+ break;
+ }
+ }
+ }
+ pthread_rwlock_unlock(&svc->rpclock);
+
ret = rpcsvc_program_unregister_portmap(program);
if (ret == -1) {
gf_log(GF_RPCSVC, GF_LOG_ERROR,
@@ -1871,17 +1883,6 @@ rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
goto out;
}
#endif
- pthread_rwlock_rdlock(&svc->rpclock);
- {
- list_for_each_entry(prog, &svc->programs, program)
- {
- if ((prog->prognum == program->prognum) &&
- (prog->progver == program->progver)) {
- break;
- }
- }
- }
- pthread_rwlock_unlock(&svc->rpclock);
gf_log(GF_RPCSVC, GF_LOG_DEBUG,
"Program unregistered: %s, Num: %d,"
@@ -1902,6 +1903,8 @@ rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
ret = 0;
out:
+ rpcsvc_program_destroy(prog);
+
if (ret == -1) {
if (program) {
gf_log(GF_RPCSVC, GF_LOG_ERROR,
@@ -2285,6 +2288,11 @@ rpcsvc_program_register(rpcsvc_t *svc, rpcsvc_program_t *program,
}
memcpy(newprog, program, sizeof(*program));
+ newprog->latencies = gf_latency_new(program->numactors);
+ if (!newprog->latencies) {
+ rpcsvc_program_destroy(newprog);
+ goto out;
+ }
INIT_LIST_HEAD(&newprog->program);
pthread_mutexattr_init(&thr_attr);
@@ -3230,13 +3238,55 @@ out:
return ret;
}
-rpcsvc_actor_t gluster_dump_actors[GF_DUMP_MAXVALUE] = {
- [GF_DUMP_NULL] = {"NULL", GF_DUMP_NULL, NULL, NULL, 0, DRC_NA},
- [GF_DUMP_DUMP] = {"DUMP", GF_DUMP_DUMP, rpcsvc_dump, NULL, 0, DRC_NA},
- [GF_DUMP_PING] = {"PING", GF_DUMP_PING, rpcsvc_ping, NULL, 0, DRC_NA},
+void
+rpcsvc_program_dump(rpcsvc_program_t *prog)
+{
+ char key_prefix[GF_DUMP_MAX_BUF_LEN];
+ char key[GF_DUMP_MAX_BUF_LEN];
+ int i;
+
+ snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s", prog->progname);
+ gf_proc_dump_add_section("%s", key_prefix);
+
+ gf_proc_dump_build_key(key, key_prefix, "program-number");
+ gf_proc_dump_write(key, "%d", prog->prognum);
+
+ gf_proc_dump_build_key(key, key_prefix, "program-version");
+ gf_proc_dump_write(key, "%d", prog->progver);
+
+ strncat(key_prefix, ".latency",
+ sizeof(key_prefix) - strlen(key_prefix) - 1);
+
+ for (i = 0; i < prog->numactors; i++) {
+ gf_proc_dump_build_key(key, key_prefix, "%s", prog->actors[i].procname);
+ gf_latency_statedump_and_reset(key, &prog->latencies[i]);
+ }
+}
+
+void
+rpcsvc_statedump(rpcsvc_t *svc)
+{
+ rpcsvc_program_t *prog = NULL;
+ int ret = 0;
+ ret = pthread_rwlock_tryrdlock(&svc->rpclock);
+ if (ret)
+ return;
+ {
+ list_for_each_entry(prog, &svc->programs, program)
+ {
+ rpcsvc_program_dump(prog);
+ }
+ }
+ pthread_rwlock_unlock(&svc->rpclock);
+}
+
+static rpcsvc_actor_t gluster_dump_actors[GF_DUMP_MAXVALUE] = {
+ [GF_DUMP_NULL] = {"NULL", NULL, NULL, GF_DUMP_NULL, DRC_NA, 0},
+ [GF_DUMP_DUMP] = {"DUMP", rpcsvc_dump, NULL, GF_DUMP_DUMP, DRC_NA, 0},
+ [GF_DUMP_PING] = {"PING", rpcsvc_ping, NULL, GF_DUMP_PING, DRC_NA, 0},
};
-struct rpcsvc_program gluster_dump_prog = {
+static struct rpcsvc_program gluster_dump_prog = {
.progname = "GF-DUMP",
.prognum = GLUSTER_DUMP_PROGRAM,
.progver = GLUSTER_DUMP_VERSION,
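
The latency hooks added to rpcsvc.c stamp req->begin when the actor is resolved (only if ctx->measure_latency is set), stamp req->end in rpcsvc_submit_generic(), and feed the pair into the per-procedure gf_latency_t array dumped by rpcsvc_statedump(). A generic sketch of that begin/end accounting, using plain clock_gettime() in place of the timespec_now()/gf_latency_update() helpers used by the patch:

struct timespec begin, end;
clock_gettime(CLOCK_MONOTONIC, &begin);       /* request picked up          */
/* ... actor handles the request ... */
clock_gettime(CLOCK_MONOTONIC, &end);         /* reply being submitted      */
uint64_t ns = (end.tv_sec - begin.tv_sec) * 1000000000ULL +
              (end.tv_nsec - begin.tv_nsec);  /* elapsed time to accumulate */
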
diff --git a/rpc/rpc-lib/src/rpcsvc.h b/rpc/rpc-lib/src/rpcsvc.h
index d9052392d47..7b3030926c8 100644
--- a/rpc/rpc-lib/src/rpcsvc.h
+++ b/rpc/rpc-lib/src/rpcsvc.h
@@ -22,6 +22,7 @@
#include <inttypes.h>
#include <rpc/rpc_msg.h>
#include <glusterfs/compat.h>
+#include <glusterfs/client_t.h>
#ifndef MAX_IOVEC
#define MAX_IOVEC 16
@@ -142,12 +143,6 @@ struct rpcsvc_config {
int max_block_size;
};
-typedef struct rpcsvc_auth_data {
- int flavour;
- int datalen;
- char authdata[GF_MAX_AUTH_BYTES];
-} rpcsvc_auth_data_t;
-
#define rpcsvc_auth_flavour(au) ((au).flavour)
typedef struct drc_client drc_client_t;
@@ -195,24 +190,11 @@ struct rpcsvc_request {
* by the program actors. This is the buffer that will need to
* be de-xdred by the actor.
*/
- struct iovec msg[MAX_IOVEC];
int count;
+ struct iovec msg[MAX_IOVEC];
struct iobref *iobref;
- /* Status of the RPC call, whether it was accepted or denied. */
- int rpc_status;
-
- /* In case, the call was denied, the RPC error is stored here
- * till the reply is sent.
- */
- int rpc_err;
-
- /* In case the failure happened because of an authentication problem
- * , this value needs to be assigned the correct auth error number.
- */
- int auth_err;
-
/* There can be cases of RPC requests where the reply needs to
* be built from multiple sources. E.g. where even the NFS reply
* can contain a payload, as in the NFSv3 read reply. Here the RPC header
@@ -228,14 +210,14 @@ struct rpcsvc_request {
size_t payloadsize;
/* The credentials extracted from the rpc request */
- rpcsvc_auth_data_t cred;
+ client_auth_data_t cred;
/* The verified extracted from the rpc request. In request side
* processing this contains the verifier sent by the client, on reply
* side processing, it is filled with the verified that will be
* sent to the client.
*/
- rpcsvc_auth_data_t verf;
+ client_auth_data_t verf;
/* Container for a RPC program wanting to store a temp
* request-specific item.
*/
@@ -250,6 +232,19 @@ struct rpcsvc_request {
/* request queue in rpcsvc */
struct list_head request_list;
+ /* Status of the RPC call, whether it was accepted or denied. */
+ int rpc_status;
+
+ /* In case, the call was denied, the RPC error is stored here
+ * till the reply is sent.
+ */
+ int rpc_err;
+
+ /* In case the failure happened because of an authentication problem
+ * , this value needs to be assigned the correct auth error number.
+ */
+ int auth_err;
+
/* Things passed to rpc layer from client */
/* @flags: Can be used for binary data passed in xdata to be
@@ -269,6 +264,8 @@ struct rpcsvc_request {
gf_boolean_t ownthread;
gf_boolean_t synctask;
+ struct timespec begin; /*req handling start time*/
+ struct timespec end; /*req handling end time*/
};
#define rpcsvc_request_program(req) ((rpcsvc_program_t *)((req)->prog))
@@ -362,7 +359,6 @@ typedef void (*rpcsvc_deallocate_reply)(void *msg);
*/
typedef struct rpcsvc_actor_desc {
char procname[RPCSVC_NAME_MAX];
- int procnum;
rpcsvc_actor actor;
/* Handler for cases where the RPC requests fragments are large enough
@@ -375,18 +371,20 @@ typedef struct rpcsvc_actor_desc {
*/
rpcsvc_vector_sizer vector_sizer;
+ int procnum;
+
/* Can actor be ran on behalf an unprivileged requestor? */
- gf_boolean_t unprivileged;
drc_op_type_t op_type;
+ gf_boolean_t unprivileged;
} rpcsvc_actor_t;
typedef struct rpcsvc_request_queue {
- int gen;
struct list_head request_queue;
pthread_mutex_t queue_lock;
pthread_cond_t queue_cond;
pthread_t thread;
struct rpcsvc_program *program;
+ int gen;
gf_boolean_t waiting;
} rpcsvc_request_queue_t;
@@ -421,10 +419,9 @@ struct rpcsvc_program {
int numactors; /* Num actors in actor array */
int proghighvers; /* Highest ver for program
supported by the system. */
- int proglowvers; /* Lowest ver */
-
/* Program specific state handed to actors */
void *private;
+ gf_latency_t *latencies; /*Tracks latency statistics for the rpc call*/
/* This upcall is provided by the program during registration.
* It is used to notify the program about events like connection being
@@ -434,6 +431,8 @@ struct rpcsvc_program {
*/
rpcsvc_notify_t notify;
+ int proglowvers; /* Lowest ver */
+
/* An integer that identifies the min auth strength that is required
* by this protocol, for eg. MOUNT3 needs AUTH_UNIX at least.
* See RFC 1813, Section 5.2.1.
@@ -443,7 +442,6 @@ struct rpcsvc_program {
/* list member to link to list of registered services with rpcsvc */
struct list_head program;
rpcsvc_request_queue_t request_queue[EVENT_MAX_THREADS];
- char request_queue_status[EVENT_MAX_THREADS / 8 + 1];
pthread_mutex_t thr_lock;
pthread_cond_t thr_cond;
int threadcount;
@@ -462,6 +460,7 @@ struct rpcsvc_program {
gf_boolean_t alive;
gf_boolean_t synctask;
+ unsigned long request_queue_status[EVENT_MAX_THREADS / __BITS_PER_LONG];
};
typedef struct rpcsvc_cbk_program {
@@ -593,9 +592,9 @@ typedef struct rpcsvc_auth_ops {
typedef struct rpcsvc_auth_flavour_desc {
char authname[RPCSVC_NAME_MAX];
- int authnum;
rpcsvc_auth_ops_t *authops;
void *authprivate;
+ int authnum;
} rpcsvc_auth_t;
typedef void *(*rpcsvc_auth_initer_t)(rpcsvc_t *svc, dict_t *options);
@@ -690,4 +689,6 @@ rpcsvc_autoscale_threads(glusterfs_ctx_t *ctx, rpcsvc_t *rpc, int incr);
extern int
rpcsvc_destroy(rpcsvc_t *svc);
+void
+rpcsvc_statedump(rpcsvc_t *svc);
#endif
diff --git a/rpc/rpc-lib/src/xdr-common.h b/rpc/rpc-lib/src/xdr-common.h
index 7b0bc36ec64..752736b3d4d 100644
--- a/rpc/rpc-lib/src/xdr-common.h
+++ b/rpc/rpc-lib/src/xdr-common.h
@@ -66,11 +66,9 @@ enum gf_dump_procnum {
#ifdef GF_LINUX_HOST_OS
#define xdr_u_int32_t xdr_uint32_t
#define xdr_u_int64_t xdr_uint64_t
-#ifdef IPV6_DEFAULT
unsigned long
xdr_sizeof(xdrproc_t func, void *data);
#endif
-#endif
#ifdef GF_DARWIN_HOST_OS
#define xdr_u_quad_t xdr_u_int64_t
diff --git a/rpc/rpc-transport/Makefile.am b/rpc/rpc-transport/Makefile.am
index 221fd640514..7dd9f026cfc 100644
--- a/rpc/rpc-transport/Makefile.am
+++ b/rpc/rpc-transport/Makefile.am
@@ -1 +1 @@
-SUBDIRS = socket $(RDMA_SUBDIR)
+SUBDIRS = socket
diff --git a/rpc/rpc-transport/rdma/Makefile.am b/rpc/rpc-transport/rdma/Makefile.am
deleted file mode 100644
index f963effea22..00000000000
--- a/rpc/rpc-transport/rdma/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-SUBDIRS = src
\ No newline at end of file
diff --git a/rpc/rpc-transport/rdma/src/Makefile.am b/rpc/rpc-transport/rdma/src/Makefile.am
deleted file mode 100644
index 40b5a19d3d1..00000000000
--- a/rpc/rpc-transport/rdma/src/Makefile.am
+++ /dev/null
@@ -1,24 +0,0 @@
-# TODO : need to change transportdir
-
-transport_LTLIBRARIES = rdma.la
-transportdir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/rpc-transport
-
-rdma_la_LDFLAGS = -module -avoid-version -nostartfiles
-
-rdma_la_SOURCES = rdma.c name.c
-rdma_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
- $(top_builddir)/rpc/xdr/src/libgfxdr.la \
- $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
- -libverbs -lrdmacm
-
-noinst_HEADERS = rdma.h name.h rpc-trans-rdma-messages.h
-
-AM_CPPFLAGS = $(GF_CPPFLAGS) \
- -I$(top_srcdir)/libglusterfs/src \
- -I$(top_srcdir)/rpc/rpc-lib/src/ \
- -I$(top_srcdir)/rpc/xdr/src \
- -I$(top_builddir)/rpc/xdr/src
-
-AM_CFLAGS = -Wall $(GF_CFLAGS)
-
-CLEANFILES = *~
diff --git a/rpc/rpc-transport/rdma/src/name.c b/rpc/rpc-transport/rdma/src/name.c
deleted file mode 100644
index 47184d9f148..00000000000
--- a/rpc/rpc-transport/rdma/src/name.c
+++ /dev/null
@@ -1,703 +0,0 @@
-/*
- Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <errno.h>
-#include <netdb.h>
-#include <string.h>
-#include <rdma/rdma_cma.h>
-
-#ifndef AF_INET_SDP
-#define AF_INET_SDP 27
-#endif
-
-#include "rpc-transport.h"
-#include "rdma.h"
-#include <glusterfs/common-utils.h>
-#include "rpc-lib-messages.h"
-#include "rpc-trans-rdma-messages.h"
-
-int32_t
-gf_resolve_ip6(const char *hostname, uint16_t port, int family, void **dnscache,
- struct addrinfo **addr_info);
-
-static void
-_assign_port(struct sockaddr *sockaddr, uint16_t port)
-{
- switch (sockaddr->sa_family) {
- case AF_INET6:
- ((struct sockaddr_in6 *)sockaddr)->sin6_port = htons(port);
- break;
-
- case AF_INET_SDP:
- case AF_INET:
- ((struct sockaddr_in *)sockaddr)->sin_port = htons(port);
- break;
- }
-}
-
-static int32_t
-af_inet_bind_to_port_lt_ceiling(struct rdma_cm_id *cm_id,
- struct sockaddr *sockaddr,
- socklen_t sockaddr_len, uint32_t ceiling)
-{
-#if GF_DISABLE_PRIVPORT_TRACKING
- _assign_port(sockaddr, 0);
- return rdma_bind_addr(cm_id, sockaddr);
-#else
- int32_t ret = -1;
- uint16_t port = ceiling - 1;
- unsigned char ports[GF_PORT_ARRAY_SIZE] = {
- 0,
- };
- int i = 0;
-
-loop:
- ret = gf_process_reserved_ports(ports, ceiling);
-
- while (port) {
- if (port == GF_CLIENT_PORT_CEILING) {
- ret = -1;
- break;
- }
-
- /* ignore the reserved ports */
- if (BIT_VALUE(ports, port)) {
- port--;
- continue;
- }
-
- _assign_port(sockaddr, port);
-
- ret = rdma_bind_addr(cm_id, sockaddr);
-
- if (ret == 0)
- break;
-
- if (ret == -1 && errno == EACCES)
- break;
-
- port--;
- }
-
- /* In case if all the secure ports are exhausted, we are no more
- * binding to secure ports, hence instead of getting a random
- * port, lets define the range to restrict it from getting from
- * ports reserved for bricks i.e from range of 49152 - 65535
- * which further may lead to port clash */
- if (!port) {
- ceiling = port = GF_CLNT_INSECURE_PORT_CEILING;
- for (i = 0; i <= ceiling; i++)
- BIT_CLEAR(ports, i);
- goto loop;
- }
-
- return ret;
-#endif /* GF_DISABLE_PRIVPORT_TRACKING */
-}
-
-#if 0
-static int32_t
-af_unix_client_bind (rpc_transport_t *this, struct sockaddr *sockaddr,
- socklen_t sockaddr_len, struct rdma_cm_id *cm_id)
-{
- data_t *path_data = NULL;
- struct sockaddr_un *addr = NULL;
- int32_t ret = -1;
-
- path_data = dict_get (this->options,
- "transport.rdma.bind-path");
- if (path_data) {
- char *path = data_to_str (path_data);
- if (!path || strlen (path) > UNIX_PATH_MAX) {
- gf_msg_debug (this->name, 0,
- "transport.rdma.bind-path not specified "
- "for unix socket, letting connect to "
- "assign default value");
- goto err;
- }
-
- addr = (struct sockaddr_un *) sockaddr;
- strcpy (addr->sun_path, path);
- ret = bind (sock, (struct sockaddr *)addr, sockaddr_len);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- TRANS_MSG_SOCKET_BIND_ERROR,
- "cannot bind to unix-domain socket %d ",
- sock);
- goto err;
- }
- }
-
-err:
- return ret;
-}
-#endif
-
-static int32_t
-client_fill_address_family(rpc_transport_t *this, struct sockaddr *sockaddr)
-{
- data_t *address_family_data = NULL;
-
- address_family_data = dict_get(this->options, "transport.address-family");
- if (!address_family_data) {
- data_t *remote_host_data = NULL, *connect_path_data = NULL;
- remote_host_data = dict_get(this->options, "remote-host");
- connect_path_data = dict_get(this->options,
- "transport.rdma.connect-path");
-
- if (!(remote_host_data || connect_path_data) ||
- (remote_host_data && connect_path_data)) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- TRANS_MSG_ADDR_FAMILY_NOT_SPECIFIED,
- "address-family not specified and not able to "
- "determine the same from other options "
- "(remote-host:%s and connect-path:%s)",
- data_to_str(remote_host_data),
- data_to_str(connect_path_data));
- return -1;
- }
-
- if (remote_host_data) {
- gf_msg_debug(this->name, 0,
- "address-family not "
- "specified, guessing it to be "
- "inet/inet6");
- sockaddr->sa_family = AF_UNSPEC;
- } else {
- gf_msg_debug(this->name, 0,
- "address-family not "
- "specified, guessing it to be unix");
- sockaddr->sa_family = AF_UNIX;
- }
-
- } else {
- char *address_family = data_to_str(address_family_data);
- if (!strcasecmp(address_family, "unix")) {
- sockaddr->sa_family = AF_UNIX;
- } else if (!strcasecmp(address_family, "inet")) {
- sockaddr->sa_family = AF_INET;
- } else if (!strcasecmp(address_family, "inet6")) {
- sockaddr->sa_family = AF_INET6;
- } else if (!strcasecmp(address_family, "inet-sdp")) {
- sockaddr->sa_family = AF_INET_SDP;
- } else {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY,
- "unknown address-family (%s) specified", address_family);
- sockaddr->sa_family = AF_UNSPEC;
- return -1;
- }
- }
-
- return 0;
-}
-
-static int32_t
-af_inet_client_get_remote_sockaddr(rpc_transport_t *this,
- struct sockaddr *sockaddr,
- socklen_t *sockaddr_len, int16_t remote_port)
-{
- dict_t *options = this->options;
- data_t *remote_host_data = NULL;
- data_t *remote_port_data = NULL;
- char *remote_host = NULL;
- struct addrinfo *addr_info = NULL;
- int32_t ret = 0;
-
- remote_host_data = dict_get(options, "remote-host");
- if (remote_host_data == NULL) {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_REMOTE_HOST_ERROR,
- "option remote-host "
- "missing in volume %s",
- this->name);
- ret = -1;
- goto err;
- }
-
- remote_host = data_to_str(remote_host_data);
- if (remote_host == NULL) {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_REMOTE_HOST_ERROR,
- "option remote-host "
- "has data NULL in volume %s",
- this->name);
- ret = -1;
- goto err;
- }
-
- if (remote_port == 0) {
- remote_port_data = dict_get(options, "remote-port");
- if (remote_port_data == NULL) {
- gf_msg_debug(this->name, 0,
- "option remote-port "
- "missing in volume %s. Defaulting to %d",
- this->name, GF_DEFAULT_RDMA_LISTEN_PORT);
-
- remote_port = GF_DEFAULT_RDMA_LISTEN_PORT;
- } else {
- remote_port = data_to_uint16(remote_port_data);
- }
- }
-
- if (remote_port == -1) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, RDMA_MSG_INVALID_ENTRY,
- "option remote-port has "
- "invalid port in volume %s",
- this->name);
- ret = -1;
- goto err;
- }
-
- /* TODO: gf_resolve is a blocking call. kick in some
- non blocking dns techniques */
- ret = gf_resolve_ip6(remote_host, remote_port, sockaddr->sa_family,
- &this->dnscache, &addr_info);
- if (ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_DNS_RESOL_FAILED,
- "DNS resolution failed on host %s", remote_host);
- goto err;
- }
-
- memcpy(sockaddr, addr_info->ai_addr, addr_info->ai_addrlen);
- *sockaddr_len = addr_info->ai_addrlen;
-
-err:
- return ret;
-}
-
-static int32_t
-af_unix_client_get_remote_sockaddr(rpc_transport_t *this,
- struct sockaddr *sockaddr,
- socklen_t *sockaddr_len)
-{
- struct sockaddr_un *sockaddr_un = NULL;
- char *connect_path = NULL;
- data_t *connect_path_data = NULL;
- int32_t ret = 0;
-
- connect_path_data = dict_get(this->options, "transport.rdma.connect-path");
- if (!connect_path_data) {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_CONNECT_PATH_ERROR,
- "option "
- "transport.rdma.connect-path not specified for "
- "address-family unix");
- ret = -1;
- goto err;
- }
-
- connect_path = data_to_str(connect_path_data);
- if (!connect_path) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, RDMA_MSG_INVALID_ENTRY,
- "connect-path is null-string");
- ret = -1;
- goto err;
- }
-
- if (strlen(connect_path) > UNIX_PATH_MAX) {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_CONNECT_PATH_ERROR,
- "connect-path value length %" GF_PRI_SIZET
- " > "
- "%d octets",
- strlen(connect_path), UNIX_PATH_MAX);
- ret = -1;
- goto err;
- }
-
- gf_msg_debug(this->name, 0, "using connect-path %s", connect_path);
- sockaddr_un = (struct sockaddr_un *)sockaddr;
- strcpy(sockaddr_un->sun_path, connect_path);
- *sockaddr_len = sizeof(struct sockaddr_un);
-
-err:
- return ret;
-}
-
-static int32_t
-af_unix_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr,
- socklen_t *addr_len)
-{
- data_t *listen_path_data = NULL;
- char *listen_path = NULL;
- int32_t ret = 0;
- struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
-
- listen_path_data = dict_get(this->options, "transport.rdma.listen-path");
- if (!listen_path_data) {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_LISTEN_PATH_ERROR,
- "missing option listen-path");
- ret = -1;
- goto err;
- }
-
- listen_path = data_to_str(listen_path_data);
-
-#ifndef UNIX_PATH_MAX
-#define UNIX_PATH_MAX 108
-#endif
-
- if (strlen(listen_path) > UNIX_PATH_MAX) {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_LISTEN_PATH_ERROR,
- "option listen-path has "
- "value length %" GF_PRI_SIZET " > %d",
- strlen(listen_path), UNIX_PATH_MAX);
- ret = -1;
- goto err;
- }
-
- sunaddr->sun_family = AF_UNIX;
- strcpy(sunaddr->sun_path, listen_path);
- *addr_len = sizeof(struct sockaddr_un);
-
-err:
- return ret;
-}
-
-static int32_t
-af_inet_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr,
- socklen_t *addr_len)
-{
- struct addrinfo hints, *res = 0;
- data_t *listen_port_data = NULL, *listen_host_data = NULL;
- uint16_t listen_port = 0;
- char service[NI_MAXSERV], *listen_host = NULL;
- dict_t *options = NULL;
- int32_t ret = 0;
-
- options = this->options;
-
- listen_port_data = dict_get(options, "transport.rdma.listen-port");
- if (listen_port_data) {
- listen_port = data_to_uint16(listen_port_data);
- }
-
- listen_host_data = dict_get(options, "transport.rdma.bind-address");
- if (listen_host_data) {
- listen_host = data_to_str(listen_host_data);
- } else {
- if (addr->sa_family == AF_INET6) {
- struct sockaddr_in6 *in = (struct sockaddr_in6 *)addr;
- in->sin6_addr = in6addr_any;
- in->sin6_port = htons(listen_port);
- *addr_len = sizeof(struct sockaddr_in6);
- goto out;
- } else if (addr->sa_family == AF_INET) {
- struct sockaddr_in *in = (struct sockaddr_in *)addr;
- in->sin_addr.s_addr = htonl(INADDR_ANY);
- in->sin_port = htons(listen_port);
- *addr_len = sizeof(struct sockaddr_in);
- goto out;
- }
- }
-
- sprintf(service, "%d", listen_port);
-
- memset(&hints, 0, sizeof(hints));
- hints.ai_family = addr->sa_family;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_flags = AI_ADDRCONFIG | AI_PASSIVE;
-
- ret = getaddrinfo(listen_host, service, &hints, &res);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, ret, TRANS_MSG_GET_ADDR_INFO_FAILED,
- "getaddrinfo failed for host %s, service %s", listen_host,
- service);
- ret = -1;
- goto out;
- }
-
- memcpy(addr, res->ai_addr, res->ai_addrlen);
- *addr_len = res->ai_addrlen;
-
- freeaddrinfo(res);
-
-out:
- return ret;
-}
-
-int32_t
-gf_rdma_client_bind(rpc_transport_t *this, struct sockaddr *sockaddr,
- socklen_t *sockaddr_len, struct rdma_cm_id *cm_id)
-{
- int ret = 0;
-
- *sockaddr_len = sizeof(struct sockaddr_in6);
- switch (sockaddr->sa_family) {
- case AF_INET_SDP:
- case AF_INET:
- *sockaddr_len = sizeof(struct sockaddr_in);
- /* Fall through */
- case AF_INET6:
- if (!this->bind_insecure) {
- ret = af_inet_bind_to_port_lt_ceiling(
- cm_id, sockaddr, *sockaddr_len, GF_CLIENT_PORT_CEILING);
- if (ret == -1) {
- gf_msg(this->name, GF_LOG_WARNING, errno,
- RDMA_MSG_PORT_BIND_FAILED,
- "cannot bind rdma_cm_id to port "
- "less than %d",
- GF_CLIENT_PORT_CEILING);
- }
- } else {
- ret = af_inet_bind_to_port_lt_ceiling(
- cm_id, sockaddr, *sockaddr_len, GF_IANA_PRIV_PORTS_START);
- if (ret == -1) {
- gf_msg(this->name, GF_LOG_WARNING, errno,
- RDMA_MSG_PORT_BIND_FAILED,
- "cannot bind rdma_cm_id to port "
- "less than %d",
- GF_IANA_PRIV_PORTS_START);
- }
- }
- break;
-
- case AF_UNIX:
- *sockaddr_len = sizeof(struct sockaddr_un);
-#if 0
- ret = af_unix_client_bind (this, (struct sockaddr *)sockaddr,
- *sockaddr_len, sock);
-#endif
- break;
-
- default:
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY,
- "unknown address family %d", sockaddr->sa_family);
- ret = -1;
- break;
- }
-
- return ret;
-}
-
-int32_t
-gf_rdma_client_get_remote_sockaddr(rpc_transport_t *this,
- struct sockaddr *sockaddr,
- socklen_t *sockaddr_len, int16_t remote_port)
-{
- int32_t ret = 0;
- char is_inet_sdp = 0;
-
- ret = client_fill_address_family(this, sockaddr);
- if (ret) {
- ret = -1;
- goto err;
- }
-
- switch (sockaddr->sa_family) {
- case AF_INET_SDP:
- sockaddr->sa_family = AF_INET;
- is_inet_sdp = 1;
- /* Fall through */
- case AF_INET:
- case AF_INET6:
- case AF_UNSPEC:
- ret = af_inet_client_get_remote_sockaddr(this, sockaddr,
- sockaddr_len, remote_port);
-
- if (is_inet_sdp) {
- sockaddr->sa_family = AF_INET_SDP;
- }
-
- break;
-
- case AF_UNIX:
- ret = af_unix_client_get_remote_sockaddr(this, sockaddr,
- sockaddr_len);
- break;
-
- default:
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY,
- "unknown address-family %d", sockaddr->sa_family);
- ret = -1;
- }
-
-err:
- return ret;
-}
-
-int32_t
-gf_rdma_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr,
- socklen_t *addr_len)
-{
- data_t *address_family_data = NULL;
- int32_t ret = 0;
- char is_inet_sdp = 0;
-
- address_family_data = dict_get(this->options, "transport.address-family");
- if (address_family_data) {
- char *address_family = NULL;
- address_family = data_to_str(address_family_data);
-
- if (!strcasecmp(address_family, "inet")) {
- addr->sa_family = AF_INET;
- } else if (!strcasecmp(address_family, "inet6")) {
- addr->sa_family = AF_INET6;
- } else if (!strcasecmp(address_family, "inet-sdp")) {
- addr->sa_family = AF_INET_SDP;
- } else if (!strcasecmp(address_family, "unix")) {
- addr->sa_family = AF_UNIX;
- } else {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY,
- "unknown address"
- " family (%s) specified",
- address_family);
- addr->sa_family = AF_UNSPEC;
- ret = -1;
- goto err;
- }
- } else {
- gf_msg_debug(this->name, 0,
- "option address-family not "
- "specified, defaulting to inet");
- addr->sa_family = AF_INET;
- }
-
- switch (addr->sa_family) {
- case AF_INET_SDP:
- is_inet_sdp = 1;
- addr->sa_family = AF_INET;
- /* Fall through */
- case AF_INET:
- case AF_INET6:
- case AF_UNSPEC:
- ret = af_inet_server_get_local_sockaddr(this, addr, addr_len);
- if (is_inet_sdp && !ret) {
- addr->sa_family = AF_INET_SDP;
- }
- break;
-
- case AF_UNIX:
- ret = af_unix_server_get_local_sockaddr(this, addr, addr_len);
- break;
- }
-
-err:
- return ret;
-}
-
-int32_t
-fill_inet6_inet_identifiers(rpc_transport_t *this,
- struct sockaddr_storage *addr, int32_t addr_len,
- char *identifier)
-{
- int32_t ret = 0, tmpaddr_len = 0;
- char service[NI_MAXSERV], host[NI_MAXHOST];
- union gf_sock_union sock_union;
-
- memset(&sock_union, 0, sizeof(sock_union));
- sock_union.storage = *addr;
- tmpaddr_len = addr_len;
-
- if (sock_union.sa.sa_family == AF_INET6) {
- int32_t one_to_four, four_to_eight, twelve_to_sixteen;
- int16_t eight_to_ten, ten_to_twelve;
-
- one_to_four = sock_union.sin6.sin6_addr.s6_addr32[0];
- four_to_eight = sock_union.sin6.sin6_addr.s6_addr32[1];
-#ifdef GF_SOLARIS_HOST_OS
- eight_to_ten = S6_ADDR16(sock_union.sin6.sin6_addr)[4];
- ten_to_twelve = S6_ADDR16(sock_union.sin6.sin6_addr)[5];
-#else
- eight_to_ten = sock_union.sin6.sin6_addr.s6_addr16[4];
- ten_to_twelve = sock_union.sin6.sin6_addr.s6_addr16[5];
-#endif
-
- twelve_to_sixteen = sock_union.sin6.sin6_addr.s6_addr32[3];
-
- /* ipv4 mapped ipv6 address has
- bits 0-80: 0
- bits 80-96: 0xffff
- bits 96-128: ipv4 address
- */
-
- if (one_to_four == 0 && four_to_eight == 0 && eight_to_ten == 0 &&
- ten_to_twelve == -1) {
- struct sockaddr_in *in_ptr = &sock_union.sin;
- memset(&sock_union, 0, sizeof(sock_union));
-
- in_ptr->sin_family = AF_INET;
- in_ptr->sin_port = ((struct sockaddr_in6 *)addr)->sin6_port;
- in_ptr->sin_addr.s_addr = twelve_to_sixteen;
- tmpaddr_len = sizeof(*in_ptr);
- }
- }
-
- ret = getnameinfo(&sock_union.sa, tmpaddr_len, host, sizeof(host), service,
- sizeof(service), NI_NUMERICHOST | NI_NUMERICSERV);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, ret, TRANS_MSG_GET_NAME_INFO_FAILED,
- "getnameinfo failed");
- }
-
- sprintf(identifier, "%s:%s", host, service);
-
- return ret;
-}
-
-int32_t
-gf_rdma_get_transport_identifiers(rpc_transport_t *this)
-{
- int32_t ret = 0;
- char is_inet_sdp = 0;
-
- switch (((struct sockaddr *)&this->myinfo.sockaddr)->sa_family) {
- case AF_INET_SDP:
- is_inet_sdp = 1;
- ((struct sockaddr *)&this->peerinfo.sockaddr)
- ->sa_family = ((struct sockaddr *)&this->myinfo.sockaddr)
- ->sa_family = AF_INET;
- /* Fall through */
- case AF_INET:
- case AF_INET6: {
- ret = fill_inet6_inet_identifiers(this, &this->myinfo.sockaddr,
- this->myinfo.sockaddr_len,
- this->myinfo.identifier);
- if (ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_INET_ERROR,
- "can't fill inet/inet6 identifier for server");
- goto err;
- }
-
- ret = fill_inet6_inet_identifiers(this, &this->peerinfo.sockaddr,
- this->peerinfo.sockaddr_len,
- this->peerinfo.identifier);
- if (ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_INET_ERROR,
- "can't fill inet/inet6 identifier for client");
- goto err;
- }
-
- if (is_inet_sdp) {
- ((struct sockaddr *)&this->peerinfo.sockaddr)
- ->sa_family = ((struct sockaddr *)&this->myinfo.sockaddr)
- ->sa_family = AF_INET_SDP;
- }
- } break;
-
- case AF_UNIX: {
- struct sockaddr_un *sunaddr = NULL;
-
- sunaddr = (struct sockaddr_un *)&this->myinfo.sockaddr;
- strcpy(this->myinfo.identifier, sunaddr->sun_path);
-
- sunaddr = (struct sockaddr_un *)&this->peerinfo.sockaddr;
- strcpy(this->peerinfo.identifier, sunaddr->sun_path);
- } break;
-
- default:
- gf_msg(this->name, GF_LOG_ERROR, 0, TRANS_MSG_UNKNOWN_ADDR_FAMILY,
- "unknown address family (%d)",
- ((struct sockaddr *)&this->myinfo.sockaddr)->sa_family);
- ret = -1;
- break;
- }
-
-err:
- return ret;
-}
diff --git a/rpc/rpc-transport/rdma/src/name.h b/rpc/rpc-transport/rdma/src/name.h
deleted file mode 100644
index 41c4c6a7e8e..00000000000
--- a/rpc/rpc-transport/rdma/src/name.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#ifndef _IB_VERBS_NAME_H
-#define _IB_VERBS_NAME_H
-
-#include <rdma/rdma_cma.h>
-
-#include <glusterfs/compat.h>
-
-int32_t
-gf_rdma_client_bind(rpc_transport_t *this, struct sockaddr *sockaddr,
- socklen_t *sockaddr_len, struct rdma_cm_id *cm_id);
-
-int32_t
-gf_rdma_client_get_remote_sockaddr(rpc_transport_t *this,
- struct sockaddr *sockaddr,
- socklen_t *sockaddr_len,
- int16_t remote_port);
-
-int32_t
-gf_rdma_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr,
- socklen_t *addr_len);
-
-int32_t
-gf_rdma_get_transport_identifiers(rpc_transport_t *this);
-
-#endif /* _IB_VERBS_NAME_H */
diff --git a/rpc/rpc-transport/rdma/src/rdma.c b/rpc/rpc-transport/rdma/src/rdma.c
deleted file mode 100644
index 61e6eea1d49..00000000000
--- a/rpc/rpc-transport/rdma/src/rdma.c
+++ /dev/null
@@ -1,4912 +0,0 @@
-/*
- Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#include <glusterfs/dict.h>
-#include <glusterfs/glusterfs.h>
-#include <glusterfs/iobuf.h>
-#include <glusterfs/logging.h>
-#include "rdma.h"
-#include "name.h"
-#include <glusterfs/byte-order.h>
-#include <glusterfs/xlator.h>
-#include "xdr-rpc.h"
-#include "rpc-lib-messages.h"
-#include "rpc-trans-rdma-messages.h"
-#include <signal.h>
-
-#define GF_RDMA_LOG_NAME "rpc-transport/rdma"
-
-static int32_t
-__gf_rdma_ioq_churn(gf_rdma_peer_t *peer);
-
-gf_rdma_post_t *
-gf_rdma_post_ref(gf_rdma_post_t *post);
-
-int
-gf_rdma_post_unref(gf_rdma_post_t *post);
-
-static void *
-gf_rdma_send_completion_proc(void *data);
-
-static void *
-gf_rdma_recv_completion_proc(void *data);
-
-void *
-gf_rdma_async_event_thread(void *context);
-
-static int32_t
-gf_rdma_create_qp(rpc_transport_t *this);
-
-static int32_t
-__gf_rdma_teardown(rpc_transport_t *this);
-
-static int32_t
-gf_rdma_teardown(rpc_transport_t *this);
-
-static int32_t
-gf_rdma_disconnect(rpc_transport_t *this, gf_boolean_t wait);
-
-static void
-gf_rdma_cm_handle_disconnect(rpc_transport_t *this);
-
-static int
-gf_rdma_cm_handle_connect_init(struct rdma_cm_event *event);
-
-static void
-gf_rdma_put_post(gf_rdma_queue_t *queue, gf_rdma_post_t *post)
-{
- post->ctx.is_request = 0;
-
- pthread_mutex_lock(&queue->lock);
- {
- if (post->prev) {
- queue->active_count--;
- post->prev->next = post->next;
- }
-
- if (post->next) {
- post->next->prev = post->prev;
- }
-
- post->prev = &queue->passive_posts;
- post->next = post->prev->next;
- post->prev->next = post;
- post->next->prev = post;
- queue->passive_count++;
- }
- pthread_mutex_unlock(&queue->lock);
-}
-
-static gf_rdma_post_t *
-gf_rdma_new_post(rpc_transport_t *this, gf_rdma_device_t *device, int32_t len,
- gf_rdma_post_type_t type)
-{
- gf_rdma_post_t *post = NULL;
- int ret = -1;
-
- post = (gf_rdma_post_t *)GF_CALLOC(1, sizeof(*post),
- gf_common_mt_rdma_post_t);
- if (post == NULL) {
- goto out;
- }
-
- pthread_mutex_init(&post->lock, NULL);
-
- post->buf_size = len;
-
- post->buf = valloc(len);
- if (!post->buf) {
- gf_msg_nomem(GF_RDMA_LOG_NAME, GF_LOG_ERROR, len);
- goto out;
- }
-
- post->mr = ibv_reg_mr(device->pd, post->buf, post->buf_size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!post->mr) {
- gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_MR_ALOC_FAILED,
- "memory registration failed");
- goto out;
- }
-
- post->device = device;
- post->type = type;
-
- ret = 0;
-out:
- if (ret != 0 && post) {
- free(post->buf);
-
- GF_FREE(post);
- post = NULL;
- }
-
- return post;
-}
-
-static gf_rdma_post_t *
-gf_rdma_get_post(gf_rdma_queue_t *queue)
-{
- gf_rdma_post_t *post = NULL;
-
- pthread_mutex_lock(&queue->lock);
- {
- post = queue->passive_posts.next;
- if (post == &queue->passive_posts)
- post = NULL;
-
- if (post) {
- if (post->prev)
- post->prev->next = post->next;
- if (post->next)
- post->next->prev = post->prev;
- post->prev = &queue->active_posts;
- post->next = post->prev->next;
- post->prev->next = post;
- post->next->prev = post;
- post->reused++;
- queue->active_count++;
- }
- }
- pthread_mutex_unlock(&queue->lock);
-
- return post;
-}
-
-void
-gf_rdma_destroy_post(gf_rdma_post_t *post)
-{
- ibv_dereg_mr(post->mr);
- free(post->buf);
- GF_FREE(post);
-}
-
-static int32_t
-__gf_rdma_quota_get(gf_rdma_peer_t *peer)
-{
- int32_t ret = -1;
- gf_rdma_private_t *priv = NULL;
-
- priv = peer->trans->private;
-
- if (priv->connected && peer->quota > 0) {
- ret = peer->quota--;
- }
-
- return ret;
-}
-
-static void
-__gf_rdma_ioq_entry_free(gf_rdma_ioq_t *entry)
-{
- list_del_init(&entry->list);
-
- if (entry->iobref) {
- iobref_unref(entry->iobref);
- entry->iobref = NULL;
- }
-
- if (entry->msg.request.rsp_iobref) {
- iobref_unref(entry->msg.request.rsp_iobref);
- entry->msg.request.rsp_iobref = NULL;
- }
-
- mem_put(entry);
-}
-
-static void
-__gf_rdma_ioq_flush(gf_rdma_peer_t *peer)
-{
- gf_rdma_ioq_t *entry = NULL, *dummy = NULL;
-
- list_for_each_entry_safe(entry, dummy, &peer->ioq, list)
- {
- __gf_rdma_ioq_entry_free(entry);
- }
-}
-
-static int32_t
-__gf_rdma_disconnect(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
-
- priv = this->private;
-
- if (priv->connected) {
- rdma_disconnect(priv->peer.cm_id);
- }
-
- return 0;
-}
-
-static void
-gf_rdma_queue_init(gf_rdma_queue_t *queue)
-{
- pthread_mutex_init(&queue->lock, NULL);
-
- queue->active_posts.next = &queue->active_posts;
- queue->active_posts.prev = &queue->active_posts;
- queue->passive_posts.next = &queue->passive_posts;
- queue->passive_posts.prev = &queue->passive_posts;
-}
-
-static void
-__gf_rdma_destroy_queue(gf_rdma_post_t *post)
-{
- gf_rdma_post_t *tmp = NULL;
-
- while (post->next != post) {
- tmp = post->next;
-
- post->next = post->next->next;
- post->next->prev = post;
-
- gf_rdma_destroy_post(tmp);
- }
-}
-
-static void
-gf_rdma_destroy_queue(gf_rdma_queue_t *queue)
-{
- if (queue == NULL) {
- goto out;
- }
-
- pthread_mutex_lock(&queue->lock);
- {
- if (queue->passive_count > 0) {
- __gf_rdma_destroy_queue(&queue->passive_posts);
- queue->passive_count = 0;
- }
-
- if (queue->active_count > 0) {
- __gf_rdma_destroy_queue(&queue->active_posts);
- queue->active_count = 0;
- }
- }
- pthread_mutex_unlock(&queue->lock);
-
-out:
- return;
-}
-
-static void
-gf_rdma_destroy_posts(rpc_transport_t *this)
-{
- gf_rdma_device_t *device = NULL;
- gf_rdma_private_t *priv = NULL;
-
- if (this == NULL) {
- goto out;
- }
-
- priv = this->private;
- device = priv->device;
-
- gf_rdma_destroy_queue(&device->sendq);
- gf_rdma_destroy_queue(&device->recvq);
-
-out:
- return;
-}
-
-static int32_t
-__gf_rdma_create_posts(rpc_transport_t *this, int32_t count, int32_t size,
- gf_rdma_queue_t *q, gf_rdma_post_type_t type)
-{
- int32_t i = 0;
- int32_t ret = 0;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
-
- priv = this->private;
- device = priv->device;
-
- for (i = 0; i < count; i++) {
- gf_rdma_post_t *post = NULL;
-
- post = gf_rdma_new_post(this, device, size + 2048, type);
- if (!post) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_POST_CREATION_FAILED,
- "post creation failed");
- ret = -1;
- break;
- }
-
- gf_rdma_put_post(q, post);
- }
- return ret;
-}
-
-static int32_t
-gf_rdma_post_recv(struct ibv_srq *srq, gf_rdma_post_t *post)
-{
- struct ibv_sge list = {.addr = (unsigned long)post->buf,
- .length = post->buf_size,
- .lkey = post->mr->lkey};
-
- struct ibv_recv_wr wr =
- {
- .wr_id = (unsigned long)post,
- .sg_list = &list,
- .num_sge = 1,
- },
- *bad_wr;
-
- gf_rdma_post_ref(post);
-
- return ibv_post_srq_recv(srq, &wr, &bad_wr);
-}
-
-static void
-gf_rdma_deregister_iobuf_pool(gf_rdma_device_t *device)
-{
- gf_rdma_arena_mr *arena_mr = NULL;
- gf_rdma_arena_mr *tmp = NULL;
-
- while (device) {
- pthread_mutex_lock(&device->all_mr_lock);
- {
- if (!list_empty(&device->all_mr)) {
- list_for_each_entry_safe(arena_mr, tmp, &device->all_mr, list)
- {
- if (ibv_dereg_mr(arena_mr->mr)) {
- gf_msg("rdma", GF_LOG_WARNING, 0,
- RDMA_MSG_DEREGISTER_ARENA_FAILED,
- "deallocation of memory region "
- "failed");
- pthread_mutex_unlock(&device->all_mr_lock);
- return;
- }
- list_del(&arena_mr->list);
- GF_FREE(arena_mr);
- }
- }
- }
- pthread_mutex_unlock(&device->all_mr_lock);
-
- device = device->next;
- }
-}
-
-int
-gf_rdma_deregister_arena(struct list_head **mr_list,
- struct iobuf_arena *iobuf_arena)
-{
- gf_rdma_arena_mr *tmp = NULL;
- gf_rdma_arena_mr *dummy = NULL;
- gf_rdma_device_t *device = NULL;
- int count = 0, i = 0;
-
- count = iobuf_arena->iobuf_pool->rdma_device_count;
- for (i = 0; i < count; i++) {
- device = iobuf_arena->iobuf_pool->device[i];
- pthread_mutex_lock(&device->all_mr_lock);
- {
- list_for_each_entry_safe(tmp, dummy, mr_list[i], list)
- {
- if (tmp->iobuf_arena == iobuf_arena) {
- if (ibv_dereg_mr(tmp->mr)) {
- gf_msg("rdma", GF_LOG_WARNING, 0,
- RDMA_MSG_DEREGISTER_ARENA_FAILED,
- "deallocation of memory region "
- "failed");
- pthread_mutex_unlock(&device->all_mr_lock);
- return -1;
- }
- list_del(&tmp->list);
- GF_FREE(tmp);
- break;
- }
- }
- }
- pthread_mutex_unlock(&device->all_mr_lock);
- }
-
- return 0;
-}
-
-int
-gf_rdma_register_arena(void **arg1, void *arg2)
-{
- struct ibv_mr *mr = NULL;
- gf_rdma_arena_mr *new = NULL;
- struct iobuf_pool *iobuf_pool = NULL;
- gf_rdma_device_t **device = (gf_rdma_device_t **)arg1;
- struct iobuf_arena *iobuf_arena = arg2;
- int count = 0, i = 0;
-
- iobuf_pool = iobuf_arena->iobuf_pool;
- count = iobuf_pool->rdma_device_count;
- for (i = 0; i < count; i++) {
- new = GF_CALLOC(1, sizeof(gf_rdma_arena_mr),
- gf_common_mt_rdma_arena_mr);
- if (new == NULL) {
- gf_msg("rdma", GF_LOG_INFO, ENOMEM, RDMA_MSG_MR_ALOC_FAILED,
- "Out of "
- "memory: registering pre allocated buffer "
- "with rdma device failed.");
- return -1;
- }
- INIT_LIST_HEAD(&new->list);
- new->iobuf_arena = iobuf_arena;
-
- mr = ibv_reg_mr(device[i]->pd, iobuf_arena->mem_base,
- iobuf_arena->arena_size,
- IBV_ACCESS_REMOTE_READ | IBV_ACCESS_LOCAL_WRITE |
- IBV_ACCESS_REMOTE_WRITE);
- if (!mr)
- gf_msg("rdma", GF_LOG_WARNING, 0, RDMA_MSG_MR_ALOC_FAILED,
- "allocation of mr "
- "failed");
-
- new->mr = mr;
- pthread_mutex_lock(&device[i]->all_mr_lock);
- {
- list_add(&new->list, &device[i]->all_mr);
- }
- pthread_mutex_unlock(&device[i]->all_mr_lock);
- new = NULL;
- }
-
- return 0;
-}
-
-static void
-gf_rdma_register_iobuf_pool(gf_rdma_device_t *device,
- struct iobuf_pool *iobuf_pool)
-{
- struct iobuf_arena *tmp = NULL;
- struct iobuf_arena *dummy = NULL;
- struct ibv_mr *mr = NULL;
- gf_rdma_arena_mr *new = NULL;
-
- if (!list_empty(&iobuf_pool->all_arenas)) {
- list_for_each_entry_safe(tmp, dummy, &iobuf_pool->all_arenas, all_list)
- {
- new = GF_CALLOC(1, sizeof(gf_rdma_arena_mr),
- gf_common_mt_rdma_arena_mr);
- if (new == NULL) {
- gf_msg("rdma", GF_LOG_INFO, ENOMEM, RDMA_MSG_MR_ALOC_FAILED,
- "Out of "
- "memory: registering pre allocated "
- "buffer with rdma device failed.");
- return;
- }
- INIT_LIST_HEAD(&new->list);
- new->iobuf_arena = tmp;
-
- mr = ibv_reg_mr(device->pd, tmp->mem_base, tmp->arena_size,
- IBV_ACCESS_REMOTE_READ | IBV_ACCESS_LOCAL_WRITE |
- IBV_ACCESS_REMOTE_WRITE);
- if (!mr) {
- gf_msg("rdma", GF_LOG_WARNING, 0, RDMA_MSG_MR_ALOC_FAILED,
- "failed"
-                       " to pre-register buffers with rdma "
- "devices.");
- }
- new->mr = mr;
- pthread_mutex_lock(&device->all_mr_lock);
- {
- list_add(&new->list, &device->all_mr);
- }
- pthread_mutex_unlock(&device->all_mr_lock);
-
- new = NULL;
- }
- }
-
- return;
-}
-
-static void
-gf_rdma_register_iobuf_pool_with_device(gf_rdma_device_t *device,
- struct iobuf_pool *iobuf_pool)
-{
- while (device) {
- gf_rdma_register_iobuf_pool(device, iobuf_pool);
- device = device->next;
- }
-}
-
-static struct ibv_mr *
-gf_rdma_get_pre_registred_mr(rpc_transport_t *this, void *ptr, int size)
-{
- gf_rdma_arena_mr *tmp = NULL;
- gf_rdma_arena_mr *dummy = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
-
- priv = this->private;
- device = priv->device;
-
- pthread_mutex_lock(&device->all_mr_lock);
- {
- if (!list_empty(&device->all_mr)) {
- list_for_each_entry_safe(tmp, dummy, &device->all_mr, list)
- {
- if (tmp->iobuf_arena->mem_base <= ptr &&
- ptr < tmp->iobuf_arena->mem_base +
- tmp->iobuf_arena->arena_size) {
- pthread_mutex_unlock(&device->all_mr_lock);
- return tmp->mr;
- }
- }
- }
- }
- pthread_mutex_unlock(&device->all_mr_lock);
-
- return NULL;
-}
-
-static int32_t
-gf_rdma_create_posts(rpc_transport_t *this)
-{
- int32_t i = 0, ret = 0;
- gf_rdma_post_t *post = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- gf_rdma_device_t *device = NULL;
-
- priv = this->private;
- options = &priv->options;
- device = priv->device;
-
- ret = __gf_rdma_create_posts(this, options->send_count, options->send_size,
- &device->sendq, GF_RDMA_SEND_POST);
- if (!ret)
- ret = __gf_rdma_create_posts(this, options->recv_count,
- options->recv_size, &device->recvq,
- GF_RDMA_RECV_POST);
-
- if (!ret) {
- for (i = 0; i < options->recv_count; i++) {
- post = gf_rdma_get_post(&device->recvq);
- if (gf_rdma_post_recv(device->srq, post) != 0) {
- ret = -1;
- break;
- }
- }
- }
-
- if (ret)
- gf_rdma_destroy_posts(this);
-
- return ret;
-}
-
-static void
-gf_rdma_destroy_cq(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
-
- priv = this->private;
- device = priv->device;
-
- if (device->recv_cq)
- ibv_destroy_cq(device->recv_cq);
- device->recv_cq = NULL;
-
- if (device->send_cq)
- ibv_destroy_cq(device->send_cq);
- device->send_cq = NULL;
-
- return;
-}
-
-static int32_t
-gf_rdma_create_cq(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- gf_rdma_device_t *device = NULL;
- uint64_t send_cqe = 0;
- int32_t ret = 0;
- struct ibv_device_attr device_attr = {
- {0},
- };
-
- priv = this->private;
- options = &priv->options;
- device = priv->device;
-
- device->recv_cq = ibv_create_cq(priv->device->context,
- options->recv_count * 2, device,
- device->recv_chan, 0);
- if (!device->recv_cq) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_CQ_CREATION_FAILED,
- "creation of CQ for "
- "device %s failed",
- device->device_name);
- ret = -1;
- goto out;
- } else if (ibv_req_notify_cq(device->recv_cq, 0)) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_REQ_NOTIFY_CQ_REVQ_FAILED,
- "ibv_req_notify_"
- "cq on recv CQ of device %s failed",
- device->device_name);
- ret = -1;
- goto out;
- }
-
- do {
- ret = ibv_query_device(priv->device->context, &device_attr);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_QUERY_DEVICE_FAILED,
- "ibv_query_"
- "device on %s returned %d (%s)",
- priv->device->device_name, ret,
- (ret > 0) ? strerror(ret) : "");
- ret = -1;
- goto out;
- }
-
- send_cqe = (uint64_t)options->send_count * 128;
- send_cqe = (send_cqe > device_attr.max_cqe) ? device_attr.max_cqe
- : send_cqe;
-
- /* TODO: make send_cq size dynamically adaptive */
- device->send_cq = ibv_create_cq(priv->device->context, send_cqe, device,
- device->send_chan, 0);
- if (!device->send_cq) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_CQ_CREATION_FAILED,
- "creation of send_cq "
- "for device %s failed",
- device->device_name);
- ret = -1;
- goto out;
- }
-
- if (ibv_req_notify_cq(device->send_cq, 0)) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- RDMA_MSG_REQ_NOTIFY_CQ_SENDQ_FAILED,
- "ibv_req_notify_cq on send_cq for device %s"
- " failed",
- device->device_name);
- ret = -1;
- goto out;
- }
- } while (0);
-
-out:
- if (ret != 0)
- gf_rdma_destroy_cq(this);
-
- return ret;
-}
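
gf_rdma_create_cq() ties each CQ to a completion channel and arms it with ibv_req_notify_cq(); the completion threads created later block on that channel. A minimal sketch of the notify/poll loop (hypothetical helper, error paths and cleanup trimmed) is:

#include <infiniband/verbs.h>

/* Create a CQ bound to a completion channel, arm it, then block for events
 * and drain completions. */
static int
cq_event_loop_sketch(struct ibv_context *ctx, int cqe)
{
    struct ibv_comp_channel *chan = ibv_create_comp_channel(ctx);
    struct ibv_cq *cq = NULL;
    struct ibv_cq *ev_cq = NULL;
    void *ev_ctx = NULL;
    struct ibv_wc wc;

    if (!chan)
        return -1;

    cq = ibv_create_cq(ctx, cqe, NULL, chan, 0);
    if (!cq || ibv_req_notify_cq(cq, 0))
        return -1;

    for (;;) {
        if (ibv_get_cq_event(chan, &ev_cq, &ev_ctx)) /* blocks */
            break;
        ibv_ack_cq_events(ev_cq, 1);
        if (ibv_req_notify_cq(ev_cq, 0)) /* re-arm before draining */
            break;
        while (ibv_poll_cq(ev_cq, 1, &wc) > 0) {
            /* dispatch on wc.opcode and wc.status here */
        }
    }
    return 0;
}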
-
-static gf_rdma_device_t *
-gf_rdma_get_device(rpc_transport_t *this, struct ibv_context *ibctx,
- char *device_name)
-{
- glusterfs_ctx_t *ctx = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- int32_t ret = 0;
- int32_t i = 0;
- gf_rdma_device_t *trav = NULL, *device = NULL;
- gf_rdma_ctx_t *rdma_ctx = NULL;
- struct iobuf_pool *iobuf_pool = NULL;
-
- priv = this->private;
- options = &priv->options;
- ctx = this->ctx;
- rdma_ctx = ctx->ib;
- iobuf_pool = ctx->iobuf_pool;
-
- trav = rdma_ctx->device;
-
- while (trav) {
- if (!strcmp(trav->device_name, device_name))
- break;
- trav = trav->next;
- }
-
- if (!trav) {
- trav = GF_CALLOC(1, sizeof(*trav), gf_common_mt_rdma_device_t);
- if (trav == NULL) {
- goto out;
- }
- priv->device = trav;
- trav->context = ibctx;
-
- trav->next = rdma_ctx->device;
- rdma_ctx->device = trav;
-
- iobuf_pool->device[iobuf_pool->rdma_device_count] = trav;
- iobuf_pool->mr_list[iobuf_pool->rdma_device_count++] = &trav->all_mr;
- trav->request_ctx_pool = mem_pool_new(gf_rdma_request_context_t,
- GF_RDMA_POOL_SIZE);
- if (trav->request_ctx_pool == NULL) {
- goto out;
- }
-
- trav->ioq_pool = mem_pool_new(gf_rdma_ioq_t, GF_RDMA_POOL_SIZE);
- if (trav->ioq_pool == NULL) {
- goto out;
- }
-
- trav->reply_info_pool = mem_pool_new(gf_rdma_reply_info_t,
- GF_RDMA_POOL_SIZE);
- if (trav->reply_info_pool == NULL) {
- goto out;
- }
-
- trav->device_name = gf_strdup(device_name);
-
- trav->send_chan = ibv_create_comp_channel(trav->context);
- if (!trav->send_chan) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_SEND_COMP_CHAN_FAILED,
- "could not "
- "create send completion channel for "
- "device (%s)",
- device_name);
- goto out;
- }
-
- trav->recv_chan = ibv_create_comp_channel(trav->context);
- if (!trav->recv_chan) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_RECV_COMP_CHAN_FAILED,
- "could not "
- "create recv completion channel for "
- "device (%s)",
- device_name);
-
- /* TODO: cleanup current mess */
- goto out;
- }
-
- if (gf_rdma_create_cq(this) < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_CQ_CREATION_FAILED,
- "could not create CQ for device (%s)", device_name);
- goto out;
- }
-
- /* protection domain */
- trav->pd = ibv_alloc_pd(trav->context);
-
- if (!trav->pd) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_ALOC_PROT_DOM_FAILED,
- "could not "
- "allocate protection domain for device (%s)",
- device_name);
- goto out;
- }
-
- struct ibv_srq_init_attr attr = {.attr = {.max_wr = options->recv_count,
- .max_sge = 1,
- .srq_limit = 10}};
- trav->srq = ibv_create_srq(trav->pd, &attr);
-
- if (!trav->srq) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_CRE_SRQ_FAILED,
- "could not create SRQ"
- " for device (%s)",
- device_name);
- goto out;
- }
-
- /* queue init */
- gf_rdma_queue_init(&trav->sendq);
- gf_rdma_queue_init(&trav->recvq);
-
- INIT_LIST_HEAD(&trav->all_mr);
- pthread_mutex_init(&trav->all_mr_lock, NULL);
- gf_rdma_register_iobuf_pool(trav, iobuf_pool);
-
- if (gf_rdma_create_posts(this) < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, RDMA_MSG_ALOC_POST_FAILED,
-                   "could not allocate "
-                   "posts for device (%s)",
- device_name);
- goto out;
- }
-
- /* completion threads */
- ret = gf_thread_create(&trav->send_thread, NULL,
- gf_rdma_send_completion_proc, trav->send_chan,
- "rdmascom");
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- RDMA_MSG_SEND_COMP_THREAD_FAILED,
- "could not create send completion thread for "
- "device (%s)",
- device_name);
- goto out;
- }
-
- ret = gf_thread_create(&trav->recv_thread, NULL,
- gf_rdma_recv_completion_proc, trav->recv_chan,
- "rdmarcom");
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- RDMA_MSG_RECV_COMP_THREAD_FAILED,
- "could not create recv completion thread "
- "for device (%s)",
- device_name);
- return NULL;
- }
-
- ret = gf_thread_create(&trav->async_event_thread, NULL,
- gf_rdma_async_event_thread, ibctx, "rdmaAsyn");
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- RDMA_MSG_ASYNC_EVENT_THEAD_FAILED,
- "could not create async_event_thread");
- return NULL;
- }
-
- /* qpreg */
- pthread_mutex_init(&trav->qpreg.lock, NULL);
- for (i = 0; i < 42; i++) {
- trav->qpreg.ents[i].next = &trav->qpreg.ents[i];
- trav->qpreg.ents[i].prev = &trav->qpreg.ents[i];
- }
- }
-
- device = trav;
- trav = NULL;
-out:
-
- if (trav != NULL) {
- rdma_ctx->device = trav->next;
- gf_rdma_destroy_posts(this);
- mem_pool_destroy(trav->ioq_pool);
- mem_pool_destroy(trav->request_ctx_pool);
- mem_pool_destroy(trav->reply_info_pool);
- if (trav->pd != NULL) {
- ibv_dealloc_pd(trav->pd);
- }
- gf_rdma_destroy_cq(this);
- ibv_destroy_comp_channel(trav->recv_chan);
- ibv_destroy_comp_channel(trav->send_chan);
- GF_FREE((char *)trav->device_name);
- GF_FREE(trav);
- }
-
- return device;
-}
-
-static rpc_transport_t *
-gf_rdma_transport_new(rpc_transport_t *listener, struct rdma_cm_id *cm_id)
-{
- gf_rdma_private_t *listener_priv = NULL, *priv = NULL;
- rpc_transport_t *this = NULL, *new = NULL;
- gf_rdma_options_t *options = NULL;
- char *device_name = NULL;
-
- listener_priv = listener->private;
-
- this = GF_CALLOC(1, sizeof(rpc_transport_t), gf_common_mt_rpc_transport_t);
- if (this == NULL) {
- goto out;
- }
-
- this->listener = listener;
-
- priv = GF_CALLOC(1, sizeof(gf_rdma_private_t), gf_common_mt_rdma_private_t);
- if (priv == NULL) {
- goto out;
- }
-
- this->private = priv;
- priv->options = listener_priv->options;
-
- priv->listener = listener;
- priv->entity = GF_RDMA_SERVER;
-
- options = &priv->options;
-
- this->ops = listener->ops;
- this->init = listener->init;
- this->fini = listener->fini;
- this->ctx = listener->ctx;
- this->name = gf_strdup(listener->name);
- this->notify = listener->notify;
- this->mydata = listener->mydata;
- this->xl = listener->xl;
-
- this->myinfo.sockaddr_len = sizeof(cm_id->route.addr.src_addr);
- memcpy(&this->myinfo.sockaddr, &cm_id->route.addr.src_addr,
- this->myinfo.sockaddr_len);
-
- this->peerinfo.sockaddr_len = sizeof(cm_id->route.addr.dst_addr);
- memcpy(&this->peerinfo.sockaddr, &cm_id->route.addr.dst_addr,
- this->peerinfo.sockaddr_len);
-
- priv->peer.trans = this;
- gf_rdma_get_transport_identifiers(this);
-
- device_name = (char *)ibv_get_device_name(cm_id->verbs->device);
- if (device_name == NULL) {
- gf_msg(listener->name, GF_LOG_WARNING, 0,
- RDMA_MSG_GET_DEVICE_NAME_FAILED,
- "cannot get device "
- "name (peer:%s me:%s)",
- this->peerinfo.identifier, this->myinfo.identifier);
- goto out;
- }
-
- priv->device = gf_rdma_get_device(this, cm_id->verbs, device_name);
- if (priv->device == NULL) {
- gf_msg(listener->name, GF_LOG_WARNING, 0, RDMA_MSG_GET_IB_DEVICE_FAILED,
- "cannot get infiniband"
- " device %s (peer:%s me:%s)",
- device_name, this->peerinfo.identifier, this->myinfo.identifier);
- goto out;
- }
-
- priv->peer.send_count = options->send_count;
- priv->peer.recv_count = options->recv_count;
- priv->peer.send_size = options->send_size;
- priv->peer.recv_size = options->recv_size;
- priv->peer.cm_id = cm_id;
- INIT_LIST_HEAD(&priv->peer.ioq);
-
- pthread_mutex_init(&priv->write_mutex, NULL);
- pthread_mutex_init(&priv->recv_mutex, NULL);
-
- cm_id->context = this;
-
- new = rpc_transport_ref(this);
- this = NULL;
-out:
- if (this != NULL) {
- if (this->private != NULL) {
- GF_FREE(this->private);
- }
-
- if (this->name != NULL) {
- GF_FREE(this->name);
- }
-
- GF_FREE(this);
- }
-
- return new;
-}
-
-static int
-gf_rdma_cm_handle_connect_request(struct rdma_cm_event *event)
-{
- int ret = -1;
- rpc_transport_t *this = NULL, *listener = NULL;
- struct rdma_cm_id *child_cm_id = NULL, *listener_cm_id = NULL;
- struct rdma_conn_param conn_param = {
- 0,
- };
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
-
- child_cm_id = event->id;
- listener_cm_id = event->listen_id;
-
- listener = listener_cm_id->context;
- priv = listener->private;
- options = &priv->options;
-
- this = gf_rdma_transport_new(listener, child_cm_id);
- if (this == NULL) {
- gf_msg(listener->name, GF_LOG_WARNING, 0,
- RDMA_MSG_CREAT_INC_TRANS_FAILED,
- "could not create "
- "a transport for incoming connection"
- " (me.name:%s me.identifier:%s)",
- listener->name, listener->myinfo.identifier);
- rdma_destroy_id(child_cm_id);
- goto out;
- }
-
- gf_msg_trace(listener->name, 0,
- "got a connect request (me:%s peer:"
- "%s)",
- listener->myinfo.identifier, this->peerinfo.identifier);
-
- ret = gf_rdma_create_qp(this);
- if (ret < 0) {
- gf_msg(listener->name, GF_LOG_WARNING, 0, RDMA_MSG_CREAT_QP_FAILED,
- "could not create QP "
- "(peer:%s me:%s)",
- this->peerinfo.identifier, this->myinfo.identifier);
- gf_rdma_cm_handle_disconnect(this);
- goto out;
- }
-
- conn_param.responder_resources = 1;
- conn_param.initiator_depth = 1;
- conn_param.retry_count = options->attr_retry_cnt;
- conn_param.rnr_retry_count = options->attr_rnr_retry;
-
- ret = rdma_accept(child_cm_id, &conn_param);
- if (ret < 0) {
- gf_msg(listener->name, GF_LOG_WARNING, errno, RDMA_MSG_ACCEPT_FAILED,
- "rdma_accept failed peer:%s "
- "me:%s",
- this->peerinfo.identifier, this->myinfo.identifier);
- gf_rdma_cm_handle_disconnect(this);
- goto out;
- }
- gf_rdma_cm_handle_connect_init(event);
- ret = 0;
-
-out:
- return ret;
-}
-
-static int
-gf_rdma_cm_handle_route_resolved(struct rdma_cm_event *event)
-{
- struct rdma_conn_param conn_param = {
- 0,
- };
- int ret = 0;
- rpc_transport_t *this = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_peer_t *peer = NULL;
- gf_rdma_options_t *options = NULL;
-
- if (event == NULL) {
- goto out;
- }
-
- this = event->id->context;
-
- priv = this->private;
- peer = &priv->peer;
- options = &priv->options;
-
- ret = gf_rdma_create_qp(this);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_CREAT_QP_FAILED,
- "could not create QP "
- "(peer:%s me:%s)",
- this->peerinfo.identifier, this->myinfo.identifier);
- gf_rdma_cm_handle_disconnect(this);
- goto out;
- }
-
- memset(&conn_param, 0, sizeof conn_param);
- conn_param.responder_resources = 1;
- conn_param.initiator_depth = 1;
- conn_param.retry_count = options->attr_retry_cnt;
- conn_param.rnr_retry_count = options->attr_rnr_retry;
-
- ret = rdma_connect(peer->cm_id, &conn_param);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_CONNECT_FAILED,
- "rdma_connect failed");
- gf_rdma_cm_handle_disconnect(this);
- goto out;
- }
-
- gf_msg_trace(this->name, 0, "route resolved (me:%s peer:%s)",
- this->myinfo.identifier, this->peerinfo.identifier);
-
- ret = 0;
-out:
- return ret;
-}
-
-static int
-gf_rdma_cm_handle_addr_resolved(struct rdma_cm_event *event)
-{
- rpc_transport_t *this = NULL;
- gf_rdma_peer_t *peer = NULL;
- gf_rdma_private_t *priv = NULL;
- int ret = 0;
-
- this = event->id->context;
-
- priv = this->private;
- peer = &priv->peer;
-
- GF_ASSERT(peer->cm_id == event->id);
-
- this->myinfo.sockaddr_len = sizeof(peer->cm_id->route.addr.src_addr);
- memcpy(&this->myinfo.sockaddr, &peer->cm_id->route.addr.src_addr,
- this->myinfo.sockaddr_len);
-
- this->peerinfo.sockaddr_len = sizeof(peer->cm_id->route.addr.dst_addr);
- memcpy(&this->peerinfo.sockaddr, &peer->cm_id->route.addr.dst_addr,
- this->peerinfo.sockaddr_len);
-
- gf_rdma_get_transport_identifiers(this);
-
- ret = rdma_resolve_route(peer->cm_id, 2000);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_ROUTE_RESOLVE_FAILED,
- "rdma_resolve_route "
- "failed (me:%s peer:%s)",
- this->myinfo.identifier, this->peerinfo.identifier);
- gf_rdma_cm_handle_disconnect(this);
- return ret;
- }
-
- gf_msg_trace(this->name, 0, "Address resolved (me:%s peer:%s)",
- this->myinfo.identifier, this->peerinfo.identifier);
-
- return ret;
-}
-
-static void
-gf_rdma_cm_handle_disconnect(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- char need_unref = 0;
-
- priv = this->private;
- gf_msg_debug(this->name, 0, "peer disconnected, cleaning up");
-
- pthread_mutex_lock(&priv->write_mutex);
- {
- if (priv->peer.cm_id != NULL) {
- need_unref = 1;
- priv->connected = 0;
- }
-
- __gf_rdma_teardown(this);
- }
- pthread_mutex_unlock(&priv->write_mutex);
-
- rpc_transport_notify(this, RPC_TRANSPORT_DISCONNECT, this);
-
- if (need_unref)
- rpc_transport_unref(this);
-}
-
-static int
-gf_rdma_cm_handle_connect_init(struct rdma_cm_event *event)
-{
- rpc_transport_t *this = NULL;
- gf_rdma_private_t *priv = NULL;
- struct rdma_cm_id *cm_id = NULL;
- int ret = 0;
-
- cm_id = event->id;
- this = cm_id->context;
- priv = this->private;
-
- if (priv->connected == 1) {
- gf_msg_trace(this->name, 0,
- "received event "
- "RDMA_CM_EVENT_ESTABLISHED (me:%s peer:%s)",
- this->myinfo.identifier, this->peerinfo.identifier);
- return ret;
- }
-
- priv->connected = 1;
-
- pthread_mutex_lock(&priv->write_mutex);
- {
- priv->peer.quota = 1;
- priv->peer.quota_set = 0;
- }
- pthread_mutex_unlock(&priv->write_mutex);
-
- if (priv->entity == GF_RDMA_CLIENT) {
- gf_msg_trace(this->name, 0,
- "received event "
- "RDMA_CM_EVENT_ESTABLISHED (me:%s peer:%s)",
- this->myinfo.identifier, this->peerinfo.identifier);
- ret = rpc_transport_notify(this, RPC_TRANSPORT_CONNECT, this);
-
- } else if (priv->entity == GF_RDMA_SERVER) {
- ret = rpc_transport_notify(priv->listener, RPC_TRANSPORT_ACCEPT, this);
- }
-
- if (ret < 0) {
- gf_rdma_disconnect(this, _gf_false);
- }
-
- return ret;
-}
-
-static int
-gf_rdma_cm_handle_event_error(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
-
- priv = this->private;
-
- if (priv->entity != GF_RDMA_SERVER_LISTENER) {
- gf_rdma_cm_handle_disconnect(this);
- }
-
- return 0;
-}
-
-static int
-gf_rdma_cm_handle_device_removal(struct rdma_cm_event *event)
-{
- return 0;
-}
-
-static void *
-gf_rdma_cm_event_handler(void *data)
-{
- struct rdma_cm_event *event = NULL;
- int ret = 0;
- rpc_transport_t *this = NULL;
- struct rdma_event_channel *event_channel = NULL;
-
- event_channel = data;
-
- while (1) {
- ret = rdma_get_cm_event(event_channel, &event);
- if (ret != 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno,
- RDMA_MSG_CM_EVENT_FAILED, "rdma_cm_get_event failed");
- break;
- }
-
- switch (event->event) {
- case RDMA_CM_EVENT_ADDR_RESOLVED:
- gf_rdma_cm_handle_addr_resolved(event);
- break;
-
- case RDMA_CM_EVENT_ROUTE_RESOLVED:
- gf_rdma_cm_handle_route_resolved(event);
- break;
-
- case RDMA_CM_EVENT_CONNECT_REQUEST:
- gf_rdma_cm_handle_connect_request(event);
- break;
-
- case RDMA_CM_EVENT_ESTABLISHED:
- gf_rdma_cm_handle_connect_init(event);
- break;
-
- case RDMA_CM_EVENT_ADDR_ERROR:
- case RDMA_CM_EVENT_ROUTE_ERROR:
- case RDMA_CM_EVENT_CONNECT_ERROR:
- case RDMA_CM_EVENT_UNREACHABLE:
- case RDMA_CM_EVENT_REJECTED:
- this = event->id->context;
-
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_CM_EVENT_FAILED,
- "cma event %s, "
- "error %d (me:%s peer:%s)\n",
- rdma_event_str(event->event), event->status,
- this->myinfo.identifier, this->peerinfo.identifier);
-
- rdma_ack_cm_event(event);
- event = NULL;
-
- gf_rdma_cm_handle_event_error(this);
- continue;
-
- case RDMA_CM_EVENT_DISCONNECTED:
- this = event->id->context;
-
- gf_msg_debug(this->name, 0,
- "received disconnect "
- "(me:%s peer:%s)\n",
- this->myinfo.identifier,
- this->peerinfo.identifier);
-
- rdma_ack_cm_event(event);
- event = NULL;
-
- gf_rdma_cm_handle_disconnect(this);
- continue;
-
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_CM_EVENT_FAILED,
- "device "
- "removed");
- gf_rdma_cm_handle_device_removal(event);
- break;
-
- default:
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_CM_EVENT_FAILED,
- "unhandled event: %s, ignoring",
- rdma_event_str(event->event));
- break;
- }
-
- rdma_ack_cm_event(event);
- }
-
- return NULL;
-}
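
The event handler above is the hub of connection management. For orientation, a minimal client-side rdma_cm skeleton that exercises the same events (hypothetical cm_connect_sketch(); QP creation, error cleanup and real timeout tuning omitted) is:

#include <sys/socket.h>
#include <rdma/rdma_cma.h>

/* Resolve address and route, then connect, driving everything from one
 * event channel -- the same events the handler above reacts to. */
static int
cm_connect_sketch(struct sockaddr *dst)
{
    struct rdma_event_channel *ec = rdma_create_event_channel();
    struct rdma_cm_id *id = NULL;
    struct rdma_cm_event *ev = NULL;
    struct rdma_conn_param conn = { .responder_resources = 1,
                                    .initiator_depth = 1,
                                    .retry_count = 7 };

    if (!ec || rdma_create_id(ec, &id, NULL, RDMA_PS_TCP))
        return -1;
    if (rdma_resolve_addr(id, NULL, dst, 2000))
        return -1;

    while (rdma_get_cm_event(ec, &ev) == 0) {
        enum rdma_cm_event_type type = ev->event;
        rdma_ack_cm_event(ev);

        if (type == RDMA_CM_EVENT_ADDR_RESOLVED)
            rdma_resolve_route(id, 2000);
        else if (type == RDMA_CM_EVENT_ROUTE_RESOLVED)
            rdma_connect(id, &conn); /* a QP would be created before this */
        else if (type == RDMA_CM_EVENT_ESTABLISHED)
            return 0;
        else
            break; /* addr/route/connect error, rejection or disconnect */
    }
    return -1;
}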
-
-static int32_t
-gf_rdma_post_send(struct ibv_qp *qp, gf_rdma_post_t *post, int32_t len)
-{
- struct ibv_sge list = {.addr = (unsigned long)post->buf,
- .length = len,
- .lkey = post->mr->lkey};
-
- struct ibv_send_wr wr =
- {
- .wr_id = (unsigned long)post,
- .sg_list = &list,
- .num_sge = 1,
- .opcode = IBV_WR_SEND,
- .send_flags = IBV_SEND_SIGNALED,
- },
- *bad_wr;
-
- if (!qp)
- return EINVAL;
-
- return ibv_post_send(qp, &wr, &bad_wr);
-}
-
-int
-__gf_rdma_encode_error(gf_rdma_peer_t *peer, gf_rdma_reply_info_t *reply_info,
- struct iovec *rpchdr, gf_rdma_header_t *hdr,
- gf_rdma_errcode_t err)
-{
- struct rpc_msg *rpc_msg = NULL;
-
- if (reply_info != NULL) {
- hdr->rm_xid = hton32(reply_info->rm_xid);
- } else {
- rpc_msg = rpchdr[0].iov_base; /* assume rpchdr contains
- * only one vector.
- * (which is true)
- */
- hdr->rm_xid = rpc_msg->rm_xid;
- }
-
- hdr->rm_vers = hton32(GF_RDMA_VERSION);
- hdr->rm_credit = hton32(peer->send_count);
- hdr->rm_type = hton32(GF_RDMA_ERROR);
- hdr->rm_body.rm_error.rm_type = hton32(err);
- if (err == ERR_VERS) {
- hdr->rm_body.rm_error.rm_version.gf_rdma_vers_low = hton32(
- GF_RDMA_VERSION);
- hdr->rm_body.rm_error.rm_version.gf_rdma_vers_high = hton32(
- GF_RDMA_VERSION);
- }
-
- return sizeof(*hdr);
-}
-
-int32_t
-__gf_rdma_send_error(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_post_t *post, gf_rdma_reply_info_t *reply_info,
- gf_rdma_errcode_t err)
-{
- int32_t ret = -1, len = 0;
-
- len = __gf_rdma_encode_error(peer, reply_info, entry->rpchdr,
- (gf_rdma_header_t *)post->buf, err);
- if (len == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, 0, RDMA_MSG_ENCODE_ERROR,
- "encode error returned -1");
- goto out;
- }
-
- gf_rdma_post_ref(post);
-
- ret = gf_rdma_post_send(peer->qp, post, len);
- if (!ret) {
- ret = len;
- } else {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_SEND_FAILED,
- "gf_rdma_post_send (to %s) failed with ret = %d (%s)",
- peer->trans->peerinfo.identifier, ret,
- (ret > 0) ? strerror(ret) : "");
- gf_rdma_post_unref(post);
- __gf_rdma_disconnect(peer->trans);
- ret = -1;
- }
-
-out:
- return ret;
-}
-
-int32_t
-__gf_rdma_create_read_chunks_from_vector(gf_rdma_peer_t *peer,
- gf_rdma_read_chunk_t **readch_ptr,
- int32_t *pos, struct iovec *vector,
- int count,
- gf_rdma_request_context_t *request_ctx)
-{
- int i = 0;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
- struct ibv_mr *mr = NULL;
- gf_rdma_read_chunk_t *readch = NULL;
- int32_t ret = -1;
-
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, readch_ptr, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, *readch_ptr, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, request_ctx, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, vector, out);
-
- priv = peer->trans->private;
- device = priv->device;
- readch = *readch_ptr;
-
- for (i = 0; i < count; i++) {
- readch->rc_discrim = hton32(1);
- readch->rc_position = hton32(*pos);
-
- mr = gf_rdma_get_pre_registred_mr(
- peer->trans, (void *)vector[i].iov_base, vector[i].iov_len);
- if (!mr) {
- mr = ibv_reg_mr(device->pd, vector[i].iov_base, vector[i].iov_len,
- IBV_ACCESS_REMOTE_READ);
- }
- if (!mr) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno,
- RDMA_MSG_MR_ALOC_FAILED,
- "memory registration failed (peer:%s)",
- peer->trans->peerinfo.identifier);
- goto out;
- }
-
- request_ctx->mr[request_ctx->mr_count++] = mr;
-
- readch->rc_target.rs_handle = hton32(mr->rkey);
- readch->rc_target.rs_length = hton32(vector[i].iov_len);
- readch->rc_target.rs_offset = hton64(
- (uint64_t)(unsigned long)vector[i].iov_base);
-
- *pos = *pos + vector[i].iov_len;
- readch++;
- }
-
- *readch_ptr = readch;
-
- ret = 0;
-out:
- return ret;
-}
-
-int32_t
-__gf_rdma_create_read_chunks(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_chunktype_t type, uint32_t **ptr,
- gf_rdma_request_context_t *request_ctx)
-{
- int32_t ret = -1;
- int pos = 0;
-
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, entry, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, ptr, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, *ptr, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, request_ctx, out);
-
- request_ctx->iobref = iobref_ref(entry->iobref);
-
- if (type == gf_rdma_areadch) {
- pos = 0;
- ret = __gf_rdma_create_read_chunks_from_vector(
- peer, (gf_rdma_read_chunk_t **)ptr, &pos, entry->rpchdr,
- entry->rpchdr_count, request_ctx);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_READ_CHUNK_VECTOR_FAILED,
- "cannot create read chunks from vector "
- "entry->rpchdr");
- goto out;
- }
-
- ret = __gf_rdma_create_read_chunks_from_vector(
- peer, (gf_rdma_read_chunk_t **)ptr, &pos, entry->proghdr,
- entry->proghdr_count, request_ctx);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_READ_CHUNK_VECTOR_FAILED,
- "cannot create read chunks from vector "
- "entry->proghdr");
- }
-
- if (entry->prog_payload_count != 0) {
- ret = __gf_rdma_create_read_chunks_from_vector(
- peer, (gf_rdma_read_chunk_t **)ptr, &pos, entry->prog_payload,
- entry->prog_payload_count, request_ctx);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_READ_CHUNK_VECTOR_FAILED,
- "cannot create read chunks from vector"
- " entry->prog_payload");
- }
- }
- } else {
- pos = iov_length(entry->rpchdr, entry->rpchdr_count);
- ret = __gf_rdma_create_read_chunks_from_vector(
- peer, (gf_rdma_read_chunk_t **)ptr, &pos, entry->prog_payload,
- entry->prog_payload_count, request_ctx);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_READ_CHUNK_VECTOR_FAILED,
- "cannot create read chunks from vector "
- "entry->prog_payload");
- }
- }
-
- /* terminate read-chunk list*/
- **ptr = 0;
- *ptr = *ptr + 1;
-out:
- return ret;
-}
-
-int32_t
-__gf_rdma_create_write_chunks_from_vector(
- gf_rdma_peer_t *peer, gf_rdma_write_chunk_t **writech_ptr,
- struct iovec *vector, int count, gf_rdma_request_context_t *request_ctx)
-{
- int i = 0;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
- struct ibv_mr *mr = NULL;
- gf_rdma_write_chunk_t *writech = NULL;
- int32_t ret = -1;
-
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, writech_ptr, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, *writech_ptr, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, request_ctx, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, vector, out);
-
- writech = *writech_ptr;
-
- priv = peer->trans->private;
- device = priv->device;
-
- for (i = 0; i < count; i++) {
- mr = gf_rdma_get_pre_registred_mr(
- peer->trans, (void *)vector[i].iov_base, vector[i].iov_len);
- if (!mr) {
- mr = ibv_reg_mr(device->pd, vector[i].iov_base, vector[i].iov_len,
- IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE);
- }
-
- if (!mr) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno,
- RDMA_MSG_MR_ALOC_FAILED,
- "memory "
- "registration failed (peer:%s)",
- peer->trans->peerinfo.identifier);
- goto out;
- }
-
- request_ctx->mr[request_ctx->mr_count++] = mr;
-
- writech->wc_target.rs_handle = hton32(mr->rkey);
- writech->wc_target.rs_length = hton32(vector[i].iov_len);
- writech->wc_target.rs_offset = hton64(
- ((uint64_t)(unsigned long)vector[i].iov_base));
-
- writech++;
- }
-
- *writech_ptr = writech;
-
- ret = 0;
-out:
- return ret;
-}
-
-int32_t
-__gf_rdma_create_write_chunks(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_chunktype_t chunk_type, uint32_t **ptr,
- gf_rdma_request_context_t *request_ctx)
-{
- int32_t ret = -1;
- gf_rdma_write_array_t *warray = NULL;
-
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, ptr, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, *ptr, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, request_ctx, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, entry, out);
-
- if ((chunk_type == gf_rdma_replych) &&
- ((entry->msg.request.rsphdr_count != 1) ||
- (entry->msg.request.rsphdr_vec[0].iov_base == NULL))) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_BUFFER_ERROR,
- (entry->msg.request.rsphdr_count == 1)
- ? "chunktype specified as reply chunk but the vector "
- "specifying the buffer to be used for holding reply"
- " header is not correct"
- : "chunktype specified as reply chunk, but more than one "
- "buffer provided for holding reply");
- goto out;
- }
-
- /*
- if ((chunk_type == gf_rdma_writech)
- && ((entry->msg.request.rsphdr_count == 0)
- || (entry->msg.request.rsphdr_vec[0].iov_base == NULL))) {
- gf_msg_debug (GF_RDMA_LOG_NAME, 0,
- "vector specifying buffer to hold the program's reply "
- "header should also be provided when buffers are "
- "provided for holding the program's payload in reply");
- goto out;
- }
- */
-
- if (chunk_type == gf_rdma_writech) {
- warray = (gf_rdma_write_array_t *)*ptr;
- warray->wc_discrim = hton32(1);
- warray->wc_nchunks = hton32(entry->msg.request.rsp_payload_count);
-
- *ptr = (uint32_t *)&warray->wc_array[0];
-
- ret = __gf_rdma_create_write_chunks_from_vector(
- peer, (gf_rdma_write_chunk_t **)ptr, entry->msg.request.rsp_payload,
- entry->msg.request.rsp_payload_count, request_ctx);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_WRITE_CHUNK_VECTOR_FAILED,
- "cannot create write chunks from vector "
- "entry->rpc_payload");
- goto out;
- }
-
- /* terminate write chunklist */
- **ptr = 0;
- *ptr = *ptr + 1;
-
- /* no reply chunklist */
- **ptr = 0;
- *ptr = *ptr + 1;
- } else {
- /* no write chunklist */
- **ptr = 0;
- *ptr = *ptr + 1;
-
- warray = (gf_rdma_write_array_t *)*ptr;
- warray->wc_discrim = hton32(1);
- warray->wc_nchunks = hton32(entry->msg.request.rsphdr_count);
-
- *ptr = (uint32_t *)&warray->wc_array[0];
-
- ret = __gf_rdma_create_write_chunks_from_vector(
- peer, (gf_rdma_write_chunk_t **)ptr, entry->msg.request.rsphdr_vec,
- entry->msg.request.rsphdr_count, request_ctx);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_WRITE_CHUNK_VECTOR_FAILED,
- "cannot create write chunks from vector "
- "entry->rpchdr");
- goto out;
- }
-
- /* terminate reply chunklist */
- **ptr = 0;
- *ptr = *ptr + 1;
- }
-
-out:
- return ret;
-}
-
-static void
-__gf_rdma_deregister_mr(gf_rdma_device_t *device, struct ibv_mr **mr, int count)
-{
- gf_rdma_arena_mr *tmp = NULL;
- gf_rdma_arena_mr *dummy = NULL;
- int i = 0;
- int found = 0;
-
- if (mr == NULL) {
- goto out;
- }
-
- for (i = 0; i < count; i++) {
- found = 0;
- pthread_mutex_lock(&device->all_mr_lock);
- {
- if (!list_empty(&device->all_mr)) {
- list_for_each_entry_safe(tmp, dummy, &device->all_mr, list)
- {
- if (tmp->mr == mr[i]) {
- found = 1;
- break;
- }
- }
- }
- }
- pthread_mutex_unlock(&device->all_mr_lock);
- if (!found)
- ibv_dereg_mr(mr[i]);
- }
-
-out:
- return;
-}
-
-static int32_t
-__gf_rdma_quota_put(gf_rdma_peer_t *peer)
-{
- int32_t ret = 0;
-
- peer->quota++;
- ret = peer->quota;
-
- if (!list_empty(&peer->ioq)) {
- ret = __gf_rdma_ioq_churn(peer);
- }
-
- return ret;
-}
-
-static int32_t
-gf_rdma_quota_put(gf_rdma_peer_t *peer)
-{
- int32_t ret = 0;
- gf_rdma_private_t *priv = NULL;
-
- priv = peer->trans->private;
- pthread_mutex_lock(&priv->write_mutex);
- {
- ret = __gf_rdma_quota_put(peer);
- }
- pthread_mutex_unlock(&priv->write_mutex);
-
- return ret;
-}
-
-/* to be called with priv->mutex held */
-void
-__gf_rdma_request_context_destroy(gf_rdma_request_context_t *context)
-{
- gf_rdma_peer_t *peer = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
- int32_t ret = 0;
-
- if (context == NULL) {
- goto out;
- }
-
- peer = context->peer;
-
- priv = peer->trans->private;
- device = priv->device;
- __gf_rdma_deregister_mr(device, context->mr, context->mr_count);
-
- if (priv->connected) {
- ret = __gf_rdma_quota_put(peer);
- if (ret < 0) {
- gf_msg_debug("rdma", 0, "failed to send message");
- mem_put(context);
- __gf_rdma_disconnect(peer->trans);
- goto out;
- }
- }
-
- if (context->iobref != NULL) {
- iobref_unref(context->iobref);
- context->iobref = NULL;
- }
-
- if (context->rsp_iobref != NULL) {
- iobref_unref(context->rsp_iobref);
- context->rsp_iobref = NULL;
- }
-
- mem_put(context);
-
-out:
- return;
-}
-
-void
-gf_rdma_post_context_destroy(gf_rdma_device_t *device,
- gf_rdma_post_context_t *ctx)
-{
- if (ctx == NULL) {
- goto out;
- }
-
- __gf_rdma_deregister_mr(device, ctx->mr, ctx->mr_count);
-
- if (ctx->iobref != NULL) {
- iobref_unref(ctx->iobref);
- }
-
- if (ctx->hdr_iobuf != NULL) {
- iobuf_unref(ctx->hdr_iobuf);
- }
-
- memset(ctx, 0, sizeof(*ctx));
-out:
- return;
-}
-
-int
-gf_rdma_post_unref(gf_rdma_post_t *post)
-{
- int refcount = -1;
-
- if (post == NULL) {
- goto out;
- }
-
- pthread_mutex_lock(&post->lock);
- {
- refcount = --post->refcount;
- }
- pthread_mutex_unlock(&post->lock);
-
- if (refcount == 0) {
- gf_rdma_post_context_destroy(post->device, &post->ctx);
- if (post->type == GF_RDMA_SEND_POST) {
- gf_rdma_put_post(&post->device->sendq, post);
- } else {
- gf_rdma_post_recv(post->device->srq, post);
- }
- }
-out:
- return refcount;
-}
-
-int
-gf_rdma_post_get_refcount(gf_rdma_post_t *post)
-{
- int refcount = -1;
-
- if (post == NULL) {
- goto out;
- }
-
- pthread_mutex_lock(&post->lock);
- {
- refcount = post->refcount;
- }
- pthread_mutex_unlock(&post->lock);
-
-out:
- return refcount;
-}
-
-gf_rdma_post_t *
-gf_rdma_post_ref(gf_rdma_post_t *post)
-{
- if (post == NULL) {
- goto out;
- }
-
- pthread_mutex_lock(&post->lock);
- {
- post->refcount++;
- }
- pthread_mutex_unlock(&post->lock);
-
-out:
- return post;
-}
-
-int32_t
-__gf_rdma_ioq_churn_request(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_post_t *post)
-{
- gf_rdma_chunktype_t rtype = gf_rdma_noch;
- gf_rdma_chunktype_t wtype = gf_rdma_noch;
- uint64_t send_size = 0;
- gf_rdma_header_t *hdr = NULL;
- struct rpc_msg *rpc_msg = NULL;
- uint32_t *chunkptr = NULL;
- char *buf = NULL;
- int32_t ret = 0;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
- int chunk_count = 0;
- gf_rdma_request_context_t *request_ctx = NULL;
- uint32_t prog_payload_length = 0, len = 0;
- struct rpc_req *rpc_req = NULL;
-
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, entry, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, post, out);
-
- if ((entry->msg.request.rsphdr_count != 0) &&
- (entry->msg.request.rsp_payload_count != 0)) {
- ret = -1;
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_WRITE_REPLY_CHUNCK_CONFLICT,
- "both write-chunklist and reply-chunk cannot be "
- "present");
- goto out;
- }
-
- post->ctx.is_request = 1;
- priv = peer->trans->private;
- device = priv->device;
-
- hdr = (gf_rdma_header_t *)post->buf;
-
- send_size = iov_length(entry->rpchdr, entry->rpchdr_count) +
- iov_length(entry->proghdr, entry->proghdr_count) +
- GLUSTERFS_RDMA_MAX_HEADER_SIZE;
-
- if (entry->prog_payload_count != 0) {
- prog_payload_length = iov_length(entry->prog_payload,
- entry->prog_payload_count);
- }
-
- if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) {
- rtype = gf_rdma_areadch;
- } else if ((send_size + prog_payload_length) <
- GLUSTERFS_RDMA_INLINE_THRESHOLD) {
- rtype = gf_rdma_noch;
- } else if (entry->prog_payload_count != 0) {
- rtype = gf_rdma_readch;
- }
-
- if (entry->msg.request.rsphdr_count != 0) {
- wtype = gf_rdma_replych;
- } else if (entry->msg.request.rsp_payload_count != 0) {
- wtype = gf_rdma_writech;
- }
-
- if (rtype == gf_rdma_readch) {
- chunk_count += entry->prog_payload_count;
- } else if (rtype == gf_rdma_areadch) {
- chunk_count += entry->rpchdr_count;
- chunk_count += entry->proghdr_count;
- }
-
- if (wtype == gf_rdma_writech) {
- chunk_count += entry->msg.request.rsp_payload_count;
- } else if (wtype == gf_rdma_replych) {
- chunk_count += entry->msg.request.rsphdr_count;
- }
-
- if (chunk_count > GF_RDMA_MAX_SEGMENTS) {
- ret = -1;
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_CHUNK_COUNT_GREAT_MAX_SEGMENTS,
- "chunk count(%d) exceeding maximum allowed RDMA "
- "segment count(%d)",
- chunk_count, GF_RDMA_MAX_SEGMENTS);
- goto out;
- }
-
- if ((wtype != gf_rdma_noch) || (rtype != gf_rdma_noch)) {
- request_ctx = mem_get(device->request_ctx_pool);
- if (request_ctx == NULL) {
- ret = -1;
- goto out;
- }
-
- memset(request_ctx, 0, sizeof(*request_ctx));
-
- request_ctx->pool = device->request_ctx_pool;
- request_ctx->peer = peer;
-
- entry->msg.request.rpc_req->conn_private = request_ctx;
-
- if (entry->msg.request.rsp_iobref != NULL) {
- request_ctx->rsp_iobref = iobref_ref(entry->msg.request.rsp_iobref);
- }
- }
-
- rpc_msg = (struct rpc_msg *)entry->rpchdr[0].iov_base;
-
- hdr->rm_xid = rpc_msg->rm_xid; /* no need of hton32(rpc_msg->rm_xid),
- * since rpc_msg->rm_xid is already
- * hton32ed value of actual xid
- */
- hdr->rm_vers = hton32(GF_RDMA_VERSION);
- hdr->rm_credit = hton32(peer->send_count);
- if (rtype == gf_rdma_areadch) {
- hdr->rm_type = hton32(GF_RDMA_NOMSG);
- } else {
- hdr->rm_type = hton32(GF_RDMA_MSG);
- }
-
- chunkptr = &hdr->rm_body.rm_chunks[0];
- if (rtype != gf_rdma_noch) {
- ret = __gf_rdma_create_read_chunks(peer, entry, rtype, &chunkptr,
- request_ctx);
- if (ret != 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_CREATE_READ_CHUNK_FAILED,
- "creation of read chunks failed");
- goto out;
- }
- } else {
- *chunkptr++ = 0; /* no read chunks */
- }
-
- if (wtype != gf_rdma_noch) {
- ret = __gf_rdma_create_write_chunks(peer, entry, wtype, &chunkptr,
- request_ctx);
- if (ret != 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_CREATE_WRITE_REPLAY_FAILED,
- "creation of write/reply chunk failed");
- goto out;
- }
- } else {
- *chunkptr++ = 0; /* no write chunks */
- *chunkptr++ = 0; /* no reply chunk */
- }
-
- buf = (char *)chunkptr;
-
- if (rtype != gf_rdma_areadch) {
- iov_unload(buf, entry->rpchdr, entry->rpchdr_count);
- buf += iov_length(entry->rpchdr, entry->rpchdr_count);
-
- iov_unload(buf, entry->proghdr, entry->proghdr_count);
- buf += iov_length(entry->proghdr, entry->proghdr_count);
-
- if (rtype != gf_rdma_readch) {
- iov_unload(buf, entry->prog_payload, entry->prog_payload_count);
- buf += iov_length(entry->prog_payload, entry->prog_payload_count);
- }
- }
-
- len = buf - post->buf;
-
- gf_rdma_post_ref(post);
-
- ret = gf_rdma_post_send(peer->qp, post, len);
- if (!ret) {
- ret = len;
- } else {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_SEND_FAILED,
- "gf_rdma_post_send (to %s) failed with ret = %d (%s)",
- peer->trans->peerinfo.identifier, ret,
- (ret > 0) ? strerror(ret) : "");
- gf_rdma_post_unref(post);
- __gf_rdma_disconnect(peer->trans);
- ret = -1;
- }
-
-out:
- if (ret == -1) {
- rpc_req = entry->msg.request.rpc_req;
-
- if (request_ctx != NULL) {
- __gf_rdma_request_context_destroy(rpc_req->conn_private);
- }
-
- rpc_req->conn_private = NULL;
- }
-
- return ret;
-}
-
-static void
-__gf_rdma_fill_reply_header(gf_rdma_header_t *header, struct iovec *rpchdr,
- gf_rdma_reply_info_t *reply_info, int credits)
-{
- struct rpc_msg *rpc_msg = NULL;
-
- if (reply_info != NULL) {
- header->rm_xid = hton32(reply_info->rm_xid);
- } else {
- rpc_msg = rpchdr[0].iov_base; /* assume rpchdr contains
- * only one vector.
- * (which is true)
- */
- header->rm_xid = rpc_msg->rm_xid;
- }
-
- header->rm_type = hton32(GF_RDMA_MSG);
- header->rm_vers = hton32(GF_RDMA_VERSION);
- header->rm_credit = hton32(credits);
-
- header->rm_body.rm_chunks[0] = 0; /* no read chunks */
- header->rm_body.rm_chunks[1] = 0; /* no write chunks */
- header->rm_body.rm_chunks[2] = 0; /* no reply chunks */
-
- return;
-}
-
-int32_t
-__gf_rdma_send_reply_inline(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_post_t *post,
- gf_rdma_reply_info_t *reply_info)
-{
- gf_rdma_header_t *header = NULL;
- int32_t send_size = 0, ret = 0;
- char *buf = NULL;
-
- send_size = iov_length(entry->rpchdr, entry->rpchdr_count) +
- iov_length(entry->proghdr, entry->proghdr_count) +
- iov_length(entry->prog_payload, entry->prog_payload_count) +
- sizeof(gf_rdma_header_t); /*
- * remember, no chunklists in the
- * reply
- */
-
- if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) {
- ret = __gf_rdma_send_error(peer, entry, post, reply_info, ERR_CHUNK);
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_SEND_SIZE_GREAT_INLINE_THRESHOLD,
- "msg size (%d) is greater than maximum size "
- "of msg that can be sent inlined (%d)",
- send_size, GLUSTERFS_RDMA_INLINE_THRESHOLD);
- goto out;
- }
-
- header = (gf_rdma_header_t *)post->buf;
-
- __gf_rdma_fill_reply_header(header, entry->rpchdr, reply_info,
- peer->send_count);
-
- buf = (char *)&header->rm_body.rm_chunks[3];
-
- if (entry->rpchdr_count != 0) {
- iov_unload(buf, entry->rpchdr, entry->rpchdr_count);
- buf += iov_length(entry->rpchdr, entry->rpchdr_count);
- }
-
- if (entry->proghdr_count != 0) {
- iov_unload(buf, entry->proghdr, entry->proghdr_count);
- buf += iov_length(entry->proghdr, entry->proghdr_count);
- }
-
- if (entry->prog_payload_count != 0) {
- iov_unload(buf, entry->prog_payload, entry->prog_payload_count);
- buf += iov_length(entry->prog_payload, entry->prog_payload_count);
- }
-
- gf_rdma_post_ref(post);
-
- ret = gf_rdma_post_send(peer->qp, post, (buf - post->buf));
- if (!ret) {
- ret = send_size;
- } else {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_SEND_FAILED,
- "posting send (to %s) "
- "failed with ret = %d (%s)",
- peer->trans->peerinfo.identifier, ret,
- (ret > 0) ? strerror(ret) : "");
- gf_rdma_post_unref(post);
- __gf_rdma_disconnect(peer->trans);
- ret = -1;
- }
-
-out:
- return ret;
-}
-
-int32_t
-__gf_rdma_reply_encode_write_chunks(gf_rdma_peer_t *peer, uint32_t payload_size,
- gf_rdma_post_t *post,
- gf_rdma_reply_info_t *reply_info,
- uint32_t **ptr)
-{
- uint32_t chunk_size = 0;
- int32_t ret = -1;
- gf_rdma_write_array_t *target_array = NULL;
- int i = 0;
-
- target_array = (gf_rdma_write_array_t *)*ptr;
-
- for (i = 0; i < reply_info->wc_array->wc_nchunks; i++) {
- chunk_size += reply_info->wc_array->wc_array[i].wc_target.rs_length;
- }
-
- if (chunk_size < payload_size) {
- gf_msg_debug(GF_RDMA_LOG_NAME, 0,
- "length of payload (%d) is "
- "exceeding the total write chunk length (%d)",
- payload_size, chunk_size);
- goto out;
- }
-
- target_array->wc_discrim = hton32(1);
- for (i = 0; (i < reply_info->wc_array->wc_nchunks) && (payload_size != 0);
- i++) {
- target_array->wc_array[i].wc_target.rs_offset = hton64(
- reply_info->wc_array->wc_array[i].wc_target.rs_offset);
-
- target_array->wc_array[i].wc_target.rs_length = hton32(
- min(payload_size,
- reply_info->wc_array->wc_array[i].wc_target.rs_length));
- }
-
- target_array->wc_nchunks = hton32(i);
- target_array->wc_array[i].wc_target.rs_handle = 0; /* terminate
- chunklist */
-
- ret = 0;
-
- *ptr = &target_array->wc_array[i].wc_target.rs_length;
-out:
- return ret;
-}
-
-static int32_t
-__gf_rdma_register_local_mr_for_rdma(gf_rdma_peer_t *peer, struct iovec *vector,
- int count, gf_rdma_post_context_t *ctx)
-{
- int i = 0;
- int32_t ret = -1;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
-
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, ctx, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, vector, out);
-
- priv = peer->trans->private;
- device = priv->device;
-
- for (i = 0; i < count; i++) {
- /* what if the memory is registered more than once?
- * Assume that a single write buffer is passed to afr, which
-         * then passes it to its children. If more than one child
-         * happens to use rdma, then the buffer is registered more than
-         * once.
-         * The ib-verbs specification says that multiple registrations of
-         * the same memory location are allowed. Refer to 10.6.3.8 of
- * Infiniband Architecture Specification Volume 1
- * (Release 1.2.1)
- */
- ctx->mr[ctx->mr_count] = gf_rdma_get_pre_registred_mr(
- peer->trans, (void *)vector[i].iov_base, vector[i].iov_len);
-
- if (!ctx->mr[ctx->mr_count]) {
- ctx->mr[ctx->mr_count] = ibv_reg_mr(device->pd, vector[i].iov_base,
- vector[i].iov_len,
- IBV_ACCESS_LOCAL_WRITE);
- }
- if (ctx->mr[ctx->mr_count] == NULL) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno,
- RDMA_MSG_MR_ALOC_FAILED,
- "registering memory for IBV_ACCESS_LOCAL_WRITE"
- " failed");
- goto out;
- }
-
- ctx->mr_count++;
- }
-
- ret = 0;
-out:
- return ret;
-}
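
The comment above leans on the fact that ib-verbs allows the same memory to be registered multiple times. A tiny sketch illustrating that behaviour (hypothetical register_twice_sketch(); cleanup of partial failures omitted):

#include <infiniband/verbs.h>

/* Registering the same buffer twice yields two independent MRs, each with
 * its own keys; both must be deregistered separately. */
static int
register_twice_sketch(struct ibv_pd *pd, void *buf, size_t len)
{
    struct ibv_mr *a = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);
    struct ibv_mr *b = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);

    if (!a || !b)
        return -1;

    /* either a->lkey or b->lkey may be placed in a work request */
    ibv_dereg_mr(a);
    ibv_dereg_mr(b);
    return 0;
}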
-
-/* 1. assumes xfer_len of data is pointed to by vector(s) starting from vec[*idx]
- * 2. modifies vec
- */
-int32_t
-__gf_rdma_write(gf_rdma_peer_t *peer, gf_rdma_post_t *post, struct iovec *vec,
- uint32_t xfer_len, int *idx, gf_rdma_write_chunk_t *writech)
-{
- int size = 0, num_sge = 0, i = 0;
- int32_t ret = -1;
- struct ibv_sge *sg_list = NULL;
- struct ibv_send_wr wr =
- {
- .opcode = IBV_WR_RDMA_WRITE,
- .send_flags = IBV_SEND_SIGNALED,
- },
- *bad_wr;
-
- if ((peer == NULL) || (writech == NULL) || (idx == NULL) ||
- (post == NULL) || (vec == NULL) || (xfer_len == 0)) {
- goto out;
- }
-
- for (i = *idx; size < xfer_len; i++) {
- size += vec[i].iov_len;
- }
-
- num_sge = i - *idx;
-
- sg_list = GF_CALLOC(num_sge, sizeof(struct ibv_sge), gf_common_mt_sge);
- if (sg_list == NULL) {
- ret = -1;
- goto out;
- }
-
- for ((i = *idx), (num_sge = 0); (xfer_len != 0); i++, num_sge++) {
- size = min(xfer_len, vec[i].iov_len);
-
- sg_list[num_sge].addr = (unsigned long)vec[i].iov_base;
- sg_list[num_sge].length = size;
- sg_list[num_sge].lkey = post->ctx.mr[i]->lkey;
-
- xfer_len -= size;
- }
-
- *idx = i;
-
- if (size < vec[i - 1].iov_len) {
- vec[i - 1].iov_base += size;
- vec[i - 1].iov_len -= size;
- *idx = i - 1;
- }
-
- wr.sg_list = sg_list;
- wr.num_sge = num_sge;
- wr.wr_id = (unsigned long)gf_rdma_post_ref(post);
- wr.wr.rdma.rkey = writech->wc_target.rs_handle;
- wr.wr.rdma.remote_addr = writech->wc_target.rs_offset;
-
- ret = ibv_post_send(peer->qp, &wr, &bad_wr);
- if (ret) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_CLIENT_ERROR,
- "rdma write to "
- "client (%s) failed with ret = %d (%s)",
- peer->trans->peerinfo.identifier, ret,
- (ret > 0) ? strerror(ret) : "");
- ret = -1;
- }
-
- GF_FREE(sg_list);
-out:
- return ret;
-}
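
Condensed to its essentials, the work request built in __gf_rdma_write() is a signaled RDMA WRITE whose remote address and rkey come from the peer's write chunk. A single-SGE sketch (hypothetical rdma_write_one_sketch(); the local buffer is assumed to be registered already):

#include <stdint.h>
#include <infiniband/verbs.h>

/* Issue one RDMA WRITE of a registered local buffer to a remote
 * (address, rkey) pair advertised by the peer. Returns 0 on success. */
static int
rdma_write_one_sketch(struct ibv_qp *qp, void *buf, uint32_t len,
                      uint32_t lkey, uint64_t remote_addr, uint32_t rkey)
{
    struct ibv_sge sge = {
        .addr = (uintptr_t)buf,
        .length = len,
        .lkey = lkey,
    };
    struct ibv_send_wr wr = {
        .wr_id = (uintptr_t)buf,
        .sg_list = &sge,
        .num_sge = 1,
        .opcode = IBV_WR_RDMA_WRITE,
        .send_flags = IBV_SEND_SIGNALED,
    };
    struct ibv_send_wr *bad_wr = NULL;

    wr.wr.rdma.remote_addr = remote_addr;
    wr.wr.rdma.rkey = rkey;

    return ibv_post_send(qp, &wr, &bad_wr);
}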
-
-int32_t
-__gf_rdma_do_gf_rdma_write(gf_rdma_peer_t *peer, gf_rdma_post_t *post,
- struct iovec *vector, int count,
- struct iobref *iobref,
- gf_rdma_reply_info_t *reply_info)
-{
- int i = 0, payload_idx = 0;
- uint32_t payload_size = 0, xfer_len = 0;
- int32_t ret = -1;
-
- if (count != 0) {
- payload_size = iov_length(vector, count);
- }
-
- if (payload_size == 0) {
- ret = 0;
- goto out;
- }
-
- ret = __gf_rdma_register_local_mr_for_rdma(peer, vector, count, &post->ctx);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_MR_ALOC_FAILED,
- "registering memory region for rdma failed");
- goto out;
- }
-
- post->ctx.iobref = iobref_ref(iobref);
-
- for (i = 0; (i < reply_info->wc_array->wc_nchunks) && (payload_size != 0);
- i++) {
- xfer_len = min(payload_size,
- reply_info->wc_array->wc_array[i].wc_target.rs_length);
-
- ret = __gf_rdma_write(peer, post, vector, xfer_len, &payload_idx,
- &reply_info->wc_array->wc_array[i]);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_WRITE_CLIENT_ERROR,
- "rdma write to "
- "client (%s) failed",
- peer->trans->peerinfo.identifier);
- goto out;
- }
-
- payload_size -= xfer_len;
- }
-
- ret = 0;
-out:
-
- return ret;
-}
-
-int32_t
-__gf_rdma_send_reply_type_nomsg(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_post_t *post,
- gf_rdma_reply_info_t *reply_info)
-{
- gf_rdma_header_t *header = NULL;
- char *buf = NULL;
- uint32_t payload_size = 0;
- int count = 0, i = 0;
- int32_t ret = 0;
- struct iovec vector[MAX_IOVEC];
-
- header = (gf_rdma_header_t *)post->buf;
-
- __gf_rdma_fill_reply_header(header, entry->rpchdr, reply_info,
- peer->send_count);
-
- header->rm_type = hton32(GF_RDMA_NOMSG);
-
- payload_size = iov_length(entry->rpchdr, entry->rpchdr_count) +
- iov_length(entry->proghdr, entry->proghdr_count);
-
- /* encode reply chunklist */
- buf = (char *)&header->rm_body.rm_chunks[2];
- ret = __gf_rdma_reply_encode_write_chunks(peer, payload_size, post,
- reply_info, (uint32_t **)&buf);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_ENCODE_ERROR,
- "encoding write chunks failed");
- ret = __gf_rdma_send_error(peer, entry, post, reply_info, ERR_CHUNK);
- goto out;
- }
-
- gf_rdma_post_ref(post);
-
- for (i = 0; i < entry->rpchdr_count; i++) {
- vector[count++] = entry->rpchdr[i];
- }
-
- for (i = 0; i < entry->proghdr_count; i++) {
- vector[count++] = entry->proghdr[i];
- }
-
- ret = __gf_rdma_do_gf_rdma_write(peer, post, vector, count, entry->iobref,
- reply_info);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_PEER_FAILED,
- "rdma write to peer "
- "(%s) failed",
- peer->trans->peerinfo.identifier);
- gf_rdma_post_unref(post);
- goto out;
- }
-
- ret = gf_rdma_post_send(peer->qp, post, (buf - post->buf));
- if (ret) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_SEND_FAILED,
- "posting a send request "
- "to client (%s) failed with ret = %d (%s)",
- peer->trans->peerinfo.identifier, ret,
- (ret > 0) ? strerror(ret) : "");
- ret = -1;
- gf_rdma_post_unref(post);
- } else {
- ret = payload_size;
- }
-
-out:
- return ret;
-}
-
-int32_t
-__gf_rdma_send_reply_type_msg(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_post_t *post,
- gf_rdma_reply_info_t *reply_info)
-{
- gf_rdma_header_t *header = NULL;
- int32_t send_size = 0, ret = 0;
- char *ptr = NULL;
- uint32_t payload_size = 0;
-
- send_size = iov_length(entry->rpchdr, entry->rpchdr_count) +
- iov_length(entry->proghdr, entry->proghdr_count) +
- GLUSTERFS_RDMA_MAX_HEADER_SIZE;
-
- if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_SEND_SIZE_GREAT_INLINE_THRESHOLD,
- "client has provided only write chunks, but the "
- "combined size of rpc and program header (%d) is "
- "exceeding the size of msg that can be sent using "
- "RDMA send (%d)",
- send_size, GLUSTERFS_RDMA_INLINE_THRESHOLD);
-
- ret = __gf_rdma_send_error(peer, entry, post, reply_info, ERR_CHUNK);
- goto out;
- }
-
- header = (gf_rdma_header_t *)post->buf;
-
- __gf_rdma_fill_reply_header(header, entry->rpchdr, reply_info,
- peer->send_count);
-
- payload_size = iov_length(entry->prog_payload, entry->prog_payload_count);
- ptr = (char *)&header->rm_body.rm_chunks[1];
-
- ret = __gf_rdma_reply_encode_write_chunks(peer, payload_size, post,
- reply_info, (uint32_t **)&ptr);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_ENCODE_ERROR,
- "encoding write chunks failed");
- ret = __gf_rdma_send_error(peer, entry, post, reply_info, ERR_CHUNK);
- goto out;
- }
-
- *(uint32_t *)ptr = 0; /* terminate reply chunklist */
- ptr += sizeof(uint32_t);
-
- gf_rdma_post_ref(post);
-
- ret = __gf_rdma_do_gf_rdma_write(peer, post, entry->prog_payload,
- entry->prog_payload_count, entry->iobref,
- reply_info);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_PEER_FAILED,
- "rdma write to peer "
- "(%s) failed",
- peer->trans->peerinfo.identifier);
- gf_rdma_post_unref(post);
- goto out;
- }
-
- iov_unload(ptr, entry->rpchdr, entry->rpchdr_count);
- ptr += iov_length(entry->rpchdr, entry->rpchdr_count);
-
- iov_unload(ptr, entry->proghdr, entry->proghdr_count);
- ptr += iov_length(entry->proghdr, entry->proghdr_count);
-
- ret = gf_rdma_post_send(peer->qp, post, (ptr - post->buf));
- if (ret) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_SEND_CLIENT_ERROR,
- "rdma send to client (%s) failed with ret = %d (%s)",
- peer->trans->peerinfo.identifier, ret,
- (ret > 0) ? strerror(ret) : "");
- gf_rdma_post_unref(post);
- ret = -1;
- } else {
- ret = send_size + payload_size;
- }
-
-out:
- return ret;
-}
-
-void
-gf_rdma_reply_info_destroy(gf_rdma_reply_info_t *reply_info)
-{
- if (reply_info == NULL) {
- goto out;
- }
-
- if (reply_info->wc_array != NULL) {
- GF_FREE(reply_info->wc_array);
- reply_info->wc_array = NULL;
- }
-
- mem_put(reply_info);
-out:
- return;
-}
-
-gf_rdma_reply_info_t *
-gf_rdma_reply_info_alloc(gf_rdma_peer_t *peer)
-{
- gf_rdma_reply_info_t *reply_info = NULL;
- gf_rdma_private_t *priv = NULL;
-
- priv = peer->trans->private;
-
- reply_info = mem_get(priv->device->reply_info_pool);
- if (reply_info == NULL) {
- goto out;
- }
-
- memset(reply_info, 0, sizeof(*reply_info));
- reply_info->pool = priv->device->reply_info_pool;
-
-out:
- return reply_info;
-}
-
-int32_t
-__gf_rdma_ioq_churn_reply(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_post_t *post)
-{
- gf_rdma_reply_info_t *reply_info = NULL;
- int32_t ret = -1;
- gf_rdma_chunktype_t type = gf_rdma_noch;
-
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, peer, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, entry, out);
- GF_VALIDATE_OR_GOTO(GF_RDMA_LOG_NAME, post, out);
-
- reply_info = entry->msg.reply_info;
- if (reply_info != NULL) {
- type = reply_info->type;
- }
-
- switch (type) {
- case gf_rdma_noch:
- ret = __gf_rdma_send_reply_inline(peer, entry, post, reply_info);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_SEND_REPLY_FAILED,
- "failed to send reply to peer (%s) as an "
- "inlined rdma msg",
- peer->trans->peerinfo.identifier);
- }
- break;
-
- case gf_rdma_replych:
- ret = __gf_rdma_send_reply_type_nomsg(peer, entry, post,
- reply_info);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_SEND_REPLY_FAILED,
- "failed to send reply to peer (%s) as "
- "RDMA_NOMSG",
- peer->trans->peerinfo.identifier);
- }
- break;
-
- case gf_rdma_writech:
- ret = __gf_rdma_send_reply_type_msg(peer, entry, post, reply_info);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_SEND_REPLY_FAILED,
- "failed to send reply with write chunks "
- "to peer (%s)",
- peer->trans->peerinfo.identifier);
- }
- break;
-
- default:
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_INVALID_CHUNK_TYPE,
- "invalid chunktype (%d) specified for sending reply "
- " (peer:%s)",
- type, peer->trans->peerinfo.identifier);
- break;
- }
-
- if (reply_info != NULL) {
- gf_rdma_reply_info_destroy(reply_info);
- }
-out:
- return ret;
-}
-
-int32_t
-__gf_rdma_ioq_churn_entry(gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry)
-{
- int32_t ret = 0, quota = 0;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
- gf_rdma_options_t *options = NULL;
- gf_rdma_post_t *post = NULL;
-
- priv = peer->trans->private;
- options = &priv->options;
- device = priv->device;
-
- quota = __gf_rdma_quota_get(peer);
- if (quota > 0) {
- post = gf_rdma_get_post(&device->sendq);
- if (post == NULL) {
- post = gf_rdma_new_post(peer->trans, device,
- (options->send_size + 2048),
- GF_RDMA_SEND_POST);
- }
-
- if (post == NULL) {
- ret = -1;
- gf_msg_callingfn(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_POST_SEND_FAILED,
- "not able to get a post to send msg");
- goto out;
- }
-
- if (entry->is_request) {
- ret = __gf_rdma_ioq_churn_request(peer, entry, post);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_PROC_IOQ_ENTRY_FAILED,
- "failed to process request ioq entry "
- "to peer(%s)",
- peer->trans->peerinfo.identifier);
- }
- } else {
- ret = __gf_rdma_ioq_churn_reply(peer, entry, post);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_PROC_IOQ_ENTRY_FAILED,
- "failed to process reply ioq entry "
- "to peer (%s)",
- peer->trans->peerinfo.identifier);
- }
- }
-
- if (ret != 0) {
- __gf_rdma_ioq_entry_free(entry);
- }
- } else {
- ret = 0;
- }
-
-out:
- return ret;
-}
-
-static int32_t
-__gf_rdma_ioq_churn(gf_rdma_peer_t *peer)
-{
- gf_rdma_ioq_t *entry = NULL;
- int32_t ret = 0;
-
- while (!list_empty(&peer->ioq)) {
- /* pick next entry */
- entry = peer->ioq_next;
-
- ret = __gf_rdma_ioq_churn_entry(peer, entry);
-
- if (ret <= 0)
- break;
- }
-
- /*
- list_for_each_entry_safe (entry, dummy, &peer->ioq, list) {
- ret = __gf_rdma_ioq_churn_entry (peer, entry);
- if (ret <= 0) {
- break;
- }
- }
- */
-
- return ret;
-}
-
-static int32_t
-gf_rdma_writev(rpc_transport_t *this, gf_rdma_ioq_t *entry)
-{
- int32_t ret = 0, need_append = 1;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_peer_t *peer = NULL;
-
- priv = this->private;
- pthread_mutex_lock(&priv->write_mutex);
- {
- if (!priv->connected) {
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_PEER_DISCONNECTED,
- "rdma is not connected to peer (%s)",
- this->peerinfo.identifier);
- ret = -1;
- goto unlock;
- }
-
- peer = &priv->peer;
- if (list_empty(&peer->ioq)) {
- ret = __gf_rdma_ioq_churn_entry(peer, entry);
- if (ret != 0) {
- need_append = 0;
-
- if (ret < 0) {
- gf_msg(this->name, GF_LOG_WARNING, 0,
- RDMA_MSG_PROC_IOQ_ENTRY_FAILED,
- "processing ioq entry destined"
- " to (%s) failed",
- this->peerinfo.identifier);
- }
- }
- }
-
- if (need_append) {
- list_add_tail(&entry->list, &peer->ioq);
- }
- }
-unlock:
- pthread_mutex_unlock(&priv->write_mutex);
- return ret;
-}
-
-gf_rdma_ioq_t *
-gf_rdma_ioq_new(rpc_transport_t *this, rpc_transport_data_t *data)
-{
- gf_rdma_ioq_t *entry = NULL;
- int count = 0, i = 0;
- rpc_transport_msg_t *msg = NULL;
- gf_rdma_private_t *priv = NULL;
-
- if ((data == NULL) || (this == NULL)) {
- goto out;
- }
-
- priv = this->private;
-
- entry = mem_get(priv->device->ioq_pool);
- if (entry == NULL) {
- goto out;
- }
- memset(entry, 0, sizeof(*entry));
- entry->pool = priv->device->ioq_pool;
-
- if (data->is_request) {
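- /* for a request, remember the buffers the caller set aside for the
- * response header and payload; they are later advertised to the peer
- * as reply/write chunks */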
- msg = &data->data.req.msg;
- if (data->data.req.rsp.rsphdr_count != 0) {
- for (i = 0; i < data->data.req.rsp.rsphdr_count; i++) {
- entry->msg.request.rsphdr_vec[i] = data->data.req.rsp.rsphdr[i];
- }
-
- entry->msg.request.rsphdr_count = data->data.req.rsp.rsphdr_count;
- }
-
- if (data->data.req.rsp.rsp_payload_count != 0) {
- for (i = 0; i < data->data.req.rsp.rsp_payload_count; i++) {
- entry->msg.request.rsp_payload[i] = data->data.req.rsp
- .rsp_payload[i];
- }
-
- entry->msg.request.rsp_payload_count = data->data.req.rsp
- .rsp_payload_count;
- }
-
- entry->msg.request.rpc_req = data->data.req.rpc_req;
-
- if (data->data.req.rsp.rsp_iobref != NULL) {
- entry->msg.request.rsp_iobref = iobref_ref(
- data->data.req.rsp.rsp_iobref);
- }
- } else {
- msg = &data->data.reply.msg;
- entry->msg.reply_info = data->data.reply.private;
- }
-
- entry->is_request = data->is_request;
-
- count = msg->rpchdrcount + msg->proghdrcount + msg->progpayloadcount;
-
- GF_ASSERT(count <= MAX_IOVEC);
-
- if (msg->rpchdr != NULL) {
- memcpy(&entry->rpchdr[0], msg->rpchdr,
- sizeof(struct iovec) * msg->rpchdrcount);
- entry->rpchdr_count = msg->rpchdrcount;
- }
-
- if (msg->proghdr != NULL) {
- memcpy(&entry->proghdr[0], msg->proghdr,
- sizeof(struct iovec) * msg->proghdrcount);
- entry->proghdr_count = msg->proghdrcount;
- }
-
- if (msg->progpayload != NULL) {
- memcpy(&entry->prog_payload[0], msg->progpayload,
- sizeof(struct iovec) * msg->progpayloadcount);
- entry->prog_payload_count = msg->progpayloadcount;
- }
-
- if (msg->iobref != NULL) {
- entry->iobref = iobref_ref(msg->iobref);
- }
-
- INIT_LIST_HEAD(&entry->list);
-
-out:
- return entry;
-}
-
-int32_t
-gf_rdma_submit_request(rpc_transport_t *this, rpc_transport_req_t *req)
-{
- int32_t ret = 0;
- gf_rdma_ioq_t *entry = NULL;
- rpc_transport_data_t data;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_peer_t *peer = NULL;
-
- if (req == NULL) {
- goto out;
- }
-
- priv = this->private;
- if (priv == NULL) {
- ret = -1;
- goto out;
- }
-
- peer = &priv->peer;
- data.is_request = 1;
- data.data.req = *req;
- /*
- * when the first message is received on a transport, the quota
- * variable is initialized and quota_set is set to one. In gluster,
- * the client process (with respect to a transport) is the one that
- * sends the first message. If a submit request arrives on the server
- * before quota_set has been set, the message must not be sent.
- */
-
- if (priv->entity == GF_RDMA_SERVER && peer->quota_set == 0) {
- ret = 0;
- goto out;
- }
-
- entry = gf_rdma_ioq_new(this, &data);
- if (entry == NULL) {
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_NEW_IOQ_ENTRY_FAILED,
- "getting a new ioq entry failed (peer:%s)",
- this->peerinfo.identifier);
- goto out;
- }
-
- ret = gf_rdma_writev(this, entry);
-
- if (ret > 0) {
- ret = 0;
- } else if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_PEER_FAILED,
- "sending request to peer (%s) failed",
- this->peerinfo.identifier);
- rpc_transport_disconnect(this, _gf_false);
- }
-
-out:
- return ret;
-}
-
-int32_t
-gf_rdma_submit_reply(rpc_transport_t *this, rpc_transport_reply_t *reply)
-{
- int32_t ret = 0;
- gf_rdma_ioq_t *entry = NULL;
- rpc_transport_data_t data = {0, }; /* zero-init so is_request stays 0 for replies */
-
- if (reply == NULL) {
- goto out;
- }
-
- data.data.reply = *reply;
-
- entry = gf_rdma_ioq_new(this, &data);
- if (entry == NULL) {
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_NEW_IOQ_ENTRY_FAILED,
- "getting a new ioq entry failed (peer:%s)",
- this->peerinfo.identifier);
- goto out;
- }
-
- ret = gf_rdma_writev(this, entry);
- if (ret > 0) {
- ret = 0;
- } else if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_WRITE_PEER_FAILED,
- "sending request to peer (%s) failed",
- this->peerinfo.identifier);
- rpc_transport_disconnect(this, _gf_false);
- }
-
-out:
- return ret;
-}
-
-static int
-gf_rdma_register_peer(gf_rdma_device_t *device, int32_t qp_num,
- gf_rdma_peer_t *peer)
-{
- struct _qpent *ent = NULL;
- gf_rdma_qpreg_t *qpreg = NULL;
- int32_t hash = 0;
- int ret = -1;
-
- qpreg = &device->qpreg;
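- /* the qp registry is a 42-bucket hash table keyed on qp_num; each
- * bucket is a circular list with ents[hash] acting as its sentinel */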
- hash = qp_num % 42;
-
- pthread_mutex_lock(&qpreg->lock);
- {
- ent = qpreg->ents[hash].next;
- while ((ent != &qpreg->ents[hash]) && (ent->qp_num != qp_num)) {
- ent = ent->next;
- }
-
- if (ent->qp_num == qp_num) {
- ret = 0;
- goto unlock;
- }
-
- ent = (struct _qpent *)GF_CALLOC(1, sizeof(*ent), gf_common_mt_qpent);
- if (ent == NULL) {
- goto unlock;
- }
-
- /* TODO: ref reg->peer */
- ent->peer = peer;
- ent->next = &qpreg->ents[hash];
- ent->prev = ent->next->prev;
- ent->next->prev = ent;
- ent->prev->next = ent;
- ent->qp_num = qp_num;
- qpreg->count++;
- ret = 0;
- }
-unlock:
- pthread_mutex_unlock(&qpreg->lock);
-
- return ret;
-}
-
-static void
-gf_rdma_unregister_peer(gf_rdma_device_t *device, int32_t qp_num)
-{
- struct _qpent *ent = NULL;
- gf_rdma_qpreg_t *qpreg = NULL;
- int32_t hash = 0;
-
- qpreg = &device->qpreg;
- hash = qp_num % 42;
-
- pthread_mutex_lock(&qpreg->lock);
- {
- ent = qpreg->ents[hash].next;
- while ((ent != &qpreg->ents[hash]) && (ent->qp_num != qp_num))
- ent = ent->next;
- if ((ent->qp_num != qp_num) || (ent == &qpreg->ents[hash])) {
- pthread_mutex_unlock(&qpreg->lock);
- return;
- }
- ent->prev->next = ent->next;
- ent->next->prev = ent->prev;
- /* TODO: unref reg->peer */
- GF_FREE(ent);
- qpreg->count--;
- }
- pthread_mutex_unlock(&qpreg->lock);
-}
-
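-/* access to the qp registry is serialized by device->qpreg.lock; the
- * completion threads hold it around this lookup. */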
-static gf_rdma_peer_t *
-__gf_rdma_lookup_peer(gf_rdma_device_t *device, int32_t qp_num)
-{
- struct _qpent *ent = NULL;
- gf_rdma_peer_t *peer = NULL;
- gf_rdma_qpreg_t *qpreg = NULL;
- int32_t hash = 0;
-
- qpreg = &device->qpreg;
- hash = qp_num % 42;
- ent = qpreg->ents[hash].next;
- while ((ent != &qpreg->ents[hash]) && (ent->qp_num != qp_num))
- ent = ent->next;
-
- if (ent != &qpreg->ents[hash]) {
- peer = ent->peer;
- }
-
- return peer;
-}
-
-static void
-__gf_rdma_destroy_qp(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
-
- priv = this->private;
- if (priv->peer.qp) {
- gf_rdma_unregister_peer(priv->device, priv->peer.qp->qp_num);
- rdma_destroy_qp(priv->peer.cm_id);
- }
- priv->peer.qp = NULL;
-
- return;
-}
-
-static int32_t
-gf_rdma_create_qp(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
- int32_t ret = 0;
- gf_rdma_peer_t *peer = NULL;
- char *device_name = NULL;
-
- priv = this->private;
-
- peer = &priv->peer;
-
- device_name = (char *)ibv_get_device_name(peer->cm_id->verbs->device);
- if (device_name == NULL) {
- ret = -1;
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_GET_DEVICE_NAME_FAILED,
- "cannot get "
- "device_name");
- goto out;
- }
-
- device = gf_rdma_get_device(this, peer->cm_id->verbs, device_name);
- if (device == NULL) {
- ret = -1;
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_GET_DEVICE_FAILED,
- "cannot get device for "
- "device %s",
- device_name);
- goto out;
- }
-
- if (priv->device == NULL) {
- priv->device = device;
- }
-
- struct ibv_qp_init_attr init_attr = {
- .send_cq = device->send_cq,
- .recv_cq = device->recv_cq,
- .srq = device->srq,
- .cap = {.max_send_wr = peer->send_count,
- .max_recv_wr = peer->recv_count,
- .max_send_sge = 2,
- .max_recv_sge = 1},
- .qp_type = IBV_QPT_RC};
-
- ret = rdma_create_qp(peer->cm_id, device->pd, &init_attr);
- if (ret != 0) {
- gf_msg(peer->trans->name, GF_LOG_CRITICAL, errno,
- RDMA_MSG_CREAT_QP_FAILED, "%s: could not create QP", this->name);
- ret = -1;
- goto out;
- }
-
- peer->qp = peer->cm_id->qp;
-
- ret = gf_rdma_register_peer(device, peer->qp->qp_num, peer);
-
-out:
- if (ret == -1)
- __gf_rdma_destroy_qp(this);
-
- return ret;
-}
-
-static int32_t
-__gf_rdma_teardown(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_peer_t *peer = NULL;
-
- priv = this->private;
- peer = &priv->peer;
-
- if (peer->cm_id && peer->cm_id->qp != NULL) {
- __gf_rdma_destroy_qp(this);
- }
-
- if (!list_empty(&priv->peer.ioq)) {
- __gf_rdma_ioq_flush(peer);
- }
-
- if (peer->cm_id != NULL) {
- rdma_destroy_id(peer->cm_id);
- peer->cm_id = NULL;
- }
-
- /* TODO: decrement cq size */
- return 0;
-}
-
-static int32_t
-gf_rdma_teardown(rpc_transport_t *this)
-{
- int32_t ret = 0;
- gf_rdma_private_t *priv = NULL;
-
- if (this == NULL) {
- goto out;
- }
-
- priv = this->private;
-
- pthread_mutex_lock(&priv->write_mutex);
- {
- ret = __gf_rdma_teardown(this);
- }
- pthread_mutex_unlock(&priv->write_mutex);
-
-out:
- return ret;
-}
-
-/*
- * allocates new memory to hold the write-chunklist. New memory is needed
- * since the write-chunklist will be used while sending the reply, and the
- * post holding the initial write-chunklist sent from the client will be put
- * back to the srq before a pollin event is sent to upper layers.
- */
-int32_t
-gf_rdma_get_write_chunklist(char **ptr, gf_rdma_write_array_t **write_ary)
-{
- gf_rdma_write_array_t *from = NULL, *to = NULL;
- int32_t ret = -1, size = 0, i = 0;
-
- from = (gf_rdma_write_array_t *)*ptr;
- if (from->wc_discrim == 0) {
- ret = 0;
- goto out;
- }
-
- from->wc_nchunks = ntoh32(from->wc_nchunks);
-
- size = sizeof(*from) + (sizeof(gf_rdma_write_chunk_t) * from->wc_nchunks);
-
- to = GF_CALLOC(1, size, gf_common_mt_char);
- if (to == NULL) {
- ret = -1;
- goto out;
- }
-
- to->wc_discrim = ntoh32(from->wc_discrim);
- to->wc_nchunks = from->wc_nchunks;
-
- for (i = 0; i < to->wc_nchunks; i++) {
- to->wc_array[i].wc_target.rs_handle = ntoh32(
- from->wc_array[i].wc_target.rs_handle);
- to->wc_array[i].wc_target.rs_length = ntoh32(
- from->wc_array[i].wc_target.rs_length);
- to->wc_array[i].wc_target.rs_offset = ntoh64(
- from->wc_array[i].wc_target.rs_offset);
- }
-
- *write_ary = to;
- ret = 0;
- *ptr = (char *)&from->wc_array[i].wc_target.rs_handle;
-out:
- return ret;
-}
-
-/*
- * does not allocate new memory to hold the read-chunklist. New memory is not
- * needed, since the post is not put back to the srq till we've completed all
- * the rdma-reads, and hence the readchunk-list can point to memory held by
- * the post.
- */
-int32_t
-gf_rdma_get_read_chunklist(char **ptr, gf_rdma_read_chunk_t **readch)
-{
- int32_t ret = -1;
- gf_rdma_read_chunk_t *chunk = NULL;
- int i = 0;
-
- chunk = (gf_rdma_read_chunk_t *)*ptr;
- if (chunk[0].rc_discrim == 0) {
- ret = 0;
- goto out;
- }
-
- for (i = 0; chunk[i].rc_discrim != 0; i++) {
- chunk[i].rc_discrim = ntoh32(chunk[i].rc_discrim);
- chunk[i].rc_position = ntoh32(chunk[i].rc_position);
- chunk[i].rc_target.rs_handle = ntoh32(chunk[i].rc_target.rs_handle);
- chunk[i].rc_target.rs_length = ntoh32(chunk[i].rc_target.rs_length);
- chunk[i].rc_target.rs_offset = ntoh64(chunk[i].rc_target.rs_offset);
- }
-
- *readch = &chunk[0];
- ret = 0;
- *ptr = (char *)&chunk[i].rc_discrim;
-out:
- return ret;
-}
-
-static int32_t
-gf_rdma_decode_error_msg(gf_rdma_peer_t *peer, gf_rdma_post_t *post,
- size_t bytes_in_post)
-{
- gf_rdma_header_t *header = NULL;
- struct iobuf *iobuf = NULL;
- struct iobref *iobref = NULL;
- int32_t ret = -1;
- struct rpc_msg rpc_msg = {
- 0,
- };
-
- header = (gf_rdma_header_t *)post->buf;
- header->rm_body.rm_error.rm_type = ntoh32(header->rm_body.rm_error.rm_type);
- if (header->rm_body.rm_error.rm_type == ERR_VERS) {
- header->rm_body.rm_error.rm_version.gf_rdma_vers_low = ntoh32(
- header->rm_body.rm_error.rm_version.gf_rdma_vers_low);
- header->rm_body.rm_error.rm_version.gf_rdma_vers_high = ntoh32(
- header->rm_body.rm_error.rm_version.gf_rdma_vers_high);
- }
-
- rpc_msg.rm_xid = header->rm_xid;
- rpc_msg.rm_direction = REPLY;
- rpc_msg.rm_reply.rp_stat = MSG_DENIED;
-
- iobuf = iobuf_get2(peer->trans->ctx->iobuf_pool, bytes_in_post);
- if (iobuf == NULL) {
- ret = -1;
- goto out;
- }
-
- post->ctx.iobref = iobref = iobref_new();
- if (iobref == NULL) {
- ret = -1;
- goto out;
- }
-
- ret = rpc_reply_to_xdr(&rpc_msg, iobuf_ptr(iobuf), iobuf_pagesize(iobuf),
- &post->ctx.vector[0]);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_RPC_REPLY_CREATE_FAILED,
- "Failed to create "
- "RPC reply");
- goto out;
- }
-
- iobref_add(iobref, iobuf);
- iobuf_unref(iobuf);
-
- post->ctx.count = 1;
-
- iobuf = NULL;
- iobref = NULL;
-
-out:
- if (ret == -1) {
- if (iobuf != NULL) {
- iobuf_unref(iobuf);
- }
-
- if (iobref != NULL) {
- iobref_unref(iobref);
- }
- }
-
- return 0;
-}
-
-int32_t
-gf_rdma_decode_msg(gf_rdma_peer_t *peer, gf_rdma_post_t *post,
- gf_rdma_read_chunk_t **readch, size_t bytes_in_post)
-{
- int32_t ret = -1;
- gf_rdma_header_t *header = NULL;
- gf_rdma_reply_info_t *reply_info = NULL;
- char *ptr = NULL;
- gf_rdma_write_array_t *write_ary = NULL;
- size_t header_len = 0;
-
- header = (gf_rdma_header_t *)post->buf;
-
- ptr = (char *)&header->rm_body.rm_chunks[0];
-
- ret = gf_rdma_get_read_chunklist(&ptr, readch);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_GET_READ_CHUNK_FAILED,
- "cannot get read "
- "chunklist from msg");
- goto out;
- }
-
- /* skip terminator of read-chunklist */
- ptr = ptr + sizeof(uint32_t);
-
- ret = gf_rdma_get_write_chunklist(&ptr, &write_ary);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_GET_WRITE_CHUNK_FAILED,
- "cannot get write "
- "chunklist from msg");
- goto out;
- }
-
- /* skip terminator of write-chunklist */
- ptr = ptr + sizeof(uint32_t);
-
- if (write_ary != NULL) {
- reply_info = gf_rdma_reply_info_alloc(peer);
- if (reply_info == NULL) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_REPLY_INFO_ALLOC_FAILED, "reply_info_alloc failed");
- ret = -1;
- goto out;
- }
-
- reply_info->type = gf_rdma_writech;
- reply_info->wc_array = write_ary;
- reply_info->rm_xid = header->rm_xid;
- } else {
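- /* no write chunklist present; the next (and last) list in the
- * header is the reply chunk */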
- ret = gf_rdma_get_write_chunklist(&ptr, &write_ary);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_CHUNKLIST_ERROR,
- "cannot get reply "
- "chunklist from msg");
- goto out;
- }
-
- if (write_ary != NULL) {
- reply_info = gf_rdma_reply_info_alloc(peer);
- if (reply_info == NULL) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_REPLY_INFO_ALLOC_FAILED,
- "reply_info_alloc_failed");
- ret = -1;
- goto out;
- }
-
- reply_info->type = gf_rdma_replych;
- reply_info->wc_array = write_ary;
- reply_info->rm_xid = header->rm_xid;
- }
- }
-
- /* skip terminator of reply chunk */
- ptr = ptr + sizeof(uint32_t);
- if (header->rm_type != GF_RDMA_NOMSG) {
- header_len = (long)ptr - (long)post->buf;
- post->ctx.vector[0].iov_len = (bytes_in_post - header_len);
-
- post->ctx.hdr_iobuf = iobuf_get2(peer->trans->ctx->iobuf_pool,
- (bytes_in_post - header_len));
- if (post->ctx.hdr_iobuf == NULL) {
- ret = -1;
- goto out;
- }
-
- post->ctx.vector[0].iov_base = iobuf_ptr(post->ctx.hdr_iobuf);
- memcpy(post->ctx.vector[0].iov_base, ptr, post->ctx.vector[0].iov_len);
- post->ctx.count = 1;
- }
-
- post->ctx.reply_info = reply_info;
-out:
- if (ret == -1) {
- if (*readch != NULL) {
- GF_FREE(*readch);
- *readch = NULL;
- }
- if (reply_info)
- GF_FREE(reply_info);
- GF_FREE(write_ary);
- }
-
- return ret;
-}
-
-/* Assumes only one of either write-chunklist or a reply chunk is present */
-int32_t
-gf_rdma_decode_header(gf_rdma_peer_t *peer, gf_rdma_post_t *post,
- gf_rdma_read_chunk_t **readch, size_t bytes_in_post)
-{
- int32_t ret = -1;
- gf_rdma_header_t *header = NULL;
-
- header = (gf_rdma_header_t *)post->buf;
-
- header->rm_xid = ntoh32(header->rm_xid);
- header->rm_vers = ntoh32(header->rm_vers);
- header->rm_credit = ntoh32(header->rm_credit);
- header->rm_type = ntoh32(header->rm_type);
-
- switch (header->rm_type) {
- case GF_RDMA_MSG:
- case GF_RDMA_NOMSG:
- ret = gf_rdma_decode_msg(peer, post, readch, bytes_in_post);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_ENCODE_ERROR,
- "cannot decode msg of "
- "type (%d)",
- header->rm_type);
- }
-
- break;
-
- case GF_RDMA_MSGP:
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_ENTRY,
- "rdma msg of msg-type "
- "GF_RDMA_MSGP should not have been received");
- ret = -1;
- break;
-
- case GF_RDMA_DONE:
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_ENTRY,
- "rdma msg of msg-type "
- "GF_RDMA_DONE should not have been received");
- ret = -1;
- break;
-
- case GF_RDMA_ERROR:
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_RDMA_ERROR_RECEIVED,
- "received a msg of type"
- " RDMA_ERROR");
- ret = gf_rdma_decode_error_msg(peer, post, bytes_in_post);
- break;
-
- default:
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_ENTRY,
- "unknown rdma msg-type (%d)", header->rm_type);
- }
-
- return ret;
-}
-
-int32_t
-gf_rdma_do_reads(gf_rdma_peer_t *peer, gf_rdma_post_t *post,
- gf_rdma_read_chunk_t *readch)
-{
- int32_t ret = -1, i = 0, count = 0;
- size_t size = 0;
- char *ptr = NULL;
- struct iobuf *iobuf = NULL;
- gf_rdma_private_t *priv = NULL;
- struct ibv_sge *list = NULL;
- struct ibv_send_wr *wr = NULL, *bad_wr = NULL;
- int total_ref = 0;
- priv = peer->trans->private;
-
- for (i = 0; readch[i].rc_discrim != 0; i++) {
- size += readch[i].rc_target.rs_length;
- }
-
- if (i == 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_CHUNK_TYPE,
- "message type specified "
- "as rdma-read but there are no rdma read-chunks "
- "present");
- goto out;
- }
-
- post->ctx.gf_rdma_reads = i;
- i = 0;
- iobuf = iobuf_get2(peer->trans->ctx->iobuf_pool, size);
- if (iobuf == NULL) {
- goto out;
- }
-
- if (post->ctx.iobref == NULL) {
- post->ctx.iobref = iobref_new();
- if (post->ctx.iobref == NULL) {
- iobuf_unref(iobuf);
- iobuf = NULL;
- goto out;
- }
- }
-
- ptr = iobuf_ptr(iobuf);
- iobref_add(post->ctx.iobref, iobuf);
- iobuf_unref(iobuf);
-
- iobuf = NULL;
-
- pthread_mutex_lock(&priv->write_mutex);
- {
- if (!priv->connected) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_PEER_DISCONNECTED,
- "transport not "
- "connected to peer (%s), not doing rdma reads",
- peer->trans->peerinfo.identifier);
- goto unlock;
- }
-
- list = GF_CALLOC(post->ctx.gf_rdma_reads, sizeof(struct ibv_sge),
- gf_common_mt_sge);
-
- if (list == NULL) {
- errno = ENOMEM;
- ret = -1;
- goto unlock;
- }
- wr = GF_CALLOC(post->ctx.gf_rdma_reads, sizeof(struct ibv_send_wr),
- gf_common_mt_wr);
- if (wr == NULL) {
- errno = ENOMEM;
- ret = -1;
- goto unlock;
- }
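- /* build one sge and one RDMA_READ work request per read chunk; the
- * requests are chained via wr[i].next and posted below as one list */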
- for (i = 0; readch[i].rc_discrim != 0; i++) {
- count = post->ctx.count++;
- post->ctx.vector[count].iov_base = ptr;
- post->ctx.vector[count].iov_len = readch[i].rc_target.rs_length;
-
- ret = __gf_rdma_register_local_mr_for_rdma(
- peer, &post->ctx.vector[count], 1, &post->ctx);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_MR_ALOC_FAILED,
- "registering local memory"
- " for rdma read failed");
- goto unlock;
- }
-
- list[i].addr = (unsigned long)post->ctx.vector[count].iov_base;
- list[i].length = post->ctx.vector[count].iov_len;
- list[i].lkey = post->ctx.mr[post->ctx.mr_count - 1]->lkey;
-
- wr[i].wr_id = (unsigned long)gf_rdma_post_ref(post);
- wr[i].sg_list = &list[i];
- wr[i].next = &wr[i + 1];
- wr[i].num_sge = 1;
- wr[i].opcode = IBV_WR_RDMA_READ;
- wr[i].send_flags = IBV_SEND_SIGNALED;
- wr[i].wr.rdma.remote_addr = readch[i].rc_target.rs_offset;
- wr[i].wr.rdma.rkey = readch[i].rc_target.rs_handle;
-
- ptr += readch[i].rc_target.rs_length;
- total_ref++;
- }
- wr[i - 1].next = NULL;
- ret = ibv_post_send(peer->qp, wr, &bad_wr);
- if (ret) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_READ_CLIENT_ERROR,
- "rdma read from "
- "client (%s) failed with ret = %d (%s)",
- peer->trans->peerinfo.identifier, ret,
- (ret > 0) ? strerror(ret) : "");
-
- if (!bad_wr) {
- ret = -1;
- goto unlock;
- }
-
- for (i = 0; i < post->ctx.gf_rdma_reads; i++) {
- if (&wr[i] != bad_wr)
- total_ref--;
- else
- break;
- }
-
- ret = -1;
- }
- }
-unlock:
- pthread_mutex_unlock(&priv->write_mutex);
-out:
- if (list)
- GF_FREE(list);
- if (wr)
- GF_FREE(wr);
-
- if (ret == -1) {
- while (total_ref-- > 0)
- gf_rdma_post_unref(post);
- }
-
- return ret;
-}
-
-int32_t
-gf_rdma_pollin_notify(gf_rdma_peer_t *peer, gf_rdma_post_t *post)
-{
- int32_t ret = -1;
- enum msg_type msg_type = 0;
- struct rpc_req *rpc_req = NULL;
- gf_rdma_request_context_t *request_context = NULL;
- rpc_request_info_t request_info = {
- 0,
- };
- gf_rdma_private_t *priv = NULL;
- uint32_t *ptr = NULL;
- rpc_transport_pollin_t *pollin = NULL;
-
- if ((peer == NULL) || (post == NULL)) {
- goto out;
- }
-
- if (post->ctx.iobref == NULL) {
- post->ctx.iobref = iobref_new();
- if (post->ctx.iobref == NULL) {
- goto out;
- }
-
- /* handle the case where both the hdr and the payload of
- * GF_FOP_READ_CBK were received in a single iobuf because
- * the server sent the entire msg inline without doing any
- * rdma writes.
- */
- if (post->ctx.hdr_iobuf)
- iobref_add(post->ctx.iobref, post->ctx.hdr_iobuf);
- }
-
- pollin = rpc_transport_pollin_alloc(peer->trans, post->ctx.vector,
- post->ctx.count, post->ctx.hdr_iobuf,
- post->ctx.iobref, post->ctx.reply_info);
- if (pollin == NULL) {
- goto out;
- }
-
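- /* peek at the first two 32-bit words of the RPC record: the xid and
- * the message direction (CALL/REPLY) */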
- ptr = (uint32_t *)pollin->vector[0].iov_base;
-
- request_info.xid = ntoh32(*ptr);
- msg_type = ntoh32(*(ptr + 1));
-
- if (msg_type == REPLY) {
- ret = rpc_transport_notify(peer->trans, RPC_TRANSPORT_MAP_XID_REQUEST,
- &request_info);
- if (ret == -1) {
- gf_msg_debug(GF_RDMA_LOG_NAME, 0,
- "cannot get request"
- "information from rpc layer");
- goto out;
- }
-
- rpc_req = request_info.rpc_req;
- if (rpc_req == NULL) {
- gf_msg_debug(GF_RDMA_LOG_NAME, 0,
- "rpc request "
- "structure not found");
- ret = -1;
- goto out;
- }
-
- request_context = rpc_req->conn_private;
- rpc_req->conn_private = NULL;
-
- priv = peer->trans->private;
- if (request_context != NULL) {
- pthread_mutex_lock(&priv->write_mutex);
- {
- __gf_rdma_request_context_destroy(request_context);
- }
- pthread_mutex_unlock(&priv->write_mutex);
- } else {
- gf_rdma_quota_put(peer);
- }
-
- pollin->is_reply = 1;
- }
-
- ret = rpc_transport_notify(peer->trans, RPC_TRANSPORT_MSG_RECEIVED, pollin);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, TRANS_MSG_TRANSPORT_ERROR,
- "transport_notify failed");
- }
-
-out:
- if (pollin != NULL) {
- pollin->private = NULL;
- rpc_transport_pollin_destroy(pollin);
- }
-
- return ret;
-}
-
-int32_t
-gf_rdma_recv_reply(gf_rdma_peer_t *peer, gf_rdma_post_t *post)
-{
- int32_t ret = -1;
- gf_rdma_header_t *header = NULL;
- gf_rdma_reply_info_t *reply_info = NULL;
- gf_rdma_write_array_t *wc_array = NULL;
- int i = 0;
- uint32_t *ptr = NULL;
- gf_rdma_request_context_t *ctx = NULL;
- rpc_request_info_t request_info = {
- 0,
- };
- struct rpc_req *rpc_req = NULL;
-
- header = (gf_rdma_header_t *)post->buf;
- reply_info = post->ctx.reply_info;
-
- /* no write chunklist, just notify upper layers */
- if (reply_info == NULL) {
- ret = 0;
- goto out;
- }
-
- wc_array = reply_info->wc_array;
-
- if (header->rm_type == GF_RDMA_NOMSG) {
- post->ctx.vector[0].iov_base = (void *)(long)wc_array->wc_array[0]
- .wc_target.rs_offset;
- post->ctx.vector[0].iov_len = wc_array->wc_array[0].wc_target.rs_length;
-
- post->ctx.count = 1;
- } else {
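- /* GF_RDMA_MSG: vector[0] already holds the inlined header; map
- * each write-chunk target into the subsequent iovec slots */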
- for (i = 0; i < wc_array->wc_nchunks; i++) {
- post->ctx.vector[i + 1].iov_base =
- (void *)(long)wc_array->wc_array[i].wc_target.rs_offset;
- post->ctx.vector[i + 1].iov_len = wc_array->wc_array[i]
- .wc_target.rs_length;
- }
-
- post->ctx.count += wc_array->wc_nchunks;
- }
-
- ptr = (uint32_t *)post->ctx.vector[0].iov_base;
- request_info.xid = ntoh32(*ptr);
-
- ret = rpc_transport_notify(peer->trans, RPC_TRANSPORT_MAP_XID_REQUEST,
- &request_info);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, TRANS_MSG_TRANSPORT_ERROR,
- "cannot get request "
- "information (peer:%s) from rpc layer",
- peer->trans->peerinfo.identifier);
- goto out;
- }
-
- rpc_req = request_info.rpc_req;
- if (rpc_req == NULL) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_RPC_ST_ERROR,
- "rpc request structure not "
- "found");
- ret = -1;
- goto out;
- }
-
- ctx = rpc_req->conn_private;
- if ((post->ctx.iobref == NULL) && ctx->rsp_iobref) {
- post->ctx.iobref = iobref_ref(ctx->rsp_iobref);
- }
-
- ret = 0;
-
- gf_rdma_reply_info_destroy(reply_info);
-
-out:
- if (ret == 0) {
- ret = gf_rdma_pollin_notify(peer, post);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_POLL_IN_NOTIFY_FAILED, "pollin notify failed");
- }
- }
-
- return ret;
-}
-
-static int32_t
-gf_rdma_recv_request(gf_rdma_peer_t *peer, gf_rdma_post_t *post,
- gf_rdma_read_chunk_t *readch)
-{
- int32_t ret = -1;
-
- if (readch != NULL) {
- ret = gf_rdma_do_reads(peer, post, readch);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_PEER_READ_FAILED, "rdma read from peer (%s) failed",
- peer->trans->peerinfo.identifier);
- }
- } else {
- ret = gf_rdma_pollin_notify(peer, post);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_POLL_IN_NOTIFY_FAILED,
- "pollin notification failed");
- }
- }
-
- return ret;
-}
-
-void
-gf_rdma_process_recv(gf_rdma_peer_t *peer, struct ibv_wc *wc)
-{
- gf_rdma_post_t *post = NULL;
- gf_rdma_read_chunk_t *readch = NULL;
- int ret = -1;
- uint32_t *ptr = NULL;
- enum msg_type msg_type = 0;
- gf_rdma_header_t *header = NULL;
- gf_rdma_private_t *priv = NULL;
-
- post = (gf_rdma_post_t *)(long)wc->wr_id;
- if (post == NULL) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_POST_MISSING,
- "no post found in successful "
- "work completion element");
- goto out;
- }
-
- ret = gf_rdma_decode_header(peer, post, &readch, wc->byte_len);
- if (ret == -1) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_HEADER_DECODE_FAILED,
- "decoding of header "
- "failed");
- goto out;
- }
-
- header = (gf_rdma_header_t *)post->buf;
-
- priv = peer->trans->private;
-
- pthread_mutex_lock(&priv->write_mutex);
- {
- if (!priv->peer.quota_set) {
- priv->peer.quota_set = 1;
-
- /* Initially peer.quota is set to 1 as per RFC 5666. We
- * have to account for the quota used while sending
- * first msg (which may or may not be returned to pool
- * at this point) while deriving peer.quota from
- * header->rm_credit. Hence the arithmetic below,
- * instead of directly setting it to header->rm_credit.
- */
- priv->peer.quota = header->rm_credit - (1 - priv->peer.quota);
- }
- }
- pthread_mutex_unlock(&priv->write_mutex);
-
- switch (header->rm_type) {
- case GF_RDMA_MSG:
- ptr = (uint32_t *)post->ctx.vector[0].iov_base;
- msg_type = ntoh32(*(ptr + 1));
- break;
-
- case GF_RDMA_NOMSG:
- if (readch != NULL) {
- msg_type = CALL;
- } else {
- msg_type = REPLY;
- }
- break;
-
- case GF_RDMA_ERROR:
- if (header->rm_body.rm_error.rm_type == ERR_CHUNK) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_RDMA_ERROR_RECEIVED,
- "peer (%s), couldn't encode or decode the msg "
- "properly or write chunks were not provided "
- "for replies that were bigger than "
- "RDMA_INLINE_THRESHOLD (%d)",
- peer->trans->peerinfo.identifier,
- GLUSTERFS_RDMA_INLINE_THRESHOLD);
- ret = gf_rdma_pollin_notify(peer, post);
- if (ret == -1) {
- gf_msg_debug(GF_RDMA_LOG_NAME, 0,
- "pollin "
- "notification failed");
- }
- goto out;
- } else {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, 0,
- TRANS_MSG_TRANSPORT_ERROR,
- "an error has "
- "happened while transmission of msg, "
- "disconnecting the transport");
- ret = -1;
- goto out;
- }
-
- default:
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_INVALID_ENTRY,
- "invalid rdma msg-type (%d)", header->rm_type);
- goto out;
- }
-
- if (msg_type == CALL) {
- ret = gf_rdma_recv_request(peer, post, readch);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_PEER_REQ_FAILED,
- "receiving a request"
- " from peer (%s) failed",
- peer->trans->peerinfo.identifier);
- }
- } else {
- ret = gf_rdma_recv_reply(peer, post);
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_PEER_REP_FAILED,
- "receiving a reply "
- "from peer (%s) failed",
- peer->trans->peerinfo.identifier);
- }
- }
-
-out:
- if (ret == -1) {
- rpc_transport_disconnect(peer->trans, _gf_false);
- }
-
- return;
-}
-
-void *
-gf_rdma_async_event_thread(void *context)
-{
- struct ibv_async_event event;
- int ret;
-
- while (1) {
- do {
- ret = ibv_get_async_event((struct ibv_context *)context, &event);
-
- if (ret && errno != EINTR) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno,
- RDMA_MSG_EVENT_ERROR,
- "Error getting "
- "event");
- }
- } while (ret && errno == EINTR);
-
- switch (event.event_type) {
- case IBV_EVENT_SRQ_LIMIT_REACHED:
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_EVENT_SRQ_LIMIT_REACHED,
- "received "
- "srq_limit reached");
- break;
-
- default:
- gf_msg_debug(GF_RDMA_LOG_NAME, 0,
- "event (%d) "
- "received",
- event.event_type);
- break;
- }
-
- ibv_ack_async_event(&event);
- }
-
- return 0;
-}
-
-static void *
-gf_rdma_recv_completion_proc(void *data)
-{
- struct ibv_comp_channel *chan = NULL;
- gf_rdma_device_t *device = NULL;
- gf_rdma_post_t *post = NULL;
- gf_rdma_peer_t *peer = NULL;
- struct ibv_cq *event_cq = NULL;
- struct ibv_wc wc[10] = {
- {0},
- };
- void *event_ctx = NULL;
- int32_t ret = 0;
- int32_t num_wr = 0, index = 0;
- uint8_t failed = 0;
-
- chan = data;
-
- while (1) {
- failed = 0;
- ret = ibv_get_cq_event(chan, &event_cq, &event_ctx);
- if (ret) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno,
- RDMA_MSG_IBV_GET_CQ_FAILED,
- "ibv_get_cq_event failed, terminating recv "
- "thread %d (%d)",
- ret, errno);
- continue;
- }
-
- device = event_ctx;
-
- ret = ibv_req_notify_cq(event_cq, 0);
- if (ret) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno,
- RDMA_MSG_IBV_REQ_NOTIFY_CQ_FAILED,
- "ibv_req_notify_cq on %s failed, terminating "
- "recv thread: %d (%d)",
- device->device_name, ret, errno);
- continue;
- }
-
- device = (gf_rdma_device_t *)event_ctx;
-
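- /* drain the completion queue in batches of up to 10 work completions */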
- while (!failed && (num_wr = ibv_poll_cq(event_cq, 10, wc)) > 0) {
- for (index = 0; index < num_wr && !failed; index++) {
- post = (gf_rdma_post_t *)(long)wc[index].wr_id;
-
- pthread_mutex_lock(&device->qpreg.lock);
- {
- peer = __gf_rdma_lookup_peer(device, wc[index].qp_num);
-
- /*
- * keep a refcount on transport so that it
- * does not get freed because of some error
- * indicated by wc.status till we are done
- * with usage of peer and thereby that of
- * trans.
- */
- if (peer != NULL) {
- rpc_transport_ref(peer->trans);
- }
- }
- pthread_mutex_unlock(&device->qpreg.lock);
-
- if (wc[index].status != IBV_WC_SUCCESS) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, 0,
- RDMA_MSG_RECV_ERROR,
- "recv work "
- "request on `%s' returned error (%d)",
- device->device_name, wc[index].status);
- failed = 1;
- if (peer) {
- ibv_ack_cq_events(event_cq, num_wr);
- rpc_transport_disconnect(peer->trans, _gf_false);
- rpc_transport_unref(peer->trans);
- }
-
- if (post) {
- gf_rdma_post_unref(post);
- }
-
- continue;
- }
-
- if (peer) {
- gf_rdma_process_recv(peer, &wc[index]);
- rpc_transport_unref(peer->trans);
- } else {
- gf_msg_debug(GF_RDMA_LOG_NAME, 0,
- "could not lookup peer "
- "for qp_num: %d",
- wc[index].qp_num);
- }
-
- gf_rdma_post_unref(post);
- }
- }
-
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno,
- RDMA_MSG_IBV_POLL_CQ_ERROR,
- "ibv_poll_cq on `%s' returned error "
- "(ret = %d, errno = %d)",
- device->device_name, ret, errno);
- continue;
- }
- if (!failed)
- ibv_ack_cq_events(event_cq, num_wr);
- }
-
- return NULL;
-}
-
-void
-gf_rdma_handle_failed_send_completion(gf_rdma_peer_t *peer, struct ibv_wc *wc)
-{
- gf_rdma_post_t *post = NULL;
- gf_rdma_device_t *device = NULL;
- gf_rdma_private_t *priv = NULL;
-
- if (peer != NULL) {
- priv = peer->trans->private;
- if (priv != NULL) {
- device = priv->device;
- }
- }
-
- post = (gf_rdma_post_t *)(long)wc->wr_id;
-
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0, RDMA_MSG_RDMA_HANDLE_FAILED,
- "send work request on `%s' returned error "
- "wc.status = %d, wc.vendor_err = %d, post->buf = %p, "
- "wc.byte_len = %d, post->reused = %d",
- (device != NULL) ? device->device_name : "", wc->status,
- wc->vendor_err, post->buf, wc->byte_len, post->reused);
-
- if (wc->status == IBV_WC_RETRY_EXC_ERR) {
- gf_msg("rdma", GF_LOG_ERROR, 0, TRANS_MSG_TIMEOUT_EXCEEDED,
- "connection between client and server not working. "
- "check by running 'ibv_srq_pingpong'. also make sure "
- "subnet manager is running (eg: 'opensm'), or check "
- "if rdma port is valid (or active) by running "
- "'ibv_devinfo'. contact Gluster Support Team if the "
- "problem persists.");
- }
-
- if (peer) {
- rpc_transport_disconnect(peer->trans, _gf_false);
- }
-
- return;
-}
-
-void
-gf_rdma_handle_successful_send_completion(gf_rdma_peer_t *peer,
- struct ibv_wc *wc)
-{
- gf_rdma_post_t *post = NULL;
- int reads = 0, ret = 0;
- gf_rdma_header_t *header = NULL;
-
- if (wc->opcode != IBV_WC_RDMA_READ) {
- goto out;
- }
-
- post = (gf_rdma_post_t *)(long)wc->wr_id;
-
- pthread_mutex_lock(&post->lock);
- {
- reads = --post->ctx.gf_rdma_reads;
- }
- pthread_mutex_unlock(&post->lock);
-
- if (reads != 0) {
- /* if it is not the last rdma read, we've got nothing to do */
- goto out;
- }
-
- header = (gf_rdma_header_t *)post->buf;
-
- if (header->rm_type == GF_RDMA_NOMSG) {
- post->ctx.count = 1;
- post->ctx.vector[0].iov_len += post->ctx.vector[1].iov_len;
- }
- /*
- * if the reads were performed as a vector, all the buffers are actually
- * contiguous memory, so we can present them as a single vector instead
- * of multiple vectors.
- */
- while (post->ctx.count > 2) {
- post->ctx.vector[1].iov_len += post->ctx.vector[post->ctx.count - 1]
- .iov_len;
- post->ctx.count--;
- }
-
- ret = gf_rdma_pollin_notify(peer, post);
- if ((ret == -1) && (peer != NULL)) {
- rpc_transport_disconnect(peer->trans, _gf_false);
- }
-
-out:
- return;
-}
-
-static void *
-gf_rdma_send_completion_proc(void *data)
-{
- struct ibv_comp_channel *chan = NULL;
- gf_rdma_post_t *post = NULL;
- gf_rdma_peer_t *peer = NULL;
- struct ibv_cq *event_cq = NULL;
- void *event_ctx = NULL;
- gf_rdma_device_t *device = NULL;
- struct ibv_wc wc[10] = {
- {0},
- };
- char is_request = 0;
- int32_t ret = 0, quota_ret = 0, num_wr = 0;
- int32_t index = 0, failed = 0;
- chan = data;
- while (1) {
- failed = 0;
- ret = ibv_get_cq_event(chan, &event_cq, &event_ctx);
- if (ret) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno,
- RDMA_MSG_IBV_GET_CQ_FAILED,
- "ibv_get_cq_event on failed, terminating "
- "send thread: %d (%d)",
- ret, errno);
- continue;
- }
-
- device = event_ctx;
-
- ret = ibv_req_notify_cq(event_cq, 0);
- if (ret) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno,
- RDMA_MSG_IBV_REQ_NOTIFY_CQ_FAILED,
- "ibv_req_notify_cq on %s failed, terminating "
- "send thread: %d (%d)",
- device->device_name, ret, errno);
- continue;
- }
-
- while (!failed && (num_wr = ibv_poll_cq(event_cq, 10, wc)) > 0) {
- for (index = 0; index < num_wr && !failed; index++) {
- post = (gf_rdma_post_t *)(long)wc[index].wr_id;
-
- pthread_mutex_lock(&device->qpreg.lock);
- {
- peer = __gf_rdma_lookup_peer(device, wc[index].qp_num);
-
- /*
- * keep a refcount on transport so that it
- * does not get freed because of some error
- * indicated by wc.status, till we are done
- * with usage of peer and thereby that of trans.
- */
- if (peer != NULL) {
- rpc_transport_ref(peer->trans);
- }
- }
- pthread_mutex_unlock(&device->qpreg.lock);
-
- if (wc[index].status != IBV_WC_SUCCESS) {
- ibv_ack_cq_events(event_cq, num_wr);
- failed = 1;
- gf_rdma_handle_failed_send_completion(peer, &wc[index]);
- } else {
- gf_rdma_handle_successful_send_completion(peer, &wc[index]);
- }
-
- if (post) {
- is_request = post->ctx.is_request;
-
- ret = gf_rdma_post_unref(post);
- if ((ret == 0) && (wc[index].status == IBV_WC_SUCCESS) &&
- !is_request && (post->type == GF_RDMA_SEND_POST) &&
- (peer != NULL)) {
- /* A GF_RDMA_RECV_POST can end up in
- * gf_rdma_send_completion_proc for
- * rdma-reads, and we do not take
- * quota for getting a GF_RDMA_RECV_POST.
- */
-
- /*
- * if it is request, quota is returned
- * after reply has come.
- */
- quota_ret = gf_rdma_quota_put(peer);
- if (quota_ret < 0) {
- gf_msg_debug("rdma", 0,
- "failed to send "
- "message");
- }
- }
- }
-
- if (peer) {
- rpc_transport_unref(peer->trans);
- } else {
- gf_msg_debug(GF_RDMA_LOG_NAME, 0,
- "could not lookup peer for qp_num: %d",
- wc[index].qp_num);
- }
- }
- }
-
- if (ret < 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_ERROR, errno,
- RDMA_MSG_IBV_POLL_CQ_ERROR,
- "ibv_poll_cq on `%s' returned error (ret = %d,"
- " errno = %d)",
- device->device_name, ret, errno);
- continue;
- }
- if (!failed)
- ibv_ack_cq_events(event_cq, num_wr);
- }
-
- return NULL;
-}
-
-static void
-gf_rdma_options_init(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- int32_t mtu = 0;
- data_t *temp = NULL;
-
- /* TODO: validate arguments from options below */
-
- priv = this->private;
- options = &priv->options;
- options->send_size =
- GLUSTERFS_RDMA_INLINE_THRESHOLD; /*this->ctx->page_size * 4; 512 KB*/
- options->recv_size =
- GLUSTERFS_RDMA_INLINE_THRESHOLD; /*this->ctx->page_size * 4; 512 KB*/
- options->send_count = 4096;
- options->recv_count = 4096;
- options->attr_timeout = GF_RDMA_TIMEOUT;
- options->attr_retry_cnt = GF_RDMA_RETRY_CNT;
- options->attr_rnr_retry = GF_RDMA_RNR_RETRY;
-
- temp = dict_get(this->options, "transport.listen-backlog");
- if (temp)
- options->backlog = data_to_uint32(temp);
- else
- options->backlog = GLUSTERFS_SOCKET_LISTEN_BACKLOG;
-
- temp = dict_get(this->options, "transport.rdma.work-request-send-count");
- if (temp)
- options->send_count = data_to_int32(temp);
-
- temp = dict_get(this->options, "transport.rdma.work-request-recv-count");
- if (temp)
- options->recv_count = data_to_int32(temp);
-
- temp = dict_get(this->options, "transport.rdma.attr-timeout");
-
- if (temp)
- options->attr_timeout = data_to_uint8(temp);
-
- temp = dict_get(this->options, "transport.rdma.attr-retry-cnt");
-
- if (temp)
- options->attr_retry_cnt = data_to_uint8(temp);
-
- temp = dict_get(this->options, "transport.rdma.attr-rnr-retry");
-
- if (temp)
- options->attr_rnr_retry = data_to_uint8(temp);
-
- options->port = 1;
- temp = dict_get(this->options, "transport.rdma.port");
- if (temp)
- options->port = data_to_uint64(temp);
-
- options->mtu = mtu = IBV_MTU_2048;
- temp = dict_get(this->options, "transport.rdma.mtu");
- if (temp)
- mtu = data_to_int32(temp);
- switch (mtu) {
- case 256:
- options->mtu = IBV_MTU_256;
- break;
-
- case 512:
- options->mtu = IBV_MTU_512;
- break;
-
- case 1024:
- options->mtu = IBV_MTU_1024;
- break;
-
- case 2048:
- options->mtu = IBV_MTU_2048;
- break;
-
- case 4096:
- options->mtu = IBV_MTU_4096;
- break;
- default:
- if (temp)
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, 0,
- RDMA_MSG_UNRECG_MTU_VALUE,
- "%s: unrecognized "
- "MTU value '%s', defaulting to '2048'",
- this->name, data_to_str(temp));
- else
- gf_msg_trace(GF_RDMA_LOG_NAME, 0,
- "%s: defaulting "
- "MTU to '2048'",
- this->name);
- options->mtu = IBV_MTU_2048;
- break;
- }
-
- temp = dict_get(this->options, "transport.rdma.device-name");
- if (temp)
- options->device_name = gf_strdup(temp->data);
-
- return;
-}
-
-gf_rdma_ctx_t *
-__gf_rdma_ctx_create(void)
-{
- gf_rdma_ctx_t *rdma_ctx = NULL;
- int ret = -1;
-
- rdma_ctx = GF_CALLOC(1, sizeof(*rdma_ctx), gf_common_mt_char);
- if (rdma_ctx == NULL) {
- goto out;
- }
- pthread_mutex_init(&rdma_ctx->lock, NULL);
- rdma_ctx->rdma_cm_event_channel = rdma_create_event_channel();
- if (rdma_ctx->rdma_cm_event_channel == NULL) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, errno,
- RDMA_MSG_CM_EVENT_FAILED,
- "rdma_cm event channel "
- "creation failed");
- goto out;
- }
-
- ret = gf_thread_create(&rdma_ctx->rdma_cm_thread, NULL,
- gf_rdma_cm_event_handler,
- rdma_ctx->rdma_cm_event_channel, "rdmaehan");
- if (ret != 0) {
- gf_msg(GF_RDMA_LOG_NAME, GF_LOG_WARNING, ret, RDMA_MSG_CM_EVENT_FAILED,
- "creation of thread to "
- "handle rdma-cm events failed");
- goto out;
- }
-
-out:
- if (ret < 0 && rdma_ctx) {
- if (rdma_ctx->rdma_cm_event_channel != NULL) {
- rdma_destroy_event_channel(rdma_ctx->rdma_cm_event_channel);
- }
-
- GF_FREE(rdma_ctx);
- rdma_ctx = NULL;
- }
-
- return rdma_ctx;
-}
-
-static int32_t
-gf_rdma_init(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- int32_t ret = 0;
- glusterfs_ctx_t *ctx = NULL;
- gf_rdma_options_t *options = NULL;
-
- ctx = this->ctx;
-
- priv = this->private;
-
- ibv_fork_init();
- gf_rdma_options_init(this);
-
- options = &priv->options;
- priv->peer.send_count = options->send_count;
- priv->peer.recv_count = options->recv_count;
- priv->peer.send_size = options->send_size;
- priv->peer.recv_size = options->recv_size;
- priv->backlog = options->backlog;
-
- priv->peer.trans = this;
- INIT_LIST_HEAD(&priv->peer.ioq);
-
- pthread_mutex_init(&priv->write_mutex, NULL);
- pthread_mutex_init(&priv->recv_mutex, NULL);
- pthread_cond_init(&priv->recv_cond, NULL);
-
- LOCK(&ctx->lock);
- {
- if (ctx->ib == NULL) {
- ctx->ib = __gf_rdma_ctx_create();
- if (ctx->ib == NULL) {
- ret = -1;
- }
- }
- }
- UNLOCK(&ctx->lock);
-
- return ret;
-}
-
-static int32_t
-gf_rdma_disconnect(rpc_transport_t *this, gf_boolean_t wait)
-{
- gf_rdma_private_t *priv = NULL;
- int32_t ret = 0;
-
- priv = this->private;
- gf_msg_callingfn(this->name, GF_LOG_DEBUG, 0, RDMA_MSG_PEER_DISCONNECTED,
- "disconnect called (peer:%s)", this->peerinfo.identifier);
-
- pthread_mutex_lock(&priv->write_mutex);
- {
- ret = __gf_rdma_disconnect(this);
- }
- pthread_mutex_unlock(&priv->write_mutex);
-
- return ret;
-}
-
-static int32_t
-gf_rdma_connect(struct rpc_transport *this, int port)
-{
- gf_rdma_private_t *priv = NULL;
- int32_t ret = 0;
- union gf_sock_union sock_union = {
- {
- 0,
- },
- };
- socklen_t sockaddr_len = 0;
- gf_rdma_peer_t *peer = NULL;
- gf_rdma_ctx_t *rdma_ctx = NULL;
- gf_boolean_t connected = _gf_false;
-
- priv = this->private;
-
- peer = &priv->peer;
-
- rpc_transport_ref(this);
-
- ret = gf_rdma_client_get_remote_sockaddr(this, &sock_union.sa,
- &sockaddr_len, port);
- if (ret != 0) {
- gf_msg_debug(this->name, 0,
- "cannot get remote address to "
- "connect");
- goto out;
- }
-
- rdma_ctx = this->ctx->ib;
-
- pthread_mutex_lock(&priv->write_mutex);
- {
- if (peer->cm_id != NULL) {
- ret = -1;
- errno = EINPROGRESS;
- connected = _gf_true;
- goto unlock;
- }
-
- priv->entity = GF_RDMA_CLIENT;
-
- ret = rdma_create_id(rdma_ctx->rdma_cm_event_channel, &peer->cm_id,
- this, RDMA_PS_TCP);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, errno, RDMA_MSG_CM_EVENT_FAILED,
- "creation of "
- "rdma_cm_id failed");
- ret = -errno;
- goto unlock;
- }
-
- memcpy(&this->peerinfo.sockaddr, &sock_union.storage, sockaddr_len);
- this->peerinfo.sockaddr_len = sockaddr_len;
-
- if (port > 0)
- sock_union.sin.sin_port = htons(port);
-
- ((struct sockaddr *)&this->myinfo.sockaddr)->sa_family =
- ((struct sockaddr *)&this->peerinfo.sockaddr)->sa_family;
-
- ret = gf_rdma_client_bind(this,
- (struct sockaddr *)&this->myinfo.sockaddr,
- &this->myinfo.sockaddr_len, peer->cm_id);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, errno,
- RDMA_MSG_CLIENT_BIND_FAILED, "client bind failed");
- goto unlock;
- }
-
- ret = rdma_resolve_addr(peer->cm_id, NULL, &sock_union.sa, 2000);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, errno,
- RDMA_MSG_RDMA_RESOLVE_ADDR_FAILED,
- "rdma_resolve_addr failed");
- goto unlock;
- }
-
- priv->connected = 0;
- }
-unlock:
- pthread_mutex_unlock(&priv->write_mutex);
-
-out:
- if (ret != 0) {
- if (!connected) {
- gf_rdma_teardown(this);
- }
-
- rpc_transport_unref(this);
- }
-
- return ret;
-}
-
-static int32_t
-gf_rdma_listen(rpc_transport_t *this)
-{
- union gf_sock_union sock_union = {
- {
- 0,
- },
- };
- socklen_t sockaddr_len = 0;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_peer_t *peer = NULL;
- int ret = 0;
- gf_rdma_ctx_t *rdma_ctx = NULL;
- cmd_args_t *cmd_args = NULL;
- char service[NI_MAXSERV], host[NI_MAXHOST];
- int optval = 2;
-
- priv = this->private;
- peer = &priv->peer;
-
- priv->entity = GF_RDMA_SERVER_LISTENER;
-
- rdma_ctx = this->ctx->ib;
-
- ret = gf_rdma_server_get_local_sockaddr(this, &sock_union.sa,
- &sockaddr_len);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_NW_ADDR_UNKNOWN,
- "cannot find network address of server to bind to");
- goto err;
- }
-
- ret = rdma_create_id(rdma_ctx->rdma_cm_event_channel, &peer->cm_id, this,
- RDMA_PS_TCP);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_CM_EVENT_FAILED,
- "creation of rdma_cm_id "
- "failed");
- goto err;
- }
-
- memcpy(&this->myinfo.sockaddr, &sock_union.storage, sockaddr_len);
- this->myinfo.sockaddr_len = sockaddr_len;
-
- ret = getnameinfo((struct sockaddr *)&this->myinfo.sockaddr,
- this->myinfo.sockaddr_len, host, sizeof(host), service,
- sizeof(service), NI_NUMERICHOST);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, ret, TRANS_MSG_GET_NAME_INFO_FAILED,
- "getnameinfo failed");
- goto err;
- }
-
- if (snprintf(this->myinfo.identifier, UNIX_PATH_MAX, "%s:%s", host,
- service) >= UNIX_PATH_MAX) {
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_BUFFER_ERROR,
- "host and service name too large");
- goto err;
- }
-
- ret = rdma_set_option(peer->cm_id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
- (void *)&optval, sizeof(optval));
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_OPTION_SET_FAILED,
- "rdma option set failed");
- goto err;
- }
-
- ret = rdma_bind_addr(peer->cm_id, &sock_union.sa);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, errno,
- RDMA_MSG_RDMA_BIND_ADDR_FAILED, "rdma_bind_addr failed");
- goto err;
- }
- ret = rdma_listen(peer->cm_id, priv->backlog);
- if (ret != 0) {
- gf_msg(this->name, GF_LOG_WARNING, errno, RDMA_MSG_LISTEN_FAILED,
- "rdma_listen failed");
- goto err;
- }
-
- cmd_args = &(this->ctx->cmd_args);
- if (!cmd_args->brick_port2) {
- cmd_args->brick_port2 = rdma_get_src_port(peer->cm_id);
- gf_log(this->name, GF_LOG_INFO,
- "process started listening on port (%d)", cmd_args->brick_port2);
- }
-
- rpc_transport_ref(this);
-
- ret = 0;
-err:
- if (ret < 0) {
- if (peer->cm_id != NULL) {
- rdma_destroy_id(peer->cm_id);
- peer->cm_id = NULL;
- }
- }
-
- return ret;
-}
-
-struct rpc_transport_ops tops = {
- .submit_request = gf_rdma_submit_request,
- .submit_reply = gf_rdma_submit_reply,
- .connect = gf_rdma_connect,
- .disconnect = gf_rdma_disconnect,
- .listen = gf_rdma_listen,
-};
-
-int32_t
-init(rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_ctx_t *rdma_ctx = NULL;
- struct iobuf_pool *iobuf_pool = NULL;
-
- priv = GF_CALLOC(1, sizeof(*priv), gf_common_mt_rdma_private_t);
- if (!priv)
- return -1;
-
- this->private = priv;
-
- if (gf_rdma_init(this)) {
- gf_msg(this->name, GF_LOG_WARNING, 0, RDMA_MSG_INIT_IB_DEVICE_FAILED,
- "Failed to initialize IB Device");
- this->private = NULL;
- GF_FREE(priv);
- return -1;
- }
- rdma_ctx = this->ctx->ib;
- if (!rdma_ctx)
- return -1;
-
- pthread_mutex_lock(&rdma_ctx->lock);
- {
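- /* hook the rdma registration callbacks into the iobuf pool only
- * when the first rdma transport instance gets loaded */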
- if (this->dl_handle && (++(rdma_ctx->dlcount)) == 1) {
- iobuf_pool = this->ctx->iobuf_pool;
- iobuf_pool->rdma_registration = gf_rdma_register_arena;
- iobuf_pool->rdma_deregistration = gf_rdma_deregister_arena;
- gf_rdma_register_iobuf_pool_with_device(rdma_ctx->device,
- iobuf_pool);
- }
- }
- pthread_mutex_unlock(&rdma_ctx->lock);
-
- return 0;
-}
-
-int
-reconfigure(rpc_transport_t *this, dict_t *options)
-{
- gf_rdma_private_t *priv = NULL;
- uint32_t backlog = 0;
- int ret = -1;
-
- GF_VALIDATE_OR_GOTO("rdma", this, out);
- GF_VALIDATE_OR_GOTO("rdma", this->private, out);
-
- priv = this->private;
-
- if (dict_get_uint32(options, "transport.listen-backlog", &backlog) == 0) {
- priv->backlog = backlog;
- gf_log(this->name, GF_LOG_DEBUG,
- "Reconfigued "
- "transport.listen-backlog=%d",
- priv->backlog);
- }
- ret = 0;
-out:
- return ret;
-}
-void
-fini(struct rpc_transport *this)
-{
- /* TODO: verify this function does graceful finish */
- gf_rdma_private_t *priv = NULL;
- struct iobuf_pool *iobuf_pool = NULL;
- gf_rdma_ctx_t *rdma_ctx = NULL;
-
- priv = this->private;
-
- this->private = NULL;
-
- if (priv) {
- pthread_mutex_destroy(&priv->recv_mutex);
- pthread_mutex_destroy(&priv->write_mutex);
-
- gf_msg_trace(this->name, 0, "called fini on transport: %p", this);
- GF_FREE(priv);
- }
-
- rdma_ctx = this->ctx->ib;
- if (!rdma_ctx)
- return;
-
- pthread_mutex_lock(&rdma_ctx->lock);
- {
- if (this->dl_handle && (--(rdma_ctx->dlcount)) == 0) {
- iobuf_pool = this->ctx->iobuf_pool;
- gf_rdma_deregister_iobuf_pool(rdma_ctx->device);
- iobuf_pool->rdma_registration = NULL;
- iobuf_pool->rdma_deregistration = NULL;
- }
- }
- pthread_mutex_unlock(&rdma_ctx->lock);
-
- return;
-}
-
-/* TODO: expand each option */
-struct volume_options options[] = {
- {.key = {"transport.rdma.port", "rdma-port"},
- .type = GF_OPTION_TYPE_INT,
- .min = 1,
- .max = 4,
- .description = "check the option by 'ibv_devinfo'"},
- {
- .key = {"transport.rdma.mtu", "rdma-mtu"},
- .type = GF_OPTION_TYPE_INT,
- },
- {.key = {"transport.rdma.device-name", "rdma-device-name"},
- .type = GF_OPTION_TYPE_ANY,
- .description = "check by 'ibv_devinfo'"},
- {
- .key = {"transport.rdma.work-request-send-count",
- "rdma-work-request-send-count"},
- .type = GF_OPTION_TYPE_INT,
- },
- {
- .key = {"transport.rdma.work-request-recv-count",
- "rdma-work-request-recv-count"},
- .type = GF_OPTION_TYPE_INT,
- },
- {.key = {"remote-port", "transport.remote-port",
- "transport.rdma.remote-port"},
- .type = GF_OPTION_TYPE_INT},
- {.key = {"transport.rdma.attr-timeout", "rdma-attr-timeout"},
- .type = GF_OPTION_TYPE_INT},
- {.key = {"transport.rdma.attr-retry-cnt", "rdma-attr-retry-cnt"},
- .type = GF_OPTION_TYPE_INT},
- {.key = {"transport.rdma.attr-rnr-retry", "rdma-attr-rnr-retry"},
- .type = GF_OPTION_TYPE_INT},
- {.key = {"transport.rdma.listen-port", "listen-port"},
- .type = GF_OPTION_TYPE_INT},
- {.key = {"transport.rdma.connect-path", "connect-path"},
- .type = GF_OPTION_TYPE_ANY},
- {.key = {"transport.rdma.bind-path", "bind-path"},
- .type = GF_OPTION_TYPE_ANY},
- {.key = {"transport.rdma.listen-path", "listen-path"},
- .type = GF_OPTION_TYPE_ANY},
- {.key = {"transport.address-family", "address-family"},
- .value = {"inet", "inet6", "inet/inet6", "inet6/inet", "unix", "inet-sdp"},
- .type = GF_OPTION_TYPE_STR},
- {.key = {"transport.socket.lowlat"}, .type = GF_OPTION_TYPE_BOOL},
- {.key = {NULL}}};
diff --git a/rpc/rpc-transport/rdma/src/rdma.h b/rpc/rpc-transport/rdma/src/rdma.h
deleted file mode 100644
index 403f5678ad8..00000000000
--- a/rpc/rpc-transport/rdma/src/rdma.h
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#ifndef _XPORT_RDMA_H
-#define _XPORT_RDMA_H
-
-#ifndef MAX_IOVEC
-#define MAX_IOVEC 16
-#endif /* MAX_IOVEC */
-
-#include "rpc-clnt.h"
-#include "rpc-transport.h"
-#include <glusterfs/xlator.h>
-#include <glusterfs/gf-event.h>
-#include <stdio.h>
-#include <glusterfs/list.h>
-#include <arpa/inet.h>
-#include <infiniband/verbs.h>
-#include <rdma/rdma_cma.h>
-
-/* FIXME: give appropriate values to these macros */
-#define GF_DEFAULT_RDMA_LISTEN_PORT (GF_DEFAULT_BASE_PORT + 1)
-
-/* If you are changing GF_RDMA_MAX_SEGMENTS, please make sure to update
- * GLUSTERFS_GF_RDMA_MAX_HEADER_SIZE defined in glusterfs.h .
- */
-#define GF_RDMA_MAX_SEGMENTS 8
-
-#define GF_RDMA_VERSION 1
-#define GF_RDMA_POOL_SIZE 512
-
-/* Additional attributes */
-#define GF_RDMA_TIMEOUT 14
-#define GF_RDMA_RETRY_CNT 7
-#define GF_RDMA_RNR_RETRY 7
-
-typedef enum gf_rdma_errcode { ERR_VERS = 1, ERR_CHUNK = 2 } gf_rdma_errcode_t;
-
-struct gf_rdma_err_vers {
- uint32_t gf_rdma_vers_low; /* Version range supported by peer */
- uint32_t gf_rdma_vers_high;
-} __attribute__((packed));
-typedef struct gf_rdma_err_vers gf_rdma_err_vers_t;
-
-typedef enum gf_rdma_proc {
- GF_RDMA_MSG = 0, /* An RPC call or reply msg */
- GF_RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */
- GF_RDMA_MSGP = 2, /* An RPC call or reply msg with padding */
- GF_RDMA_DONE = 3, /* Client signals reply completion */
- GF_RDMA_ERROR = 4 /* An RPC RDMA encoding error */
-} gf_rdma_proc_t;
-
-typedef enum gf_rdma_chunktype {
- gf_rdma_noch = 0, /* no chunk */
- gf_rdma_readch, /* some argument through rdma read */
- gf_rdma_areadch, /* entire request through rdma read */
- gf_rdma_writech, /* some result through rdma write */
- gf_rdma_replych /* entire reply through rdma write */
-} gf_rdma_chunktype_t;
-
-/* If you are modifying __gf_rdma_header, please make sure to change
- * GLUSTERFS_GF_RDMA_MAX_HEADER_SIZE defined in glusterfs.h to reflect your
- * changes
- */
-struct __gf_rdma_header {
- uint32_t rm_xid; /* Mirrors the RPC header xid */
- uint32_t rm_vers; /* Version of this protocol */
- uint32_t rm_credit; /* Buffers requested/granted */
- uint32_t rm_type; /* Type of message (enum gf_rdma_proc) */
- union {
- struct { /* no chunks */
- uint32_t rm_empty[3]; /* 3 empty chunk lists */
- } __attribute__((packed)) rm_nochunks;
-
- struct { /* no chunks and padded */
- uint32_t rm_align; /* Padding alignment */
- uint32_t rm_thresh; /* Padding threshold */
- uint32_t rm_pempty[3]; /* 3 empty chunk lists */
- } __attribute__((packed)) rm_padded;
-
- struct {
- uint32_t rm_type;
- gf_rdma_err_vers_t rm_version;
- } __attribute__((packed)) rm_error;
-
- uint32_t rm_chunks[0]; /* read, write and reply chunks */
- } __attribute__((packed)) rm_body;
-} __attribute__((packed));
-typedef struct __gf_rdma_header gf_rdma_header_t;
-
-/* If you are modifying __gf_rdma_segment or __gf_rdma_read_chunk, please make
- * sure to change GLUSTERFS_GF_RDMA_MAX_HEADER_SIZE defined in glusterfs.h to
- * reflect your changes.
- */
-struct __gf_rdma_segment {
- uint32_t rs_handle; /* Registered memory handle */
- uint32_t rs_length; /* Length of the chunk in bytes */
- uint64_t rs_offset; /* Chunk virtual address or offset */
-} __attribute__((packed));
-typedef struct __gf_rdma_segment gf_rdma_segment_t;
-
-/* read chunk(s), encoded as a linked list. */
-struct __gf_rdma_read_chunk {
- uint32_t rc_discrim; /* 1 indicates presence */
- uint32_t rc_position; /* Position in XDR stream */
- gf_rdma_segment_t rc_target;
-} __attribute__((packed));
-typedef struct __gf_rdma_read_chunk gf_rdma_read_chunk_t;
-
-/* write chunk, and reply chunk. */
-struct __gf_rdma_write_chunk {
- gf_rdma_segment_t wc_target;
-} __attribute__((packed));
-typedef struct __gf_rdma_write_chunk gf_rdma_write_chunk_t;
-
-/* write chunk(s), encoded as a counted array. */
-struct __gf_rdma_write_array {
- uint32_t wc_discrim; /* 1 indicates presence */
- uint32_t wc_nchunks; /* Array count */
- struct __gf_rdma_write_chunk wc_array[0];
-} __attribute__((packed));
-typedef struct __gf_rdma_write_array gf_rdma_write_array_t;
-
-/* options per transport end point */
-struct __gf_rdma_options {
- int32_t port;
- char *device_name;
- enum ibv_mtu mtu;
- int32_t send_count;
- int32_t recv_count;
- uint64_t recv_size;
- uint64_t send_size;
- uint8_t attr_timeout;
- uint8_t attr_retry_cnt;
- uint8_t attr_rnr_retry;
- uint32_t backlog;
-};
-typedef struct __gf_rdma_options gf_rdma_options_t;
-
-struct __gf_rdma_reply_info {
- uint32_t rm_xid; /* xid in network endian */
- gf_rdma_chunktype_t type; /*
- * can be either gf_rdma_replych
- * or gf_rdma_writech.
- */
- gf_rdma_write_array_t *wc_array;
- struct mem_pool *pool;
-};
-typedef struct __gf_rdma_reply_info gf_rdma_reply_info_t;
-
-struct __gf_rdma_ioq {
- union {
- struct list_head list;
- struct {
- struct __gf_rdma_ioq *next;
- struct __gf_rdma_ioq *prev;
- };
- };
-
- char is_request;
- struct iovec rpchdr[MAX_IOVEC];
- int rpchdr_count;
- struct iovec proghdr[MAX_IOVEC];
- int proghdr_count;
- struct iovec prog_payload[MAX_IOVEC];
- int prog_payload_count;
-
- struct iobref *iobref;
-
- union {
- struct __gf_rdma_ioq_request {
- /* used to build reply_chunk for GF_RDMA_NOMSG type msgs */
- struct iovec rsphdr_vec[MAX_IOVEC];
- int rsphdr_count;
-
- /*
- * used to build write_array during operations like
- * read.
- */
- struct iovec rsp_payload[MAX_IOVEC];
- int rsp_payload_count;
-
- struct rpc_req *rpc_req; /* FIXME: hack! hack! should be
- * cleaned up later
- */
- struct iobref *rsp_iobref;
- } request;
-
- gf_rdma_reply_info_t *reply_info;
- } msg;
-
- struct mem_pool *pool;
-};
-typedef struct __gf_rdma_ioq gf_rdma_ioq_t;
-
-typedef enum __gf_rdma_send_post_type {
- GF_RDMA_SEND_POST_NO_CHUNKLIST, /* post which is sent using rdma-send
- * and the msg carries no
- * chunklists.
- */
- GF_RDMA_SEND_POST_READ_CHUNKLIST, /* post which is sent using rdma-send
- * and the msg carries only read
- * chunklist.
- */
- GF_RDMA_SEND_POST_WRITE_CHUNKLIST, /* post which is sent using
- * rdma-send and the msg carries
- * only write chunklist.
- */
- GF_RDMA_SEND_POST_READ_WRITE_CHUNKLIST, /* post which is sent using
- * rdma-send and the msg
- * carries both read and
- * write chunklists.
- */
- GF_RDMA_SEND_POST_GF_RDMA_READ, /* RDMA read */
- GF_RDMA_SEND_POST_GF_RDMA_WRITE, /* RDMA write */
-} gf_rdma_send_post_type_t;
-
-/* represents one communication peer, two per transport_t */
-struct __gf_rdma_peer {
- rpc_transport_t *trans;
- struct rdma_cm_id *cm_id;
- struct ibv_qp *qp;
- pthread_t rdma_event_thread;
- char quota_set;
-
- int32_t recv_count;
- int32_t send_count;
- int32_t recv_size;
- int32_t send_size;
-
- int32_t quota;
- union {
- struct list_head ioq;
- struct {
- gf_rdma_ioq_t *ioq_next;
- gf_rdma_ioq_t *ioq_prev;
- };
- };
-
- /* QP attributes, needed to connect with remote QP */
- int32_t local_lid;
- int32_t local_psn;
- int32_t local_qpn;
- int32_t remote_lid;
- int32_t remote_psn;
- int32_t remote_qpn;
-};
-typedef struct __gf_rdma_peer gf_rdma_peer_t;
-
-struct __gf_rdma_post_context {
- struct ibv_mr *mr[GF_RDMA_MAX_SEGMENTS];
- int mr_count;
- struct iovec vector[MAX_IOVEC];
- int count;
- struct iobref *iobref;
- struct iobuf *hdr_iobuf;
- char is_request;
- int gf_rdma_reads;
- gf_rdma_reply_info_t *reply_info;
-};
-typedef struct __gf_rdma_post_context gf_rdma_post_context_t;
-
-typedef enum { GF_RDMA_SEND_POST, GF_RDMA_RECV_POST } gf_rdma_post_type_t;
-
-struct __gf_rdma_post {
- struct __gf_rdma_post *next, *prev;
- struct ibv_mr *mr;
- char *buf;
- int32_t buf_size;
- char aux;
- int32_t reused;
- struct __gf_rdma_device *device;
- gf_rdma_post_type_t type;
- gf_rdma_post_context_t ctx;
- int refcount;
- pthread_mutex_t lock;
-};
-typedef struct __gf_rdma_post gf_rdma_post_t;
-
-struct __gf_rdma_queue {
- gf_rdma_post_t active_posts, passive_posts;
- int32_t active_count, passive_count;
- pthread_mutex_t lock;
-};
-typedef struct __gf_rdma_queue gf_rdma_queue_t;
-
-struct __gf_rdma_qpreg {
- pthread_mutex_t lock;
- int32_t count;
- struct _qpent {
- struct _qpent *next, *prev;
- int32_t qp_num;
- gf_rdma_peer_t *peer;
- } ents[42];
-};
-typedef struct __gf_rdma_qpreg gf_rdma_qpreg_t;
-
-/* context per device, stored in global glusterfs_ctx_t->ib */
-struct __gf_rdma_device {
- struct __gf_rdma_device *next;
- const char *device_name;
- struct ibv_context *context;
- int32_t port;
- struct ibv_pd *pd;
- struct ibv_srq *srq;
- gf_rdma_qpreg_t qpreg;
- struct ibv_comp_channel *send_chan, *recv_chan;
- struct ibv_cq *send_cq, *recv_cq;
- gf_rdma_queue_t sendq, recvq;
- pthread_t send_thread, recv_thread, async_event_thread;
- struct mem_pool *request_ctx_pool;
- struct mem_pool *ioq_pool;
- struct mem_pool *reply_info_pool;
- struct list_head all_mr;
- pthread_mutex_t all_mr_lock;
-};
-typedef struct __gf_rdma_device gf_rdma_device_t;
-
-struct __gf_rdma_arena_mr {
- struct list_head list;
- struct iobuf_arena *iobuf_arena;
- struct ibv_mr *mr;
-};
-
-typedef struct __gf_rdma_arena_mr gf_rdma_arena_mr;
-struct __gf_rdma_ctx {
- gf_rdma_device_t *device;
- struct rdma_event_channel *rdma_cm_event_channel;
- pthread_t rdma_cm_thread;
- pthread_mutex_t lock;
- int32_t dlcount;
-};
-typedef struct __gf_rdma_ctx gf_rdma_ctx_t;
-
-struct __gf_rdma_request_context {
- struct ibv_mr *mr[GF_RDMA_MAX_SEGMENTS];
- int mr_count;
- struct mem_pool *pool;
- gf_rdma_peer_t *peer;
- struct iobref *iobref;
- struct iobref *rsp_iobref;
-};
-typedef struct __gf_rdma_request_context gf_rdma_request_context_t;
-
-typedef enum {
- GF_RDMA_SERVER_LISTENER,
- GF_RDMA_SERVER,
- GF_RDMA_CLIENT,
-} gf_rdma_transport_entity_t;
-
-struct __gf_rdma_private {
- int32_t idx;
- unsigned char connected;
- in_addr_t addr;
- unsigned short port;
-
- /* IB Verbs Driver specific variables, pointers */
- gf_rdma_peer_t peer;
- struct __gf_rdma_device *device;
- gf_rdma_options_t options;
-
- /* Used by trans->op->receive */
- char *data_ptr;
- int32_t data_offset;
- int32_t data_len;
-
- /* Mutex */
- pthread_mutex_t write_mutex;
- rpc_transport_t *listener;
- pthread_mutex_t recv_mutex;
- pthread_cond_t recv_cond;
- gf_rdma_transport_entity_t entity;
- uint32_t backlog;
-};
-typedef struct __gf_rdma_private gf_rdma_private_t;
-
-#endif /* _XPORT_GF_RDMA_H */
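
The header deleted above defines the RPC-over-RDMA wire format used by this transport: gf_rdma_header_t followed by optional read, write and reply chunk lists. For orientation, here is a minimal sketch of filling the no-chunk variant of that header. The helper name is invented, and treating 0 as the plain-message value of enum gf_rdma_proc and sending every field in network byte order are assumptions drawn from common RPC-over-RDMA practice, not from the hunks above.

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h> /* htonl() */

    /* Sketch only: fill a header that carries no chunk lists, using the
     * layout of the (deleted) struct __gf_rdma_header shown above. */
    static void
    example_fill_nochunk_header(gf_rdma_header_t *hdr, uint32_t xid,
                                uint32_t credits)
    {
        memset(hdr, 0, sizeof(*hdr));
        hdr->rm_xid = htonl(xid);        /* mirrors the RPC header xid */
        hdr->rm_vers = htonl(1);         /* protocol version */
        hdr->rm_credit = htonl(credits); /* receive buffers granted */
        hdr->rm_type = htonl(0);         /* assumed: 0 == plain message */
        /* rm_body.rm_nochunks.rm_empty[] stays zero: a peer parses the
         * three zeroed words as empty read, write and reply chunk lists. */
    }

The packed attributes on the struct matter for this layout: without them the union body would not start immediately after rm_type on every ABI.
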
diff --git a/rpc/rpc-transport/rdma/src/rpc-trans-rdma-messages.h b/rpc/rpc-transport/rdma/src/rpc-trans-rdma-messages.h
deleted file mode 100644
index 662a8980648..00000000000
--- a/rpc/rpc-transport/rdma/src/rpc-trans-rdma-messages.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#ifndef _RPC_TRANS_RDMA_MESSAGES_H_
-#define _RPC_TRANS_RDMA_MESSAGES_H_
-
-#include <glusterfs/glfs-message-id.h>
-
-/* To add new message IDs, append new identifiers at the end of the list.
- *
- * Never remove a message ID. If it's not used anymore, you can rename it or
- * leave it as it is, but not delete it. This is to prevent reutilization of
- * IDs by other messages.
- *
- * The component name must match one of the entries defined in
- * glfs-message-id.h.
- */
-
-GLFS_MSGID(
- RPC_TRANS_RDMA, RDMA_MSG_PORT_BIND_FAILED, RDMA_MSG_POST_CREATION_FAILED,
- RDMA_MSG_DEREGISTER_ARENA_FAILED, RDMA_MSG_MR_ALOC_FAILED,
- RDMA_MSG_PREREG_BUFFER_FAILED, RDMA_MSG_CQ_CREATION_FAILED,
- RDMA_MSG_REQ_NOTIFY_CQ_REVQ_FAILED, RDMA_MSG_QUERY_DEVICE_FAILED,
- RDMA_MSG_REQ_NOTIFY_CQ_SENDQ_FAILED, RDMA_MSG_SEND_COMP_CHAN_FAILED,
- RDMA_MSG_RECV_COMP_CHAN_FAILED, RDMA_MSG_ALOC_PROT_DOM_FAILED,
- RDMA_MSG_CRE_SRQ_FAILED, RDMA_MSG_ALOC_POST_FAILED,
- RDMA_MSG_SEND_COMP_THREAD_FAILED, RDMA_MSG_RECV_COMP_THREAD_FAILED,
- RDMA_MSG_ASYNC_EVENT_THEAD_FAILED, RDMA_MSG_GET_DEVICE_NAME_FAILED,
- RDMA_MSG_GET_IB_DEVICE_FAILED, RDMA_MSG_CREAT_INC_TRANS_FAILED,
- RDMA_MSG_CREAT_QP_FAILED, RDMA_MSG_ACCEPT_FAILED, RDMA_MSG_CONNECT_FAILED,
- RDMA_MSG_ROUTE_RESOLVE_FAILED, RDMA_MSG_GET_DEVICE_FAILED,
- RDMA_MSG_PEER_DISCONNECTED, RDMA_MSG_ENCODE_ERROR,
- RDMA_MSG_POST_SEND_FAILED, RDMA_MSG_READ_CHUNK_VECTOR_FAILED,
- RDMA_MSG_WRITE_CHUNK_VECTOR_FAILED, RDMA_MSG_WRITE_REPLY_CHUNCK_CONFLICT,
- RDMA_MSG_CHUNK_COUNT_GREAT_MAX_SEGMENTS, RDMA_MSG_CREATE_READ_CHUNK_FAILED,
- RDMA_MSG_CREATE_WRITE_REPLAY_FAILED,
- RDMA_MSG_SEND_SIZE_GREAT_INLINE_THRESHOLD,
- RDMA_MSG_REG_ACCESS_LOCAL_WRITE_FAILED, RDMA_MSG_WRITE_PEER_FAILED,
- RDMA_MSG_SEND_REPLY_FAILED, RDMA_MSG_INVALID_CHUNK_TYPE,
- RDMA_MSG_PROC_IOQ_ENTRY_FAILED, RDMA_MSG_NEW_IOQ_ENTRY_FAILED,
- RDMA_MSG_RPC_REPLY_CREATE_FAILED, RDMA_MSG_GET_READ_CHUNK_FAILED,
- RDMA_MSG_GET_WRITE_CHUNK_FAILED, RDMA_MSG_REPLY_INFO_ALLOC_FAILED,
- RDMA_MSG_RDMA_ERROR_RECEIVED, RDMA_MSG_GET_REQ_INFO_RPC_FAILED,
- RDMA_MSG_POLL_IN_NOTIFY_FAILED, RDMA_MSG_HEADER_DECODE_FAILED,
- RDMA_MSG_EVENT_SRQ_LIMIT_REACHED, RDMA_MSG_UNRECG_MQ_VALUE,
- RDMA_MSG_BUFFER_ERROR, RDMA_MSG_OPTION_SET_FAILED, RDMA_MSG_LISTEN_FAILED,
- RDMA_MSG_INIT_IB_DEVICE_FAILED, RDMA_MSG_WRITE_CLIENT_ERROR,
- RDMA_MSG_CHUNKLIST_ERROR, RDMA_MSG_INVALID_ENTRY,
- RDMA_MSG_READ_CLIENT_ERROR, RDMA_MSG_RPC_ST_ERROR,
- RDMA_MSG_PEER_READ_FAILED, RDMA_MSG_POST_MISSING, RDMA_MSG_PEER_REQ_FAILED,
- RDMA_MSG_PEER_REP_FAILED, RDMA_MSG_EVENT_ERROR, RDMA_MSG_IBV_GET_CQ_FAILED,
- RDMA_MSG_IBV_REQ_NOTIFY_CQ_FAILED, RDMA_MSG_RECV_ERROR,
- RDMA_MSG_IBV_POLL_CQ_ERROR, RDMA_MSG_RDMA_HANDLE_FAILED,
- RDMA_MSG_CM_EVENT_FAILED, RDMA_MSG_CLIENT_BIND_FAILED,
- RDMA_MSG_RDMA_RESOLVE_ADDR_FAILED, RDMA_MSG_NW_ADDR_UNKNOWN,
- RDMA_MSG_RDMA_BIND_ADDR_FAILED, RDMA_MSG_SEND_CLIENT_ERROR,
- RDMA_MSG_UNRECG_MTU_VALUE);
-
-#endif /* !_RPC_TRANS_RDMA_MESSAGES_H_ */
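
The comment block in the header removed above spells out the GLFS_MSGID() contract: new IDs are appended at the end and existing ones are never deleted, so numeric values stay stable across releases. For illustration only, this is how a new entry would have been added while the file still existed; RDMA_MSG_EXAMPLE_FAILED is a hypothetical name and the elided middle of the list stands for the existing IDs, unchanged.

    #include <glusterfs/glfs-message-id.h>

    /* Hypothetical addition: the new identifier goes last, just before
     * the closing parenthesis, so none of the earlier IDs change value. */
    GLFS_MSGID(RPC_TRANS_RDMA,
               RDMA_MSG_PORT_BIND_FAILED,
               /* ... every previously defined ID, in the same order ... */
               RDMA_MSG_UNRECG_MTU_VALUE,
               RDMA_MSG_EXAMPLE_FAILED);
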
diff --git a/rpc/rpc-transport/socket/src/name.c b/rpc/rpc-transport/socket/src/name.c
index 6336d7d25c4..9286bbb236d 100644
--- a/rpc/rpc-transport/socket/src/name.c
+++ b/rpc/rpc-transport/socket/src/name.c
@@ -105,10 +105,10 @@ af_unix_client_bind(rpc_transport_t *this, struct sockaddr *sockaddr,
struct sockaddr_un *addr = NULL;
int32_t ret = 0;
- path_data = dict_get(this->options, "transport.socket.bind-path");
+ path_data = dict_get_sizen(this->options, "transport.socket.bind-path");
if (path_data) {
char *path = data_to_str(path_data);
- if (!path || strlen(path) > UNIX_PATH_MAX) {
+ if (!path || path_data->len > 108) { /* 108 = addr->sun_path length */
gf_log(this->name, GF_LOG_TRACE,
"bind-path not specified for unix socket, "
"letting connect to assign default value");
@@ -134,7 +134,7 @@ err:
return ret;
}
-int32_t
+static int32_t
client_fill_address_family(rpc_transport_t *this, sa_family_t *sa_family)
{
data_t *address_family_data = NULL;
@@ -145,12 +145,13 @@ client_fill_address_family(rpc_transport_t *this, sa_family_t *sa_family)
goto out;
}
- address_family_data = dict_get(this->options, "transport.address-family");
+ address_family_data = dict_get_sizen(this->options,
+ "transport.address-family");
if (!address_family_data) {
data_t *remote_host_data = NULL, *connect_path_data = NULL;
- remote_host_data = dict_get(this->options, "remote-host");
- connect_path_data = dict_get(this->options,
- "transport.socket.connect-path");
+ remote_host_data = dict_get_sizen(this->options, "remote-host");
+ connect_path_data = dict_get_sizen(this->options,
+ "transport.socket.connect-path");
if (!(remote_host_data || connect_path_data) ||
(remote_host_data && connect_path_data)) {
@@ -179,7 +180,7 @@ client_fill_address_family(rpc_transport_t *this, sa_family_t *sa_family)
}
} else {
- char *address_family = data_to_str(address_family_data);
+ const char *address_family = data_to_str(address_family_data);
if (!strcasecmp(address_family, "unix")) {
*sa_family = AF_UNIX;
} else if (!strcasecmp(address_family, "inet")) {
@@ -211,12 +212,12 @@ af_inet_client_get_remote_sockaddr(rpc_transport_t *this,
data_t *remote_host_data = NULL;
data_t *remote_port_data = NULL;
char *remote_host = NULL;
- uint16_t remote_port = 0;
+ uint16_t remote_port = GF_DEFAULT_SOCKET_LISTEN_PORT;
struct addrinfo *addr_info = NULL;
int32_t ret = 0;
struct in6_addr serveraddr;
- remote_host_data = dict_get(options, "remote-host");
+ remote_host_data = dict_get_sizen(options, "remote-host");
if (remote_host_data == NULL) {
gf_log(this->name, GF_LOG_ERROR,
"option remote-host missing in volume %s", this->name);
@@ -232,25 +233,23 @@ af_inet_client_get_remote_sockaddr(rpc_transport_t *this,
goto err;
}
- remote_port_data = dict_get(options, "remote-port");
+ remote_port_data = dict_get_sizen(options, "remote-port");
if (remote_port_data == NULL) {
gf_log(this->name, GF_LOG_TRACE,
"option remote-port missing in volume %s. Defaulting to %d",
this->name, GF_DEFAULT_SOCKET_LISTEN_PORT);
-
- remote_port = GF_DEFAULT_SOCKET_LISTEN_PORT;
} else {
remote_port = data_to_uint16(remote_port_data);
+ if (remote_port == (uint16_t)-1) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "option remote-port has invalid port in volume %s",
+ this->name);
+ ret = -1;
+ goto err;
+ }
}
- if (remote_port == (uint16_t)-1) {
- gf_log(this->name, GF_LOG_ERROR,
- "option remote-port has invalid port in volume %s", this->name);
- ret = -1;
- goto err;
- }
-
- /* Need to update transport-address family if address-family is not provide
+ /* Need to update transport-address family if address-family is not provided
to command-line arguments
*/
if (inet_pton(AF_INET6, remote_host, &serveraddr)) {
@@ -282,31 +281,29 @@ af_unix_client_get_remote_sockaddr(rpc_transport_t *this,
struct sockaddr_un *sockaddr_un = NULL;
char *connect_path = NULL;
data_t *connect_path_data = NULL;
- int32_t ret = 0;
+ int32_t ret = -1;
- connect_path_data = dict_get(this->options,
- "transport.socket.connect-path");
+ connect_path_data = dict_get_sizen(this->options,
+ "transport.socket.connect-path");
if (!connect_path_data) {
gf_log(this->name, GF_LOG_ERROR,
"option transport.unix.connect-path not specified for "
"address-family unix");
- ret = -1;
goto err;
}
- connect_path = data_to_str(connect_path_data);
- if (!connect_path) {
+ /* 108 = sockaddr_un->sun_path length */
+ if ((connect_path_data->len + 1) > 108) {
gf_log(this->name, GF_LOG_ERROR,
- "transport.unix.connect-path is null-string");
- ret = -1;
+ "connect-path value length %d > %d octets",
+ connect_path_data->len + 1, UNIX_PATH_MAX);
goto err;
}
- if ((strlen(connect_path) + 1) > UNIX_PATH_MAX) {
+ connect_path = data_to_str(connect_path_data);
+ if (!connect_path) {
gf_log(this->name, GF_LOG_ERROR,
- "connect-path value length %" GF_PRI_SIZET " > %d octets",
- strlen(connect_path), UNIX_PATH_MAX);
- ret = -1;
+ "transport.unix.connect-path is null-string");
goto err;
}
@@ -315,6 +312,7 @@ af_unix_client_get_remote_sockaddr(rpc_transport_t *this,
strcpy(sockaddr_un->sun_path, connect_path);
*sockaddr_len = sizeof(struct sockaddr_un);
+ ret = 0;
err:
return ret;
}
@@ -328,7 +326,8 @@ af_unix_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr,
int32_t ret = 0;
struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
- listen_path_data = dict_get(this->options, "transport.socket.listen-path");
+ listen_path_data = dict_get_sizen(this->options,
+ "transport.socket.listen-path");
if (!listen_path_data) {
gf_log(this->name, GF_LOG_ERROR,
"missing option transport.socket.listen-path");
@@ -342,7 +341,7 @@ af_unix_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr,
#define UNIX_PATH_MAX 108
#endif
- if ((strlen(listen_path) + 1) > UNIX_PATH_MAX) {
+ if ((listen_path_data->len + 1) > UNIX_PATH_MAX) {
gf_log(this->name, GF_LOG_ERROR,
"option transport.unix.listen-path has value length "
"%" GF_PRI_SIZET " > %d",
@@ -375,12 +374,14 @@ af_inet_server_get_local_sockaddr(rpc_transport_t *this, struct sockaddr *addr,
options = this->options;
- listen_port_data = dict_get(options, "transport.socket.listen-port");
+ listen_port_data = dict_get_sizen(options, "transport.socket.listen-port");
if (listen_port_data) {
listen_port = data_to_uint16(listen_port_data);
+ } else {
+ listen_port = GF_DEFAULT_SOCKET_LISTEN_PORT;
}
- listen_host_data = dict_get(options, "transport.socket.bind-address");
+ listen_host_data = dict_get_sizen(options, "transport.socket.bind-address");
if (listen_host_data) {
listen_host = data_to_str(listen_host_data);
} else {
@@ -544,23 +545,24 @@ err:
return ret;
}
-int32_t
+static int32_t
server_fill_address_family(rpc_transport_t *this, sa_family_t *sa_family)
{
data_t *address_family_data = NULL;
int32_t ret = -1;
#ifdef IPV6_DEFAULT
- char *addr_family = "inet6";
+ const char *addr_family = "inet6";
sa_family_t default_family = AF_INET6;
#else
- char *addr_family = "inet";
+ const char *addr_family = "inet";
sa_family_t default_family = AF_INET;
#endif
GF_VALIDATE_OR_GOTO("socket", sa_family, out);
- address_family_data = dict_get(this->options, "transport.address-family");
+ address_family_data = dict_get_sizen(this->options,
+ "transport.address-family");
if (address_family_data) {
char *address_family = NULL;
address_family = data_to_str(address_family_data);
@@ -632,7 +634,7 @@ err:
return ret;
}
-int32_t
+static int32_t
fill_inet6_inet_identifiers(rpc_transport_t *this,
struct sockaddr_storage *addr, int32_t addr_len,
char *identifier)
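
Several of the name.c hunks above replace strlen() checks on unix-socket paths with checks against the length already stored in the dict entry, bounded by 108 bytes (the size of sockaddr_un.sun_path on Linux). The standalone sketch below shows the same constraint in isolation; the function name and sample path are illustrative, not taken from name.c.

    #include <stdio.h>
    #include <string.h>
    #include <sys/un.h>

    /* Reject any path that, with its NUL terminator, does not fit in
     * sun_path -- the same bound the patched code writes as
     * "(len + 1) > 108". */
    static int
    example_check_unix_path(const char *path)
    {
        struct sockaddr_un sun;

        memset(&sun, 0, sizeof(sun));
        if (strlen(path) + 1 > sizeof(sun.sun_path)) {
            fprintf(stderr, "'%s' too long for sun_path (%zu bytes)\n",
                    path, sizeof(sun.sun_path));
            return -1;
        }
        strcpy(sun.sun_path, path); /* safe: length checked above */
        return 0;
    }

    int
    main(void)
    {
        return example_check_unix_path("/tmp/example.socket") ? 1 : 0;
    }

Preferring path_data->len over strlen() presumably also saves a redundant scan of the string, since the dict entry already records the value's length.
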
diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index b1e9f5c07b6..ed8b473be23 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -11,7 +11,6 @@
#include "socket.h"
#include "name.h"
#include <glusterfs/dict.h>
-#include "rpc-transport.h"
#include <glusterfs/syscall.h>
#include <glusterfs/byte-order.h>
#include <glusterfs/compat-errno.h>
@@ -30,7 +29,6 @@
#include <netinet/tcp.h>
#endif
-#include <fcntl.h>
#include <errno.h>
#include <rpc/xdr.h>
#include <sys/ioctl.h>
@@ -193,7 +191,7 @@ socket_dump_info(struct sockaddr *sa, int is_server, int is_ssl, int sock,
0,
};
char *addr = NULL;
- char *peer_type = NULL;
+ const char *peer_type = NULL;
int af = sa->sa_family;
int so_error = -1;
socklen_t slen = sizeof(so_error);
@@ -248,7 +246,6 @@ ssl_do(rpc_transport_t *this, void *buf, size_t len, SSL_trinary_func *func)
int r = (-1);
socket_private_t *priv = NULL;
- GF_VALIDATE_OR_GOTO(this->name, this->private, out);
priv = this->private;
if (buf) {
@@ -358,14 +355,12 @@ ssl_set_crl_verify_flags(SSL_CTX *ssl_ctx)
#endif
}
-int
+static int
ssl_setup_connection_prefix(rpc_transport_t *this, gf_boolean_t server)
{
int ret = -1;
socket_private_t *priv = NULL;
- GF_VALIDATE_OR_GOTO(this->name, this->private, done);
-
priv = this->private;
if (ssl_setup_connection_params(this) < 0) {
@@ -417,7 +412,6 @@ ssl_setup_connection_postfix(rpc_transport_t *this)
char peer_CN[256] = "";
socket_private_t *priv = NULL;
- GF_VALIDATE_OR_GOTO(this->name, this->private, done);
priv = this->private;
/* Make sure _SSL verification_ succeeded, yielding an identity. */
@@ -439,6 +433,7 @@ ssl_setup_connection_postfix(rpc_transport_t *this)
gf_log(this->name, GF_LOG_DEBUG,
"SSL verification succeeded (client: %s) (server: %s)",
this->peerinfo.identifier, this->myinfo.identifier);
+ X509_free(peer);
return gf_strdup(peer_CN);
/* Error paths. */
@@ -450,11 +445,10 @@ ssl_error:
SSL_free(priv->ssl_ssl);
priv->ssl_ssl = NULL;
-done:
return NULL;
}
-int
+static int
ssl_complete_connection(rpc_transport_t *this)
{
int ret = -1; /* 1 : implies go back to epoll_wait()
@@ -693,7 +687,6 @@ __socket_rwv(rpc_transport_t *this, struct iovec *vector, int count,
int opcount = 0;
int moved = 0;
- GF_VALIDATE_OR_GOTO("socket", this, out);
GF_VALIDATE_OR_GOTO("socket", this->private, out);
priv = this->private;
@@ -826,24 +819,16 @@ static int
__socket_readv(rpc_transport_t *this, struct iovec *vector, int count,
struct iovec **pending_vector, int *pending_count, size_t *bytes)
{
- int ret = -1;
-
- ret = __socket_rwv(this, vector, count, pending_vector, pending_count,
- bytes, 0);
-
- return ret;
+ return __socket_rwv(this, vector, count, pending_vector, pending_count,
+ bytes, 0);
}
static int
__socket_writev(rpc_transport_t *this, struct iovec *vector, int count,
struct iovec **pending_vector, int *pending_count)
{
- int ret = -1;
-
- ret = __socket_rwv(this, vector, count, pending_vector, pending_count, NULL,
- 1);
-
- return ret;
+ return __socket_rwv(this, vector, count, pending_vector, pending_count,
+ NULL, 1);
}
static int
@@ -870,20 +855,14 @@ __socket_shutdown(rpc_transport_t *this)
static int
__socket_teardown_connection(rpc_transport_t *this)
{
- int ret = -1;
socket_private_t *priv = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
if (priv->use_ssl)
ssl_teardown_connection(priv);
- ret = __socket_shutdown(this);
-out:
- return ret;
+ return __socket_shutdown(this);
}
static int
@@ -892,9 +871,6 @@ __socket_disconnect(rpc_transport_t *this)
int ret = -1;
socket_private_t *priv = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
gf_log(this->name, GF_LOG_TRACE, "disconnecting %p, sock=%d", this,
@@ -911,7 +887,6 @@ __socket_disconnect(rpc_transport_t *this)
}
}
-out:
return ret;
}
@@ -928,9 +903,6 @@ __socket_server_bind(rpc_transport_t *this)
uint16_t sin_port = 0;
int retries = 0;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
ctx = this->ctx;
cmd_args = &ctx->cmd_args;
@@ -1158,9 +1130,6 @@ __socket_reset(rpc_transport_t *this)
{
socket_private_t *priv = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
/* TODO: use mem-pool on incoming data */
@@ -1180,7 +1149,15 @@ __socket_reset(rpc_transport_t *this)
memset(&priv->incoming, 0, sizeof(priv->incoming));
gf_event_unregister_close(this->ctx->event_pool, priv->sock, priv->idx);
-
+ if (priv->use_ssl && priv->ssl_ssl) {
+ SSL_clear(priv->ssl_ssl);
+ SSL_free(priv->ssl_ssl);
+ priv->ssl_ssl = NULL;
+ }
+ if (priv->ssl_ctx) {
+ SSL_CTX_free(priv->ssl_ctx);
+ priv->ssl_ctx = NULL;
+ }
priv->sock = -1;
priv->idx = -1;
priv->connected = -1;
@@ -1200,8 +1177,6 @@ __socket_reset(rpc_transport_t *this)
GF_FREE(priv->ssl_ca_list);
priv->ssl_ca_list = NULL;
}
-out:
- return;
}
static void
@@ -1231,8 +1206,6 @@ __socket_ioq_new(rpc_transport_t *this, rpc_transport_msg_t *msg)
int count = 0;
uint32_t size = 0;
- GF_VALIDATE_OR_GOTO("socket", this, out);
-
/* TODO: use mem-pool */
entry = GF_CALLOC(1, sizeof(*entry), gf_common_mt_ioq);
if (!entry)
@@ -1287,7 +1260,6 @@ __socket_ioq_new(rpc_transport_t *this, rpc_transport_msg_t *msg)
INIT_LIST_HEAD(&entry->list);
-out:
return entry;
}
@@ -1308,27 +1280,18 @@ out:
}
static void
-__socket_ioq_flush(rpc_transport_t *this)
+__socket_ioq_flush(socket_private_t *priv)
{
- socket_private_t *priv = NULL;
struct ioq *entry = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
- priv = this->private;
-
while (!list_empty(&priv->ioq)) {
entry = priv->ioq_next;
__socket_ioq_entry_free(entry);
}
-
-out:
- return;
}
static int
-__socket_ioq_churn_entry(rpc_transport_t *this, struct ioq *entry, int direct)
+__socket_ioq_churn_entry(rpc_transport_t *this, struct ioq *entry)
{
int ret = -1;
@@ -1351,16 +1314,13 @@ __socket_ioq_churn(rpc_transport_t *this)
int ret = 0;
struct ioq *entry = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
while (!list_empty(&priv->ioq)) {
/* pick next entry */
entry = priv->ioq_next;
- ret = __socket_ioq_churn_entry(this, entry, 0);
+ ret = __socket_ioq_churn_entry(this, entry);
if (ret != 0)
break;
@@ -1372,7 +1332,6 @@ __socket_ioq_churn(rpc_transport_t *this)
priv->idx, -1, 0);
}
-out:
return ret;
}
@@ -1382,15 +1341,12 @@ socket_event_poll_err(rpc_transport_t *this, int gen, int idx)
socket_private_t *priv = NULL;
gf_boolean_t socket_closed = _gf_false;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
pthread_mutex_lock(&priv->out_lock);
{
if ((priv->gen == gen) && (priv->idx == idx) && (priv->sock >= 0)) {
- __socket_ioq_flush(this);
+ __socket_ioq_flush(priv);
__socket_reset(this);
socket_closed = _gf_true;
}
@@ -1408,7 +1364,6 @@ socket_event_poll_err(rpc_transport_t *this, int gen, int idx)
rpc_transport_notify(this, RPC_TRANSPORT_DISCONNECT, this);
}
-out:
return socket_closed;
}
@@ -1418,9 +1373,6 @@ socket_event_poll_out(rpc_transport_t *this)
socket_private_t *priv = NULL;
int ret = -1;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
pthread_mutex_lock(&priv->out_lock);
@@ -1443,7 +1395,7 @@ socket_event_poll_out(rpc_transport_t *this)
if (ret > 0)
ret = 0;
-out:
+
return ret;
}
@@ -1513,12 +1465,6 @@ out:
return ret;
}
-static int
-__socket_read_simple_request(rpc_transport_t *this)
-{
- return __socket_read_simple_msg(this);
-}
-
#define rpc_cred_addr(buf) (buf + RPC_MSGTYPE_SIZE + RPC_CALL_BODY_SIZE - 4)
#define rpc_verf_addr(fragcurrent) (fragcurrent - 4)
@@ -1545,9 +1491,6 @@ __socket_read_vectored_request(rpc_transport_t *this,
struct gf_sock_incoming_frag *frag = NULL;
sp_rpcfrag_request_state_t *request = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
/* used to reduce the indirection */
@@ -1694,7 +1637,6 @@ __socket_read_vectored_request(rpc_transport_t *this,
break;
}
-out:
return ret;
}
@@ -1711,9 +1653,6 @@ __socket_read_request(rpc_transport_t *this)
struct gf_sock_incoming_frag *frag = NULL;
sp_rpcfrag_request_state_t *request = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
/* used to reduce the indirection */
@@ -1757,7 +1696,7 @@ __socket_read_request(rpc_transport_t *this)
if (vector_sizer) {
ret = __socket_read_vectored_request(this, vector_sizer);
} else {
- ret = __socket_read_simple_request(this);
+ ret = __socket_read_simple_msg(this);
}
remaining_size = RPC_FRAGSIZE(in->fraghdr) - frag->bytes_read;
@@ -1770,7 +1709,6 @@ __socket_read_request(rpc_transport_t *this)
break;
}
-out:
return ret;
}
@@ -1790,9 +1728,6 @@ __socket_read_accepted_successful_reply(rpc_transport_t *this)
struct gf_sock_incoming_frag *frag = NULL;
uint32_t remaining_size = 0;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
/* used to reduce the indirection */
@@ -1922,9 +1857,6 @@ __socket_read_accepted_successful_reply_v2(rpc_transport_t *this)
struct gf_sock_incoming_frag *frag = NULL;
uint32_t remaining_size = 0;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
/* used to reduce the indirection */
@@ -2053,9 +1985,6 @@ __socket_read_accepted_reply(rpc_transport_t *this)
struct gf_sock_incoming *in = NULL;
struct gf_sock_incoming_frag *frag = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
/* used to reduce the indirection */
in = &priv->incoming;
@@ -2135,7 +2064,6 @@ __socket_read_accepted_reply(rpc_transport_t *this)
break;
}
-out:
return ret;
}
@@ -2157,9 +2085,6 @@ __socket_read_vectored_reply(rpc_transport_t *this)
struct gf_sock_incoming *in = NULL;
struct gf_sock_incoming_frag *frag = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
in = &priv->incoming;
frag = &in->frag;
@@ -2203,7 +2128,6 @@ __socket_read_vectored_reply(rpc_transport_t *this)
break;
}
-out:
return ret;
}
@@ -2226,9 +2150,6 @@ __socket_read_reply(rpc_transport_t *this)
struct gf_sock_incoming *in = NULL;
struct gf_sock_incoming_frag *frag = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
in = &priv->incoming;
frag = &in->frag;
@@ -2294,9 +2215,6 @@ __socket_read_frag(rpc_transport_t *this)
struct gf_sock_incoming *in = NULL;
struct gf_sock_incoming_frag *frag = NULL;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
/* used to reduce the indirection */
in = &priv->incoming;
@@ -2354,7 +2272,6 @@ __socket_read_frag(rpc_transport_t *this)
break;
}
-out:
return ret;
}
@@ -2568,14 +2485,7 @@ static int
socket_proto_state_machine(rpc_transport_t *this,
rpc_transport_pollin_t **pollin)
{
- int ret = -1;
-
- GF_VALIDATE_OR_GOTO("socket", this, out);
-
- ret = __socket_proto_state_machine(this, pollin);
-
-out:
- return ret;
+ return __socket_proto_state_machine(this, pollin);
}
static void
@@ -2644,9 +2554,6 @@ socket_connect_finish(rpc_transport_t *this)
rpc_transport_event_t event = 0;
char notify_rpc = 0;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
pthread_mutex_lock(&priv->out_lock);
@@ -2701,7 +2608,7 @@ unlock:
if (notify_rpc) {
rpc_transport_notify(this, event, this);
}
-out:
+
return ret;
}
@@ -2710,12 +2617,8 @@ socket_disconnect(rpc_transport_t *this, gf_boolean_t wait);
/* socket_is_connected() is for use only in socket_event_handler() */
static inline gf_boolean_t
-socket_is_connected(rpc_transport_t *this)
+socket_is_connected(socket_private_t *priv)
{
- socket_private_t *priv = NULL;
-
- priv = this->private;
-
if (priv->use_ssl) {
return priv->is_server ? priv->ssl_accepted : priv->ssl_connected;
} else {
@@ -2812,7 +2715,7 @@ ssl_handle_client_connection_attempt(rpc_transport_t *this)
/* SSL client */
if (priv->connect_failed) {
gf_log(this->name, GF_LOG_TRACE, ">>> disconnecting SSL socket");
- ret = socket_disconnect(this, _gf_false);
+ (void)socket_disconnect(this, _gf_false);
/* Force ret to be -1, as we are officially done with
this socket */
ret = -1;
@@ -2842,7 +2745,7 @@ ssl_handle_client_connection_attempt(rpc_transport_t *this)
ret = 1;
} else {
/* this is a connection failure */
- ret = socket_connect_finish(this);
+ (void)socket_connect_finish(this);
gf_log(this->name, GF_LOG_TRACE,
"ssl_complete_connection "
"returned error");
@@ -2991,7 +2894,7 @@ socket_event_handler(int fd, int idx, int gen, void *data, int poll_in,
poll_out, poll_err);
if (!poll_err) {
- if (!socket_is_connected(this)) {
+ if (!socket_is_connected(priv)) {
gf_log(this->name, GF_LOG_TRACE,
"%s (sock:%d) socket is not connected, "
"completing connection",
@@ -3047,6 +2950,13 @@ socket_event_handler(int fd, int idx, int gen, void *data, int poll_in,
socket_dump_info(sa, priv->is_server, priv->use_ssl, priv->sock,
this->name, "disconnecting from");
+ /* Dump the SSL error stack to clear any errors that may otherwise
+ * resurface in the future.
+ */
+ if (priv->use_ssl && priv->ssl_ssl) {
+ ssl_dump_error_stack(this->name);
+ }
+
/* Logging has happened already in earlier cases */
gf_log("transport", ((ret >= 0) ? GF_LOG_INFO : GF_LOG_DEBUG),
"EPOLLERR - disconnecting (sock:%d) (%s)", priv->sock,
@@ -3127,23 +3037,25 @@ socket_server_event_handler(int fd, int idx, int gen, void *data, int poll_in,
goto out;
}
- if (priv->nodelay && (new_sockaddr.ss_family != AF_UNIX)) {
- ret = __socket_nodelay(new_sock);
- if (ret != 0) {
- gf_log(this->name, GF_LOG_WARNING,
- "setsockopt() failed for "
- "NODELAY (%s)",
- strerror(errno));
+ if (new_sockaddr.ss_family != AF_UNIX) {
+ if (priv->nodelay) {
+ ret = __socket_nodelay(new_sock);
+ if (ret != 0) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "setsockopt() failed for "
+ "NODELAY (%s)",
+ strerror(errno));
+ }
}
- }
- if (priv->keepalive && new_sockaddr.ss_family != AF_UNIX) {
- ret = __socket_keepalive(new_sock, new_sockaddr.ss_family,
- priv->keepaliveintvl, priv->keepaliveidle,
- priv->keepalivecnt, priv->timeout);
- if (ret != 0)
- gf_log(this->name, GF_LOG_WARNING,
- "Failed to set keep-alive: %s", strerror(errno));
+ if (priv->keepalive) {
+ ret = __socket_keepalive(
+ new_sock, new_sockaddr.ss_family, priv->keepaliveintvl,
+ priv->keepaliveidle, priv->keepalivecnt, priv->timeout);
+ if (ret != 0)
+ gf_log(this->name, GF_LOG_WARNING,
+ "Failed to set keep-alive: %s", strerror(errno));
+ }
}
new_trans = GF_CALLOC(1, sizeof(*new_trans), gf_common_mt_rpc_trans_t);
@@ -3257,7 +3169,6 @@ socket_server_event_handler(int fd, int idx, int gen, void *data, int poll_in,
new_priv->sock = new_sock;
new_priv->ssl_enabled = priv->ssl_enabled;
- new_priv->ssl_ctx = priv->ssl_ctx;
new_priv->connected = 1;
new_priv->is_server = _gf_true;
@@ -3341,9 +3252,6 @@ socket_disconnect(rpc_transport_t *this, gf_boolean_t wait)
socket_private_t *priv = NULL;
int ret = -1;
- GF_VALIDATE_OR_GOTO("socket", this, out);
- GF_VALIDATE_OR_GOTO("socket", this->private, out);
-
priv = this->private;
pthread_mutex_lock(&priv->out_lock);
@@ -3352,7 +3260,6 @@ socket_disconnect(rpc_transport_t *this, gf_boolean_t wait)
}
pthread_mutex_unlock(&priv->out_lock);
-out:
return ret;
}
@@ -3528,30 +3435,32 @@ socket_connect(rpc_transport_t *this, int port)
}
#endif
- if (priv->nodelay && (sa_family != AF_UNIX)) {
- ret = __socket_nodelay(priv->sock);
-
- if (ret != 0) {
- gf_log(this->name, GF_LOG_ERROR, "NODELAY on %d failed (%s)",
- priv->sock, strerror(errno));
+ if (sa_family != AF_UNIX) {
+ if (priv->nodelay) {
+ ret = __socket_nodelay(priv->sock);
+ if (ret != 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "NODELAY on %d failed (%s)", priv->sock,
+ strerror(errno));
+ }
}
- }
- if (priv->keepalive && sa_family != AF_UNIX) {
- ret = __socket_keepalive(priv->sock, sa_family,
- priv->keepaliveintvl, priv->keepaliveidle,
- priv->keepalivecnt, priv->timeout);
- if (ret != 0)
- gf_log(this->name, GF_LOG_ERROR, "Failed to set keep-alive: %s",
- strerror(errno));
+ if (priv->keepalive) {
+ ret = __socket_keepalive(
+ priv->sock, sa_family, priv->keepaliveintvl,
+ priv->keepaliveidle, priv->keepalivecnt, priv->timeout);
+ if (ret != 0)
+ gf_log(this->name, GF_LOG_ERROR,
+ "Failed to set keep-alive: %s", strerror(errno));
+ }
}
SA(&this->myinfo.sockaddr)->sa_family = SA(&this->peerinfo.sockaddr)
->sa_family;
/* If a source addr is explicitly specified, use it */
- ret = dict_get_str(this->options, "transport.socket.source-addr",
- &local_addr);
+ ret = dict_get_str_sizen(this->options, "transport.socket.source-addr",
+ &local_addr);
if (!ret && SA(&this->myinfo.sockaddr)->sa_family == AF_INET) {
addr = (struct sockaddr_in *)(&this->myinfo.sockaddr);
ret = inet_pton(AF_INET, local_addr, &(addr->sin_addr.s_addr));
@@ -3808,6 +3717,7 @@ socket_listen(rpc_transport_t *this)
}
}
+ /* coverity[SLEEP] */
ret = __socket_server_bind(this);
if (ret < 0) {
@@ -3893,7 +3803,7 @@ socket_submit_outgoing_msg(rpc_transport_t *this, rpc_transport_msg_t *msg)
goto unlock;
if (list_empty(&priv->ioq)) {
- ret = __socket_ioq_churn_entry(this, entry, 1);
+ ret = __socket_ioq_churn_entry(this, entry);
if (ret == 0) {
need_append = 0;
@@ -4053,31 +3963,23 @@ reconfigure(rpc_transport_t *this, dict_t *options)
socket_private_t *priv = NULL;
gf_boolean_t tmp_bool = _gf_false;
char *optstr = NULL;
- int ret = 0;
+ int ret = -1;
uint32_t backlog = 0;
uint64_t windowsize = 0;
- uint32_t timeout = GF_NETWORK_TIMEOUT;
- int keepaliveidle = GF_KEEPALIVE_TIME;
- int keepaliveintvl = GF_KEEPALIVE_INTERVAL;
- int keepalivecnt = GF_KEEPALIVE_COUNT;
+ data_t *data;
GF_VALIDATE_OR_GOTO("socket", this, out);
GF_VALIDATE_OR_GOTO("socket", this->private, out);
- if (!this || !this->private) {
- ret = -1;
- goto out;
- }
-
priv = this->private;
- if (dict_get_str(options, "transport.socket.keepalive", &optstr) == 0) {
+ if (dict_get_str_sizen(options, "transport.socket.keepalive", &optstr) ==
+ 0) {
if (gf_string2boolean(optstr, &tmp_bool) != 0) {
gf_log(this->name, GF_LOG_ERROR,
"'transport.socket.keepalive' takes only "
"boolean options, not taking any action");
priv->keepalive = 1;
- ret = -1;
goto out;
}
gf_log(this->name, GF_LOG_DEBUG,
@@ -4087,48 +3989,43 @@ reconfigure(rpc_transport_t *this, dict_t *options)
} else
priv->keepalive = 1;
- if (dict_get_int32(options, "transport.tcp-user-timeout",
- &(priv->timeout)) != 0)
- priv->timeout = timeout;
+ if (dict_get_int32_sizen(options, "transport.tcp-user-timeout",
+ &(priv->timeout)) != 0)
+ priv->timeout = GF_NETWORK_TIMEOUT;
gf_log(this->name, GF_LOG_DEBUG,
- "Reconfigued "
- "transport.tcp-user-timeout=%d",
- priv->timeout);
+ "Reconfigured transport.tcp-user-timeout=%d", priv->timeout);
if (dict_get_uint32(options, "transport.listen-backlog", &backlog) == 0) {
priv->backlog = backlog;
gf_log(this->name, GF_LOG_DEBUG,
- "Reconfigued "
- "transport.listen-backlog=%d",
- priv->backlog);
+ "Reconfigured transport.listen-backlog=%d", priv->backlog);
}
- if (dict_get_int32(options, "transport.socket.keepalive-time",
- &(priv->keepaliveidle)) != 0)
- priv->keepaliveidle = keepaliveidle;
- gf_log(this->name, GF_LOG_DEBUG,
- "Reconfigued "
- "transport.socket.keepalive-time=%d",
- priv->keepaliveidle);
+ if (priv->keepalive) {
+ if (dict_get_int32_sizen(options, "transport.socket.keepalive-time",
+ &(priv->keepaliveidle)) != 0)
+ priv->keepaliveidle = GF_KEEPALIVE_TIME;
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Reconfigured transport.socket.keepalive-time=%d",
+ priv->keepaliveidle);
- if (dict_get_int32(options, "transport.socket.keepalive-interval",
- &(priv->keepaliveintvl)) != 0)
- priv->keepaliveintvl = keepaliveintvl;
- gf_log(this->name, GF_LOG_DEBUG,
- "Reconfigued "
- "transport.socket.keepalive-interval=%d",
- priv->keepaliveintvl);
+ if (dict_get_int32_sizen(options, "transport.socket.keepalive-interval",
+ &(priv->keepaliveintvl)) != 0)
+ priv->keepaliveintvl = GF_KEEPALIVE_INTERVAL;
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Reconfigured transport.socket.keepalive-interval=%d",
+ priv->keepaliveintvl);
- if (dict_get_int32(options, "transport.socket.keepalive-count",
- &(priv->keepalivecnt)) != 0)
- priv->keepalivecnt = keepalivecnt;
- gf_log(this->name, GF_LOG_DEBUG,
- "Reconfigued "
- "transport.socket.keepalive-count=%d",
- priv->keepalivecnt);
+ if (dict_get_int32_sizen(options, "transport.socket.keepalive-count",
+ &(priv->keepalivecnt)) != 0)
+ priv->keepalivecnt = GF_KEEPALIVE_COUNT;
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Reconfigured transport.socket.keepalive-count=%d",
+ priv->keepalivecnt);
+ }
optstr = NULL;
- if (dict_get_str(options, "tcp-window-size", &optstr) == 0) {
+ if (dict_get_str_sizen(options, "tcp-window-size", &optstr) == 0) {
if (gf_string2uint64(optstr, &windowsize) != 0) {
gf_log(this->name, GF_LOG_ERROR, "invalid number format: %s",
optstr);
@@ -4138,8 +4035,9 @@ reconfigure(rpc_transport_t *this, dict_t *options)
priv->windowsize = (int)windowsize;
- if (dict_get(options, "non-blocking-io")) {
- optstr = data_to_str(dict_get(options, "non-blocking-io"));
+ data = dict_get_sizen(options, "non-blocking-io");
+ if (data) {
+ optstr = data_to_str(data);
if (gf_string2boolean(optstr, &tmp_bool) != 0) {
gf_log(this->name, GF_LOG_ERROR,
@@ -4270,6 +4168,34 @@ static void __attribute__((destructor)) fini_openssl_mt(void)
ERR_free_strings();
}
+/* The function returns 0 if AES bit is enabled on the CPU */
+static int
+ssl_check_aes_bit(void)
+{
+ FILE *fp = fopen("/proc/cpuinfo", "r");
+ int ret = 1;
+ size_t len = 0;
+ char *line = NULL;
+ char *match = NULL;
+
+ GF_ASSERT(fp != NULL);
+
+ while (getline(&line, &len, fp) > 0) {
+ if (!strncmp(line, "flags", 5)) {
+ match = strstr(line, " aes");
+ if ((match != NULL) && ((match[4] == ' ') || (match[4] == 0))) {
+ ret = 0;
+ break;
+ }
+ }
+ }
+
+ free(line);
+ fclose(fp);
+
+ return ret;
+}
+
static int
ssl_setup_connection_params(rpc_transport_t *this)
{
@@ -4280,6 +4206,7 @@ ssl_setup_connection_params(rpc_transport_t *this)
char *cipher_list = DEFAULT_CIPHER_LIST;
char *dh_param = DEFAULT_DH_PARAM;
char *ec_curve = DEFAULT_EC_CURVE;
+ gf_boolean_t dh_flag = _gf_false;
priv = this->private;
@@ -4288,8 +4215,16 @@ ssl_setup_connection_params(rpc_transport_t *this)
return 0;
}
+ if (!priv->ssl_enabled && !priv->mgmt_ssl) {
+ return 0;
+ }
+
+ if (!ssl_check_aes_bit()) {
+ cipher_list = "AES128:" DEFAULT_CIPHER_LIST;
+ }
+
priv->ssl_own_cert = DEFAULT_CERT_PATH;
- if (dict_get_str(this->options, SSL_OWN_CERT_OPT, &optstr) == 0) {
+ if (dict_get_str_sizen(this->options, SSL_OWN_CERT_OPT, &optstr) == 0) {
if (!priv->ssl_enabled) {
gf_log(this->name, GF_LOG_WARNING,
"%s specified without %s (ignored)", SSL_OWN_CERT_OPT,
@@ -4300,7 +4235,7 @@ ssl_setup_connection_params(rpc_transport_t *this)
priv->ssl_own_cert = gf_strdup(priv->ssl_own_cert);
priv->ssl_private_key = DEFAULT_KEY_PATH;
- if (dict_get_str(this->options, SSL_PRIVATE_KEY_OPT, &optstr) == 0) {
+ if (dict_get_str_sizen(this->options, SSL_PRIVATE_KEY_OPT, &optstr) == 0) {
if (!priv->ssl_enabled) {
gf_log(this->name, GF_LOG_WARNING,
"%s specified without %s (ignored)", SSL_PRIVATE_KEY_OPT,
@@ -4311,7 +4246,7 @@ ssl_setup_connection_params(rpc_transport_t *this)
priv->ssl_private_key = gf_strdup(priv->ssl_private_key);
priv->ssl_ca_list = DEFAULT_CA_PATH;
- if (dict_get_str(this->options, SSL_CA_LIST_OPT, &optstr) == 0) {
+ if (dict_get_str_sizen(this->options, SSL_CA_LIST_OPT, &optstr) == 0) {
if (!priv->ssl_enabled) {
gf_log(this->name, GF_LOG_WARNING,
"%s specified without %s (ignored)", SSL_CA_LIST_OPT,
@@ -4322,7 +4257,7 @@ ssl_setup_connection_params(rpc_transport_t *this)
priv->ssl_ca_list = gf_strdup(priv->ssl_ca_list);
optstr = NULL;
- if (dict_get_str(this->options, SSL_CRL_PATH_OPT, &optstr) == 0) {
+ if (dict_get_str_sizen(this->options, SSL_CRL_PATH_OPT, &optstr) == 0) {
if (!priv->ssl_enabled) {
gf_log(this->name, GF_LOG_WARNING,
"%s specified without %s (ignored)", SSL_CRL_PATH_OPT,
@@ -4334,30 +4269,28 @@ ssl_setup_connection_params(rpc_transport_t *this)
priv->crl_path = gf_strdup(optstr);
}
- gf_log(this->name, priv->ssl_enabled ? GF_LOG_INFO : GF_LOG_DEBUG,
- "SSL support on the I/O path is %s",
- priv->ssl_enabled ? "ENABLED" : "NOT enabled");
- gf_log(this->name, priv->mgmt_ssl ? GF_LOG_INFO : GF_LOG_DEBUG,
- "SSL support for glusterd is %s",
- priv->mgmt_ssl ? "ENABLED" : "NOT enabled");
-
if (!priv->mgmt_ssl) {
- if (!dict_get_int32(this->options, SSL_CERT_DEPTH_OPT, &cert_depth)) {
- gf_log(this->name, GF_LOG_INFO, "using certificate depth %d",
- cert_depth);
+ if (!dict_get_int32_sizen(this->options, SSL_CERT_DEPTH_OPT,
+ &cert_depth)) {
}
} else {
cert_depth = this->ctx->ssl_cert_depth;
- gf_log(this->name, GF_LOG_INFO, "using certificate depth %d",
- cert_depth);
}
- if (!dict_get_str(this->options, SSL_CIPHER_LIST_OPT, &cipher_list)) {
+ gf_log(this->name, priv->ssl_enabled ? GF_LOG_INFO : GF_LOG_DEBUG,
+ "SSL support for MGMT is %s IO path is %s certificate depth is %d "
+ "for peer %s",
+ (priv->mgmt_ssl ? "ENABLED" : "NOT enabled"),
+ (priv->ssl_enabled ? "ENABLED" : "NOT enabled"), cert_depth,
+ this->peerinfo.identifier);
+
+ if (!dict_get_str_sizen(this->options, SSL_CIPHER_LIST_OPT, &cipher_list)) {
gf_log(this->name, GF_LOG_INFO, "using cipher list %s", cipher_list);
}
- if (!dict_get_str(this->options, SSL_DH_PARAM_OPT, &dh_param)) {
+ if (!dict_get_str_sizen(this->options, SSL_DH_PARAM_OPT, &dh_param)) {
+ dh_flag = _gf_true;
gf_log(this->name, GF_LOG_INFO, "using DH parameters %s", dh_param);
}
- if (!dict_get_str(this->options, SSL_EC_CURVE_OPT, &ec_curve)) {
+ if (!dict_get_str_sizen(this->options, SSL_EC_CURVE_OPT, &ec_curve)) {
gf_log(this->name, GF_LOG_INFO, "using EC curve %s", ec_curve);
}
@@ -4389,12 +4322,15 @@ ssl_setup_connection_params(rpc_transport_t *this)
#ifdef SSL_OP_NO_COMPRESSION
SSL_CTX_set_options(priv->ssl_ctx, SSL_OP_NO_COMPRESSION);
#endif
-
- if ((bio = BIO_new_file(dh_param, "r")) == NULL) {
- gf_log(this->name, GF_LOG_INFO,
- "failed to open %s, "
- "DH ciphers are disabled",
- dh_param);
+ /* Upload file to bio wrapper only if dh param is configured
+ */
+ if (dh_flag) {
+ if ((bio = BIO_new_file(dh_param, "r")) == NULL) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to open %s, "
+ "DH ciphers are disabled",
+ dh_param);
+ }
}
if (bio != NULL) {
@@ -4511,22 +4447,17 @@ socket_init(rpc_transport_t *this)
gf_boolean_t tmp_bool = 0;
uint64_t windowsize = GF_DEFAULT_SOCKET_WINDOW_SIZE;
char *optstr = NULL;
- uint32_t timeout = GF_NETWORK_TIMEOUT;
- int keepaliveidle = GF_KEEPALIVE_TIME;
- int keepaliveintvl = GF_KEEPALIVE_INTERVAL;
- int keepalivecnt = GF_KEEPALIVE_COUNT;
- uint32_t backlog = 0;
+ data_t *data;
if (this->private) {
gf_log_callingfn(this->name, GF_LOG_ERROR, "double init attempted");
return -1;
}
- priv = GF_MALLOC(sizeof(*priv), gf_common_mt_socket_private_t);
+ priv = GF_CALLOC(1, sizeof(*priv), gf_common_mt_socket_private_t);
if (!priv) {
return -1;
}
- memset(priv, 0, sizeof(*priv));
this->private = priv;
pthread_mutex_init(&priv->out_lock, NULL);
@@ -4551,8 +4482,9 @@ socket_init(rpc_transport_t *this)
if (!this->options)
goto out;
- if (dict_get(this->options, "non-blocking-io")) {
- optstr = data_to_str(dict_get(this->options, "non-blocking-io"));
+ data = dict_get_sizen(this->options, "non-blocking-io");
+ if (data) {
+ optstr = data_to_str(data);
if (gf_string2boolean(optstr, &tmp_bool) != 0) {
gf_log(this->name, GF_LOG_ERROR,
@@ -4570,9 +4502,9 @@ socket_init(rpc_transport_t *this)
optstr = NULL;
/* By default, we enable NODELAY */
- if (dict_get(this->options, "transport.socket.nodelay")) {
- optstr = data_to_str(
- dict_get(this->options, "transport.socket.nodelay"));
+ data = dict_get_sizen(this->options, "transport.socket.nodelay");
+ if (data) {
+ optstr = data_to_str(data);
if (gf_string2boolean(optstr, &tmp_bool) != 0) {
gf_log(this->name, GF_LOG_ERROR,
@@ -4587,7 +4519,7 @@ socket_init(rpc_transport_t *this)
}
optstr = NULL;
- if (dict_get_str(this->options, "tcp-window-size", &optstr) == 0) {
+ if (dict_get_str_sizen(this->options, "tcp-window-size", &optstr) == 0) {
if (gf_string2uint64(optstr, &windowsize) != 0) {
gf_log(this->name, GF_LOG_ERROR, "invalid number format: %s",
optstr);
@@ -4603,8 +4535,8 @@ socket_init(rpc_transport_t *this)
priv->keepaliveintvl = GF_KEEPALIVE_INTERVAL;
priv->keepaliveidle = GF_KEEPALIVE_TIME;
priv->keepalivecnt = GF_KEEPALIVE_COUNT;
- if (dict_get_str(this->options, "transport.socket.keepalive", &optstr) ==
- 0) {
+ if (dict_get_str_sizen(this->options, "transport.socket.keepalive",
+ &optstr) == 0) {
if (gf_string2boolean(optstr, &tmp_bool) != 0) {
gf_log(this->name, GF_LOG_ERROR,
"'transport.socket.keepalive' takes only "
@@ -4616,45 +4548,45 @@ socket_init(rpc_transport_t *this)
priv->keepalive = 0;
}
- if (dict_get_int32(this->options, "transport.tcp-user-timeout",
- &(priv->timeout)) != 0)
- priv->timeout = timeout;
- gf_log(this->name, GF_LOG_DEBUG,
- "Configued "
- "transport.tcp-user-timeout=%d",
+ if (dict_get_int32_sizen(this->options, "transport.tcp-user-timeout",
+ &(priv->timeout)) != 0)
+ priv->timeout = GF_NETWORK_TIMEOUT;
+ gf_log(this->name, GF_LOG_DEBUG, "Configured transport.tcp-user-timeout=%d",
priv->timeout);
- if (dict_get_int32(this->options, "transport.socket.keepalive-time",
- &(priv->keepaliveidle)) != 0) {
- priv->keepaliveidle = keepaliveidle;
- }
+ if (priv->keepalive) {
+ if (dict_get_int32_sizen(this->options,
+ "transport.socket.keepalive-time",
+ &(priv->keepaliveidle)) != 0) {
+ priv->keepaliveidle = GF_KEEPALIVE_TIME;
+ }
- if (dict_get_int32(this->options, "transport.socket.keepalive-interval",
- &(priv->keepaliveintvl)) != 0) {
- priv->keepaliveintvl = keepaliveintvl;
- }
+ if (dict_get_int32_sizen(this->options,
+ "transport.socket.keepalive-interval",
+ &(priv->keepaliveintvl)) != 0) {
+ priv->keepaliveintvl = GF_KEEPALIVE_INTERVAL;
+ }
- if (dict_get_int32(this->options, "transport.socket.keepalive-count",
- &(priv->keepalivecnt)) != 0)
- priv->keepalivecnt = keepalivecnt;
- gf_log(this->name, GF_LOG_DEBUG,
- "Reconfigued "
- "transport.keepalivecnt=%d",
- keepalivecnt);
+ if (dict_get_int32_sizen(this->options,
+ "transport.socket.keepalive-count",
+ &(priv->keepalivecnt)) != 0)
+ priv->keepalivecnt = GF_KEEPALIVE_COUNT;
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Reconfigured transport.keepalivecnt=%d", priv->keepalivecnt);
+ }
- if (dict_get_uint32(this->options, "transport.listen-backlog", &backlog) !=
- 0) {
- backlog = GLUSTERFS_SOCKET_LISTEN_BACKLOG;
+ if (dict_get_uint32(this->options, "transport.listen-backlog",
+ &(priv->backlog)) != 0) {
+ priv->backlog = GLUSTERFS_SOCKET_LISTEN_BACKLOG;
}
- priv->backlog = backlog;
optstr = NULL;
/* Check if socket read failures are to be logged */
priv->read_fail_log = 1;
- if (dict_get(this->options, "transport.socket.read-fail-log")) {
- optstr = data_to_str(
- dict_get(this->options, "transport.socket.read-fail-log"));
+ data = dict_get_sizen(this->options, "transport.socket.read-fail-log");
+ if (data) {
+ optstr = data_to_str(data);
if (gf_string2boolean(optstr, &tmp_bool) != 0) {
gf_log(this->name, GF_LOG_WARNING,
"'transport.socket.read-fail-log' takes only "
@@ -4667,7 +4599,7 @@ socket_init(rpc_transport_t *this)
priv->windowsize = (int)windowsize;
priv->ssl_enabled = _gf_false;
- if (dict_get_str(this->options, SSL_ENABLED_OPT, &optstr) == 0) {
+ if (dict_get_str_sizen(this->options, SSL_ENABLED_OPT, &optstr) == 0) {
if (gf_string2boolean(optstr, &priv->ssl_enabled) != 0) {
gf_log(this->name, GF_LOG_ERROR,
"invalid value given for ssl-enabled boolean");
@@ -4695,7 +4627,7 @@ fini(rpc_transport_t *this)
if (priv->sock >= 0) {
pthread_mutex_lock(&priv->out_lock);
{
- __socket_ioq_flush(this);
+ __socket_ioq_flush(priv);
__socket_reset(this);
}
pthread_mutex_unlock(&priv->out_lock);
@@ -4705,6 +4637,21 @@ fini(rpc_transport_t *this)
pthread_mutex_destroy(&priv->out_lock);
pthread_mutex_destroy(&priv->cond_lock);
pthread_cond_destroy(&priv->cond);
+
+ GF_ASSERT(priv->notify.in_progress == 0);
+ pthread_mutex_destroy(&priv->notify.lock);
+ pthread_cond_destroy(&priv->notify.cond);
+
+ if (priv->use_ssl && priv->ssl_ssl) {
+ SSL_clear(priv->ssl_ssl);
+ SSL_free(priv->ssl_ssl);
+ priv->ssl_ssl = NULL;
+ }
+ if (priv->ssl_ctx) {
+ SSL_CTX_free(priv->ssl_ctx);
+ priv->ssl_ctx = NULL;
+ }
+
if (priv->ssl_private_key) {
GF_FREE(priv->ssl_private_key);
}
@@ -4779,7 +4726,6 @@ struct volume_options options[] = {
{.key = {"transport.socket.nodelay"},
.type = GF_OPTION_TYPE_BOOL,
.default_value = "1"},
- {.key = {"transport.socket.lowlat"}, .type = GF_OPTION_TYPE_BOOL},
{.key = {"transport.socket.keepalive"},
.type = GF_OPTION_TYPE_BOOL,
.op_version = {1},
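
Both socket_server_event_handler() and socket_connect() above now apply NODELAY and keep-alive settings only inside a single `ss_family != AF_UNIX` branch, passing the configured interval, idle time, count and timeout to __socket_keepalive(). That helper is outside this patch; the sketch below shows the standard Linux socket options such a helper typically wraps, so the option names and the millisecond unit for TCP_USER_TIMEOUT are assumptions about conventional usage rather than a copy of the real function.

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Illustrative stand-in for a keep-alive helper: enable SO_KEEPALIVE,
     * then tune the probe timers where the platform supports it. */
    static int
    example_set_keepalive(int sock, int idle_s, int intvl_s, int cnt,
                          int user_timeout_ms)
    {
        int on = 1;

        if (setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
            return -1;
    #ifdef TCP_KEEPIDLE
        setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, &idle_s, sizeof(idle_s));
    #endif
    #ifdef TCP_KEEPINTVL
        setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL, &intvl_s, sizeof(intvl_s));
    #endif
    #ifdef TCP_KEEPCNT
        setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
    #endif
    #ifdef TCP_USER_TIMEOUT
        setsockopt(sock, IPPROTO_TCP, TCP_USER_TIMEOUT, &user_timeout_ms,
                   sizeof(user_timeout_ms));
    #endif
        return 0;
    }

Grouping both options under one address-family check also makes it explicit that neither option is meaningful for AF_UNIX sockets.
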
diff --git a/rpc/rpc-transport/socket/src/socket.h b/rpc/rpc-transport/socket/src/socket.h
index 44a727cc4a1..8a2eda70605 100644
--- a/rpc/rpc-transport/socket/src/socket.h
+++ b/rpc/rpc-transport/socket/src/socket.h
@@ -23,7 +23,6 @@
#endif
#include "rpc-transport.h"
-#include <glusterfs/refcount.h>
#ifndef MAX_IOVEC
#define MAX_IOVEC 16
@@ -105,11 +104,12 @@ struct ioq {
};
struct iovec vector[MAX_IOVEC];
- int count;
struct iovec *pending_vector;
+ int count;
int pending_count;
struct iobref *iobref;
uint32_t fraghdr;
+ char _pad[4];
};
typedef struct {
@@ -168,13 +168,13 @@ struct gf_sock_incoming {
char *proghdr_base_addr;
struct iobuf *iobuf;
size_t iobuf_size;
- int count;
struct gf_sock_incoming_frag frag;
struct iovec vector[2];
struct iovec payload_vector;
struct iobref *iobref;
rpc_request_info_t *request_info;
struct iovec *pending_vector;
+ int count;
int pending_count;
size_t total_bytes_read;
@@ -183,19 +183,11 @@ struct gf_sock_incoming {
size_t ra_served;
char *ra_buf;
uint32_t fraghdr;
- char complete_record;
msg_type_t msg_type;
sp_rpcrecord_state_t record_state;
+ char _pad[4];
};
-typedef enum {
- OT_IDLE, /* Uninitialized or termination complete. */
- OT_SPAWNING, /* Past pthread_create but not in thread yet. */
- OT_RUNNING, /* Poller thread running normally. */
- OT_CALLBACK, /* Poller thread in the middle of a callback. */
- OT_PLEASE_DIE, /* Poller termination requested. */
-} ot_state_t;
-
typedef struct {
union {
struct list_head ioq;
@@ -207,7 +199,6 @@ typedef struct {
pthread_mutex_t out_lock;
pthread_mutex_t cond_lock;
pthread_cond_t cond;
- pthread_t thread;
int windowsize;
int keepalive;
int keepaliveidle;
@@ -222,6 +213,7 @@ typedef struct {
* arm the epoll event set for the required event for the specific fd.
*/
int ssl_error_required;
+ int ssl_session_id;
GF_REF_DECL; /* refcount to keep track of socket_poller
threads */
@@ -236,15 +228,14 @@ typedef struct {
uint32_t backlog;
SSL_METHOD *ssl_meth;
SSL_CTX *ssl_ctx;
- int ssl_session_id;
BIO *ssl_sbio;
SSL *ssl_ssl;
char *ssl_own_cert;
char *ssl_private_key;
char *ssl_ca_list;
char *crl_path;
- int pipe[2];
struct gf_sock_incoming incoming;
+ mgmt_ssl_t srvr_ssl;
/* -1 = not connected. 0 = in progress. 1 = connected */
char connected;
/* 1 = connect failed for reasons other than EINPROGRESS/ENOENT
@@ -253,9 +244,7 @@ typedef struct {
char bio;
char connect_finish_log;
char submit_log;
- char lowlat;
char nodelay;
- mgmt_ssl_t srvr_ssl;
gf_boolean_t read_fail_log;
gf_boolean_t ssl_enabled; /* outbound I/O */
gf_boolean_t mgmt_ssl; /* outbound mgmt */
@@ -281,7 +270,7 @@ typedef struct {
* socket_event_handler() for
* newly accepted socket
*/
-
+ char _pad[4];
} socket_private_t;
#endif
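
The socket.h hunks above mostly reorder fields (for example moving the `int count` members next to other ints) and add explicit `char _pad[4]` tails; the apparent goal is to remove, or at least make visible, the padding the compiler would otherwise insert silently. The self-contained demo below shows the effect on an unrelated pair of structs; the sizes quoted in the comments assume a typical LP64 target.

    #include <stdio.h>

    /* Interleaving pointers and ints leaves 4-byte holes on LP64;
     * grouping the ints removes them. */
    struct interleaved {
        char *p1;
        int a;      /* 4-byte hole follows */
        char *p2;
        int b;      /* plus 4 bytes of tail padding: 32 bytes total */
    };

    struct grouped {
        char *p1;
        char *p2;
        int a;
        int b;      /* the two ints share one 8-byte slot: 24 bytes total */
    };

    int
    main(void)
    {
        printf("interleaved=%zu grouped=%zu\n",
               sizeof(struct interleaved), sizeof(struct grouped));
        return 0;
    }
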
diff --git a/rpc/xdr/gen/Makefile.am b/rpc/xdr/gen/Makefile.am
deleted file mode 100644
index df379b80130..00000000000
--- a/rpc/xdr/gen/Makefile.am
+++ /dev/null
@@ -1,49 +0,0 @@
-XDRGENFILES = glusterfs3-xdr.x glusterfs4-xdr.x cli1-xdr.x nlm4-xdr.x nsm-xdr.x \
- rpc-common-xdr.x glusterd1-xdr.x acl3-xdr.x portmap-xdr.x \
- mount3udp.x changelog-xdr.x glusterfs-fops.x
-XDRHEADERS = $(XDRGENFILES:.x=.h)
-XDRSOURCES = $(XDRGENFILES:.x=.c)
-
-CLEANFILES = $(XDRSOURCES) $(XDRHEADERS) $(XDRGENFILES)
-
-# trick automake into doing BUILT_SOURCES magic
-BUILT_SOURCES = $(XDRHEADERS) $(XDRSOURCES)
-
-xdrsrc=$(top_srcdir)/rpc/xdr/src
-xdrdst=$(top_builddir)/rpc/xdr/src
-
-# make's dependency resolution may mean that it decides to run
-# rpcgen again (unnecessarily), but as the .c file already exists,
-# rpcgen will exit with an error, resulting in a build error. We
-# could use a '-' (i.e. -@rpcgen ...) and suffer with noisy warnings
-# in the build. Or we do this crufty thing instead.
-$(XDRSOURCES): $(XDRGENFILES)
- @if [ ! -e $(xdrdst)/$@ -o $(@:.c=.x) -nt $(xdrdst)/$@ ]; then \
- rpcgen -c -o $(xdrdst)/$@ $(@:.c=.x) ;\
- fi
-
-# d*mn sed in netbsd6 doesn't do -i (inline)
-# (why are we still running smoke on netbsd6 and not netbsd7?)
-$(XDRHEADERS): $(XDRGENFILES)
- @if [ ! -e $(xdrdst)/$@ -o $(@:.h=.x) -nt $(xdrdst)/$@ ]; then \
- rpcgen -h -o $(@:.h=.tmp) $(@:.h=.x) && \
- sed -e '/#ifndef/ s/-/_/g' -e '/#define/ s/-/_/g' \
- -e '/#endif/ s/-/_/' -e 's/TMP_/H_/g' \
- $(@:.h=.tmp) > $(xdrdst)/$@ && \
- rm -f $(@:.h=.tmp) ; \
- fi
-
-
-# link .x files when doing out-of-tree builds
-# have to use .PHONY here to force it; all versions of make
-# will think the file already exists "here" by virtue of the
-# VPATH. And we have to have the .x file in $cwd in order to
-# have rpcgen generate "nice" #include directives
-# i.e. (nice):
-# #include "acl3-xdr.h"
-# versus (not nice):
-# #include "../../../../foo/src/rpc/xdr/src/acl3-xdr.h"
-.PHONY : $(XDRGENFILES)
-$(XDRGENFILES):
- @if [ ! -e $@ ]; then ln -s $(xdrsrc)/$@ . ; fi;
-
diff --git a/rpc/xdr/src/.gitignore b/rpc/xdr/src/.gitignore
index 6728940f546..a0c8b7ca2b6 100644
--- a/rpc/xdr/src/.gitignore
+++ b/rpc/xdr/src/.gitignore
@@ -10,8 +10,6 @@ glusterfs3-xdr.c
glusterfs3-xdr.h
glusterfs4-xdr.c
glusterfs4-xdr.h
-glusterfs-fops.h
-glusterfs-fops.c
mount3udp.c
mount3udp.h
nlm4-xdr.c
diff --git a/rpc/xdr/src/Makefile.am b/rpc/xdr/src/Makefile.am
index 495b9999236..0e9c377ec93 100644
--- a/rpc/xdr/src/Makefile.am
+++ b/rpc/xdr/src/Makefile.am
@@ -1,10 +1,19 @@
-XDRGENFILES = glusterfs3-xdr.x glusterfs4-xdr.x cli1-xdr.x nlm4-xdr.x nsm-xdr.x \
- rpc-common-xdr.x glusterd1-xdr.x acl3-xdr.x portmap-xdr.x \
- mount3udp.x changelog-xdr.x glusterfs-fops.x
+if BUILD_GNFS
+ NFS_XDRS = nlm4-xdr.x nsm-xdr.x acl3-xdr.x mount3udp.x
+ NFS_SRCS = xdr-nfs3.c msg-nfs3.c
+ NFS_HDRS = xdr-nfs3.h msg-nfs3.h
+else
+ NFS_EXTRA_XDRS = nlm4-xdr.x nsm-xdr.x acl3-xdr.x mount3udp.x
+endif
+
+XDRGENFILES = glusterfs3-xdr.x glusterfs4-xdr.x cli1-xdr.x \
+ rpc-common-xdr.x glusterd1-xdr.x changelog-xdr.x \
+ portmap-xdr.x ${NFS_XDRS}
+
XDRHEADERS = $(XDRGENFILES:.x=.h)
XDRSOURCES = $(XDRGENFILES:.x=.c)
-EXTRA_DIST = $(XDRGENFILES) libgfxdr.sym
+EXTRA_DIST = $(XDRGENFILES) libgfxdr.sym ${NFS_EXTRA_XDRS}
lib_LTLIBRARIES = libgfxdr.la
@@ -19,17 +28,58 @@ libgfxdr_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
libgfxdr_la_LDFLAGS = -version-info $(LIBGFXDR_LT_VERSION) $(GF_LDFLAGS) \
-export-symbols $(top_srcdir)/rpc/xdr/src/libgfxdr.sym
-libgfxdr_la_SOURCES = xdr-generic.c xdr-nfs3.c msg-nfs3.c
+libgfxdr_la_SOURCES = xdr-generic.c ${NFS_SRCS}
nodist_libgfxdr_la_SOURCES = $(XDRSOURCES)
-libgfxdr_la_HEADERS = xdr-generic.h xdr-nfs3.h msg-nfs3.h glusterfs3.h \
- rpc-pragmas.h
+libgfxdr_la_HEADERS = xdr-generic.h glusterfs3.h rpc-pragmas.h ${NFS_HDRS}
nodist_libgfxdr_la_HEADERS = $(XDRHEADERS)
libgfxdr_ladir = $(includedir)/glusterfs/rpc
CLEANFILES = $(XDRSOURCES) $(XDRHEADERS)
-# Generate the .c and .h symlinks from the ../gen/*.x files
-$(XDRSOURCES) $(XDRHEADERS):
- $(MAKE) -C ../gen $^
+# trick automake into doing BUILT_SOURCES magic
+BUILT_SOURCES = $(XDRHEADERS) $(XDRSOURCES)
+
+xdrsrc=$(top_srcdir)/rpc/xdr/src
+xdrdst=$(top_builddir)/rpc/xdr/src
+
+# make's dependency resolution may mean that it decides to run
+# rpcgen again (unnecessarily), but as the .c file already exists,
+# rpcgen will exit with an error, resulting in a build error. We
+# could use a '-' (i.e. -@rpcgen ...) and suffer with noisy warnings
+# in the build. Or we do this crufty thing instead.
+$(XDRSOURCES): $(XDRGENFILES)
+ @if [ ! -e $(xdrdst)/$@ -o $(@:.c=.x) -nt $(xdrdst)/$@ ]; then \
+ rpcgen -c -o $(xdrdst)/$@ $(@:.c=.x) ;\
+ fi
+
+# d*mn sed in netbsd6 doesn't do -i (inline)
+# (why are we still running smoke on netbsd6 and not netbsd7?)
+$(XDRHEADERS): $(XDRGENFILES)
+ @if [ ! -e $(xdrdst)/$@ -o $(@:.h=.x) -nt $(xdrdst)/$@ ]; then \
+ rpcgen -h -o $(@:.h=.tmp) $(@:.h=.x) && \
+ sed -e '/#ifndef/ s/-/_/g' -e '/#define/ s/-/_/g' \
+ -e '/#endif/ s/-/_/' -e 's/TMP_/H_/g' \
+ $(@:.h=.tmp) > $(xdrdst)/$@ && \
+ rm -f $(@:.h=.tmp) ; \
+ fi
+
+
+# link .x files when doing out-of-tree builds
+# have to use .PHONY here to force it; all versions of make
+# will think the file already exists "here" by virtue of the
+# VPATH. And we have to have the .x file in $cwd in order to
+# have rpcgen generate "nice" #include directives
+# i.e. (nice):
+# #include "acl3-xdr.h"
+# versus (not nice):
+# #include "../../../../foo/src/rpc/xdr/src/acl3-xdr.h"
+.PHONY : $(XDRGENFILES)
+$(XDRGENFILES):
+ @if [ ! -e $@ ]; then ln -s $(xdrsrc)/$@ . ; fi;
+
+clean-local:
+ @if [ $(top_builddir) != $(top_srcdir) ]; then \
+ rm -f $(xdrdst)/*.x; \
+ fi
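
A quick illustration of what the header post-processing above is meant to do (a sketch only; the exact guard text rpcgen emits can vary by platform, and the file name used here is just an example). Because the header is first generated into a .tmp file, the include guard is derived from that temporary name, and on some platforms the '-' in the source file name survives into the guard; the sed expressions normalize both:

    # hypothetical guard line as rpcgen might emit it when writing acl3-xdr.tmp
    printf '#ifndef _ACL3-XDR_TMP_RPCGEN\n' | \
        sed -e '/#ifndef/ s/-/_/g' -e '/#define/ s/-/_/g' \
            -e '/#endif/ s/-/_/' -e 's/TMP_/H_/g'
    # prints: #ifndef _ACL3_XDR_H_RPCGEN
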
diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x
index a32c8645708..777cb0046a2 100644
--- a/rpc/xdr/src/cli1-xdr.x
+++ b/rpc/xdr/src/cli1-xdr.x
@@ -68,6 +68,7 @@ enum gf_bitrot_type {
GF_BITROT_OPTION_TYPE_EXPIRY_TIME,
GF_BITROT_CMD_SCRUB_STATUS,
GF_BITROT_CMD_SCRUB_ONDEMAND,
+ GF_BITROT_OPTION_TYPE_SIGNER_THREADS,
GF_BITROT_OPTION_TYPE_MAX
};
diff --git a/rpc/xdr/src/glusterd1-xdr.x b/rpc/xdr/src/glusterd1-xdr.x
index 02ebec26c01..b631dea3502 100644
--- a/rpc/xdr/src/glusterd1-xdr.x
+++ b/rpc/xdr/src/glusterd1-xdr.x
@@ -202,6 +202,21 @@ struct gd1_mgmt_v3_commit_rsp {
string op_errstr<>;
} ;
+struct gd1_mgmt_v3_post_commit_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_v3_post_commit_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ opaque dict<>;
+ string op_errstr<>;
+} ;
+
struct gd1_mgmt_v3_post_val_req {
unsigned char uuid[16];
int op;
diff --git a/rpc/xdr/src/glusterfs-fops.x b/rpc/xdr/src/glusterfs-fops.x
deleted file mode 100644
index 651f8def0ba..00000000000
--- a/rpc/xdr/src/glusterfs-fops.x
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
- * This file is part of GlusterFS.
- *
- * This file is licensed to you under your choice of the GNU Lesser
- * General Public License, version 3 or any later version (LGPLv3 or
- * later), or the GNU General Public License, version 2 (GPLv2), in all
- * cases as published by the Free Software Foundation.
- */
-
-#ifdef RPC_XDR
-%#include "rpc-pragmas.h"
-#endif
-%#include <glusterfs/compat.h>
-
-/* NOTE: add members ONLY at the end (just before _MAXVALUE) */
-/*
- * OTHER NOTE: fop_enum_to_str and fop_enum_to_pri_str (in common-utils.h) also
- * contain lists of fops, so if you update this list UPDATE THOSE TOO.
- */
-enum glusterfs_fop_t {
- GF_FOP_NULL = 0,
- GF_FOP_STAT,
- GF_FOP_READLINK,
- GF_FOP_MKNOD,
- GF_FOP_MKDIR,
- GF_FOP_UNLINK,
- GF_FOP_RMDIR,
- GF_FOP_SYMLINK,
- GF_FOP_RENAME,
- GF_FOP_LINK,
- GF_FOP_TRUNCATE,
- GF_FOP_OPEN,
- GF_FOP_READ,
- GF_FOP_WRITE,
- GF_FOP_STATFS,
- GF_FOP_FLUSH,
- GF_FOP_FSYNC, /* 16 */
- GF_FOP_SETXATTR,
- GF_FOP_GETXATTR,
- GF_FOP_REMOVEXATTR,
- GF_FOP_OPENDIR,
- GF_FOP_FSYNCDIR,
- GF_FOP_ACCESS,
- GF_FOP_CREATE,
- GF_FOP_FTRUNCATE,
- GF_FOP_FSTAT, /* 25 */
- GF_FOP_LK,
- GF_FOP_LOOKUP,
- GF_FOP_READDIR,
- GF_FOP_INODELK,
- GF_FOP_FINODELK,
- GF_FOP_ENTRYLK,
- GF_FOP_FENTRYLK,
- GF_FOP_XATTROP,
- GF_FOP_FXATTROP,
- GF_FOP_FGETXATTR,
- GF_FOP_FSETXATTR,
- GF_FOP_RCHECKSUM,
- GF_FOP_SETATTR,
- GF_FOP_FSETATTR,
- GF_FOP_READDIRP,
- GF_FOP_FORGET,
- GF_FOP_RELEASE,
- GF_FOP_RELEASEDIR,
- GF_FOP_GETSPEC,
- GF_FOP_FREMOVEXATTR,
- GF_FOP_FALLOCATE,
- GF_FOP_DISCARD,
- GF_FOP_ZEROFILL,
- GF_FOP_IPC,
- GF_FOP_SEEK,
- GF_FOP_LEASE,
- GF_FOP_COMPOUND,
- GF_FOP_GETACTIVELK,
- GF_FOP_SETACTIVELK,
- GF_FOP_PUT,
- GF_FOP_ICREATE,
- GF_FOP_NAMELINK,
- GF_FOP_COPY_FILE_RANGE,
- GF_FOP_MAXVALUE
-};
-
-/* Note: Removed event GF_EVENT_CHILD_MODIFIED=8, hence
- *to preserve backward compatibiliy, GF_EVENT_CLEANUP = 9
- */
-enum glusterfs_event_t {
- GF_EVENT_PARENT_UP = 1,
- GF_EVENT_POLLIN,
- GF_EVENT_POLLOUT,
- GF_EVENT_POLLERR,
- GF_EVENT_CHILD_UP,
- GF_EVENT_CHILD_DOWN,
- GF_EVENT_CHILD_CONNECTING,
- GF_EVENT_CLEANUP = 9,
- GF_EVENT_TRANSPORT_CONNECTED,
- GF_EVENT_VOLFILE_MODIFIED,
- GF_EVENT_GRAPH_NEW,
- GF_EVENT_TRANSLATOR_INFO,
- GF_EVENT_TRANSLATOR_OP,
- GF_EVENT_AUTH_FAILED,
- GF_EVENT_VOLUME_DEFRAG,
- GF_EVENT_PARENT_DOWN,
- GF_EVENT_VOLUME_BARRIER_OP,
- GF_EVENT_UPCALL,
- GF_EVENT_SCRUB_STATUS,
- GF_EVENT_SOME_DESCENDENT_DOWN,
- GF_EVENT_SCRUB_ONDEMAND,
- GF_EVENT_SOME_DESCENDENT_UP,
- GF_EVENT_CHILD_PING,
- GF_EVENT_MAXVAL
-};
-
-/* List of compound fops. Add fops at the end. */
-enum glusterfs_compound_fop_t {
- GF_CFOP_NON_PREDEFINED = 0, /* needs single FOP inspection */
- GF_CFOP_XATTROP_WRITEV,
- GF_CFOP_XATTROP_UNLOCK,
- GF_CFOP_PUT, /* create+write+setxattr+fsync+close+rename */
- GF_CFOP_MAXVALUE
-};
-
-enum glusterfs_mgmt_t {
- GF_MGMT_NULL = 0,
- GF_MGMT_MAXVALUE
-};
-
-enum gf_op_type_t {
- GF_OP_TYPE_NULL = 0,
- GF_OP_TYPE_FOP,
- GF_OP_TYPE_MGMT,
- GF_OP_TYPE_MAX
-};
-
-/* NOTE: all the miscellaneous flags used by GlusterFS should be listed here */
-enum glusterfs_lk_cmds_t {
- GF_LK_GETLK = 0,
- GF_LK_SETLK,
- GF_LK_SETLKW,
- GF_LK_RESLK_LCK,
- GF_LK_RESLK_LCKW,
- GF_LK_RESLK_UNLCK,
- GF_LK_GETLK_FD
-};
-
-enum glusterfs_lk_types_t {
- GF_LK_F_RDLCK = 0,
- GF_LK_F_WRLCK,
- GF_LK_F_UNLCK,
- GF_LK_EOL
-};
-
-/* Lease Types */
-enum gf_lease_types_t {
- NONE = 0,
- GF_RD_LEASE = 1,
- GF_RW_LEASE = 2,
- GF_LEASE_MAX_TYPE
-};
-
-/* Lease cmds */
-enum gf_lease_cmds_t {
- GF_GET_LEASE = 1,
- GF_SET_LEASE = 2,
- GF_UNLK_LEASE = 3
-};
-
-%#define LEASE_ID_SIZE 16 /* 128bits */
-struct gf_lease {
- gf_lease_cmds_t cmd;
- gf_lease_types_t lease_type;
- char lease_id[LEASE_ID_SIZE];
- unsigned int lease_flags;
-};
-
-enum glusterfs_lk_recovery_cmds_t {
- F_RESLK_LCK = 200,
- F_RESLK_LCKW,
- F_RESLK_UNLCK,
- F_GETLK_FD
-};
-
-enum gf_lk_domain_t {
- GF_LOCK_POSIX,
- GF_LOCK_INTERNAL
-};
-
-enum entrylk_cmd {
- ENTRYLK_LOCK,
- ENTRYLK_UNLOCK,
- ENTRYLK_LOCK_NB
-};
-
-enum entrylk_type {
- ENTRYLK_RDLCK,
- ENTRYLK_WRLCK
-};
-
-%#define GF_MAX_LOCK_OWNER_LEN 1024 /* 1kB as per NLM */
-
-/* 16strings-16strings-... */
-%#define GF_LKOWNER_BUF_SIZE ((GF_MAX_LOCK_OWNER_LEN * 2) + (GF_MAX_LOCK_OWNER_LEN / 8))
-
-struct gf_lkowner_t {
- int len;
- char data[GF_MAX_LOCK_OWNER_LEN];
-};
-
-enum gf_xattrop_flags_t {
- GF_XATTROP_ADD_ARRAY,
- GF_XATTROP_ADD_ARRAY64,
- GF_XATTROP_OR_ARRAY,
- GF_XATTROP_AND_ARRAY,
- GF_XATTROP_GET_AND_SET,
- GF_XATTROP_ADD_ARRAY_WITH_DEFAULT,
- GF_XATTROP_ADD_ARRAY64_WITH_DEFAULT
-};
-
-enum gf_seek_what_t {
- GF_SEEK_DATA,
- GF_SEEK_HOLE
-};
-
-enum gf_upcall_flags_t {
- GF_UPCALL_NULL,
- GF_UPCALL,
- GF_UPCALL_CI_STAT,
- GF_UPCALL_CI_XATTR,
- GF_UPCALL_CI_RENAME,
- GF_UPCALL_CI_NLINK,
- GF_UPCALL_CI_FORGET,
- GF_UPCALL_LEASE_RECALL,
- GF_UPCALL_FLAGS_MAXVALUE
-};
-
-enum gf_dict_data_type_t {
- GF_DATA_TYPE_UNKNOWN,
- GF_DATA_TYPE_STR_OLD, /* Will be set by volgen and dict-serialize
- and unserialize. Used to reduce warnings
- if one is using old protocol */
- GF_DATA_TYPE_INT,
- GF_DATA_TYPE_UINT,
- GF_DATA_TYPE_DOUBLE,
- GF_DATA_TYPE_STR,
- GF_DATA_TYPE_PTR,
- GF_DATA_TYPE_GFUUID,
- GF_DATA_TYPE_IATT,
- GF_DATA_TYPE_MDATA,
- GF_DATA_TYPE_MAX
-};
diff --git a/rpc/xdr/src/glusterfs3-xdr.x b/rpc/xdr/src/glusterfs3-xdr.x
index 9db0a311159..1c99099a721 100644
--- a/rpc/xdr/src/glusterfs3-xdr.x
+++ b/rpc/xdr/src/glusterfs3-xdr.x
@@ -11,9 +11,8 @@
#ifdef RPC_XDR
%#include "rpc-pragmas.h"
#endif
-%#include <glusterfs/compat.h>
+%#include <glusterfs/glusterfs-fops.h>
%#include "rpc-common-xdr.h"
-%#include "glusterfs-fops.h"
#define GF_REQUEST_MAXGROUPS 16
struct gf_statfs {
diff --git a/rpc/xdr/src/glusterfs4-xdr.x b/rpc/xdr/src/glusterfs4-xdr.x
index 30597850e23..d3b1d0dfaf0 100644
--- a/rpc/xdr/src/glusterfs4-xdr.x
+++ b/rpc/xdr/src/glusterfs4-xdr.x
@@ -11,11 +11,9 @@
#ifdef RPC_XDR
%#include "rpc-pragmas.h"
#endif
-%#include <glusterfs/compat.h>
-%#include "glusterfs-fops.h"
+%#include <glusterfs/glusterfs-fops.h>
%#include "glusterfs3-xdr.h"
-
/* Need to consume iattx and new dict in all the fops */
struct gfx_iattx {
opaque ia_gfid[16];
@@ -56,7 +54,7 @@ struct gfx_mdata_iatt {
unsigned int ia_ctime_nsec;
};
-union gfx_value switch (gf_dict_data_type_t type) {
+union gfx_value switch (int type) {
case GF_DATA_TYPE_INT:
hyper value_int;
case GF_DATA_TYPE_UINT:
diff --git a/rpc/xdr/src/libgfxdr.sym b/rpc/xdr/src/libgfxdr.sym
index dd4ac8562bc..8fa0e0ddd8a 100644
--- a/rpc/xdr/src/libgfxdr.sym
+++ b/rpc/xdr/src/libgfxdr.sym
@@ -28,6 +28,8 @@ xdr_gd1_mgmt_v3_brick_op_req
xdr_gd1_mgmt_v3_brick_op_rsp
xdr_gd1_mgmt_v3_commit_req
xdr_gd1_mgmt_v3_commit_rsp
+xdr_gd1_mgmt_v3_post_commit_req
+xdr_gd1_mgmt_v3_post_commit_rsp
xdr_gd1_mgmt_v3_lock_req
xdr_gd1_mgmt_v3_lock_rsp
xdr_gd1_mgmt_v3_post_val_req
diff --git a/rpc/xdr/src/rpc-common-xdr.x b/rpc/xdr/src/rpc-common-xdr.x
index 760d1e0aedc..baf8b4313c8 100644
--- a/rpc/xdr/src/rpc-common-xdr.x
+++ b/rpc/xdr/src/rpc-common-xdr.x
@@ -11,13 +11,12 @@
#ifdef RPC_XDR
%#include "rpc-pragmas.h"
#endif
-%#include <glusterfs/compat.h>
+%#include <glusterfs/glusterfs-fops.h>
/* This file has definition of few XDR structures which are
* not captured in any section specific file */
%#include "xdr-common.h"
-%#include "glusterfs-fops.h"
struct auth_glusterfs_parms_v2 {
int pid;
diff --git a/run-tests.sh b/run-tests.sh
index 5683b21640b..e2a1655d8e0 100755
--- a/run-tests.sh
+++ b/run-tests.sh
@@ -265,6 +265,7 @@ function match()
# G_TESTDEF_TEST_STATUS_NETBSD7
# Some examples:
# G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=123456
+# G_TESTDEF_TEST_STATUS_CENTOS6=BRICK_MUX_BAD_TEST,BUG=123456
# G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=4444444
# G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=123456;555555
# G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TESTS,BUG=1385758
@@ -327,6 +328,7 @@ function get_bug_list_for_disabled_test ()
function run_tests()
{
RES=0
+ FLAKY=''
FAILED=''
TESTS_NEEDED_RETRY=''
GENERATED_CORE=''
@@ -348,15 +350,16 @@ function run_tests()
timeout_cmd_exists="no"
fi
- for t in $(find ${regression_testsdir}/tests -name '*.t' \
- | LC_COLLATE=C sort) ; do
+ all_tests=($(find ${regression_testsdir}/tests -name '*.t' | sort))
+ all_tests_cnt=${#all_tests[@]}
+ for t in "${all_tests[@]}" ; do
old_cores=$(ls /*-*.core 2> /dev/null | wc -l)
total_tests=$((total_tests+1))
if match $t "$@" ; then
selected_tests=$((selected_tests+1))
echo
- echo $section_separator$section_separator
- if [[ $(get_test_status $t) == "BAD_TEST" ]] && \
+ echo $section_separator "(${total_tests} / ${all_tests_cnt})" $section_separator
+ if [[ $(get_test_status $t) =~ "BAD_TEST" ]] && \
[[ $skip_bad_tests == "yes" ]]
then
skipped_bad_tests=$((skipped_bad_tests+1))
@@ -394,7 +397,7 @@ function run_tests()
cmd_timeout=$(grep "SCRIPT_TIMEOUT=" ${t} | cut -f2 -d'=');
echo "Timeout set is ${cmd_timeout}, default ${run_timeout}"
fi
- timeout -k ${kill_after_time} ${cmd_timeout} prove -vmfe '/bin/bash' ${t}
+ timeout --foreground -k ${kill_after_time} ${cmd_timeout} prove -vmfe '/bin/bash' ${t}
else
prove -vmfe '/bin/bash' ${t}
fi
@@ -418,7 +421,7 @@ function run_tests()
echo ""
if [ ${timeout_cmd_exists} == "yes" ]; then
- timeout -k ${kill_after_time} ${cmd_timeout} prove -vmfe '/bin/bash' ${t}
+ timeout --foreground -k ${kill_after_time} ${cmd_timeout} prove -vmfe '/bin/bash' ${t}
else
prove -vmfe '/bin/bash' ${t}
fi
@@ -431,9 +434,17 @@ function run_tests()
TESTS_NEEDED_RETRY="${TESTS_NEEDED_RETRY}${t} "
fi
+
+
if [ ${TMP_RES} -ne 0 ] ; then
- RES=${TMP_RES}
- FAILED="${FAILED}${t} "
+ if [[ "$t" == *"tests/000-flaky/"* ]]; then
+ FLAKY="${FLAKY}${t} "
+ echo "FAILURE -> SUCCESS: Flaky test"
+ TMP_RES=0
+ else
+ RES=${TMP_RES}
+ FAILED="${FAILED}${t} "
+ fi
fi
new_cores=$(ls /*-*.core 2> /dev/null | wc -l)
@@ -468,8 +479,10 @@ function run_tests()
echo "$key - ${ELAPSEDTIMEMAP["$key"]} second"
done | sort -rn -k3
- # Output the errors into a file
+ # initialize the output file
echo > "${result_output}"
+
+ # Output the errors into a file
if [ ${RES} -ne 0 ] ; then
FAILED=$( echo ${FAILED} | tr ' ' '\n' | sort -u )
FAILED_COUNT=$( echo -n "${FAILED}" | grep -c '^' )
@@ -482,7 +495,13 @@ function run_tests()
TESTS_NEEDED_RETRY=$( echo ${TESTS_NEEDED_RETRY} | tr ' ' '\n' | sort -u )
RETRY_COUNT=$( echo -n "${TESTS_NEEDED_RETRY}" | grep -c '^' )
if [ ${RETRY_COUNT} -ne 0 ] ; then
- echo -e "\n${RETRY_COUNT} test(s) needed retry \n${TESTS_NEEDED_RETRY}"
+ echo -e "\n${RETRY_COUNT} test(s) needed retry \n${TESTS_NEEDED_RETRY}" >> "${result_output}"
+ fi
+
+ FLAKY_TESTS_FAILED=$( echo ${FLAKY} | tr ' ' '\n' | sort -u )
+ RETRY_COUNT=$( echo -n "${FLAKY_TESTS_FAILED}" | grep -c '^' )
+ if [ ${RETRY_COUNT} -ne 0 ] ; then
+ echo -e "\n${RETRY_COUNT} flaky test(s) marked as success even though they failed \n${FLAKY_TESTS_FAILED}" >> "${result_output}"
fi
echo
@@ -511,8 +530,38 @@ function run_head_tests()
run_tests "$htests"
}
-function parse_args () {
- args=`getopt frcbkphHno:t: "$@"`
+function show_usage ()
+{
+ cat <<EOF
+Usage: $0 <opts> [<glob>|<bzid>]...
+
+Options:
+
+-f force
+-h skip tests changed in HEAD
+-H run only tests changed in HEAD
+-r retry failed tests
+-R do not retry failed tests
+-c don't exit on failure
+-b don't skip bad tests
+-k don't skip known bugs
+-p don't keep logs from preceding runs
+-o OUTPUT
+-t TIMEOUT
+-n skip NFS tests
+--help
+EOF
+}
+
+usage="no"
+
+function parse_args ()
+{
+ args=`getopt -u -l help frRcbkphHno:t: "$@"`
+ if ! [ $? -eq 0 ]; then
+ show_usage
+ exit 1
+ fi
set -- $args
while [ $# -gt 0 ]; do
case "$1" in
@@ -520,6 +569,7 @@ function parse_args () {
-h) head="no" ;;
-H) head="only" ;;
-r) retry="yes" ;;
+ -R) retry="no" ;;
-c) exit_on_failure="no" ;;
-b) skip_bad_tests="no" ;;
-k) skip_known_bugs="no" ;;
@@ -527,6 +577,7 @@ function parse_args () {
-o) result_output="$2"; shift;;
-t) run_timeout="$2"; shift;;
-n) nfs_tests="no";;
+ --help) usage="yes" ;;
--) shift; break;;
esac
shift
@@ -541,6 +592,10 @@ echo
# Get user options
parse_args "$@"
+if [ x"$usage" == x"yes" ]; then
+ show_usage
+ exit 0
+fi
# Make sure we're running as the root user
check_user
diff --git a/tests/00-geo-rep/00-georep-verify-non-root-setup.t b/tests/00-geo-rep/00-georep-verify-non-root-setup.t
index ed7e589101d..a55fd3e5e6a 100644
--- a/tests/00-geo-rep/00-georep-verify-non-root-setup.t
+++ b/tests/00-geo-rep/00-georep-verify-non-root-setup.t
@@ -5,7 +5,7 @@
. $(dirname $0)/../geo-rep.rc
. $(dirname $0)/../env.rc
-SCRIPT_TIMEOUT=500
+SCRIPT_TIMEOUT=600
### Basic Non-root geo-rep setup test with Distribute Replicate volumes
@@ -144,12 +144,15 @@ TEST pidof glusterd;
TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
TEST $CLI volume start $META_VOL
TEST mkdir -p $META_MNT
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "3" brick_count ${META_VOL}
TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
##Mount master
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "4" brick_count $GMV0
TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
##Mount slave
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "4" brick_count $GSV0
TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
## Check status of mount-broker
@@ -189,6 +192,8 @@ TEST gluster-georep-sshkey generate
TEST $GEOREP_CLI $master $slave_url create push-pem
+#check for session creation
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_non_root "Created"
#Config gluster-command-dir
TEST $GEOREP_CLI $master $slave_url config gluster-command-dir ${GLUSTER_CMD_DIR}
@@ -223,22 +228,42 @@ TEST $GEOREP_CLI $master $slave_url resume
#Validate failure of volume stop when geo-rep is running
TEST ! $CLI volume stop $GMV0
+#Negative test for ssh-port
+#Port should be an integer in the 1-65535 range
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port -22
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port abc
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port 6875943
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port 4.5
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port 22a
+
+#Config Set ssh-port to validate int validation
+TEST $GEOREP_CLI $master $slave config ssh-port 22
+
#Hybrid directory rename test BZ#1763439
+
TEST $GEOREP_CLI $master $slave_url config change_detector xsync
-mkdir ${master_mnt}/dir1
-mkdir ${master_mnt}/dir1/dir2
-mkdir ${master_mnt}/dir1/dir3
-mkdir ${master_mnt}/hybrid_d1
+#verify master and slave mount
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/hybrid_d1
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1/dir2
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1/dir3
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT "^1$" check_mounted ${master_mnt}
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT "^1$" check_mounted ${slave_mnt}
+
+#Create test data for hybrid crawl
+TEST mkdir ${master_mnt}/dir1
+TEST mkdir ${master_mnt}/dir1/dir2
+TEST mkdir ${master_mnt}/dir1/dir3
+TEST mkdir ${master_mnt}/hybrid_d1
mv ${master_mnt}/hybrid_d1 ${master_mnt}/hybrid_rn_d1
mv ${master_mnt}/dir1/dir2 ${master_mnt}/rn_dir2
mv ${master_mnt}/dir1/dir3 ${master_mnt}/dir1/rn_dir3
+#Verify hybrid crawl data on slave
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/hybrid_rn_d1
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/rn_dir2
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1/rn_dir3
diff --git a/tests/00-geo-rep/00-georep-verify-setup.t b/tests/00-geo-rep/00-georep-verify-setup.t
index cdfccd35db5..0d46c04102d 100644
--- a/tests/00-geo-rep/00-georep-verify-setup.t
+++ b/tests/00-geo-rep/00-georep-verify-setup.t
@@ -5,7 +5,8 @@
. $(dirname $0)/../geo-rep.rc
. $(dirname $0)/../env.rc
-SCRIPT_TIMEOUT=300
+SCRIPT_TIMEOUT=400
+GEO_REP_TIMEOUT=200
##Cleanup and start glusterd
cleanup;
diff --git a/tests/00-geo-rep/bug-1708603.t b/tests/00-geo-rep/bug-1708603.t
new file mode 100644
index 00000000000..26913f1d318
--- /dev/null
+++ b/tests/00-geo-rep/bug-1708603.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../geo-rep.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=300
+
+##Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+
+##Variables
+GEOREP_CLI="gluster volume geo-replication"
+master=$GMV0
+SH0="127.0.0.1"
+slave=${SH0}::${GSV0}
+num_active=2
+num_passive=2
+master_mnt=$M0
+slave_mnt=$M1
+
+############################################################
+#SETUP VOLUMES AND GEO-REPLICATION
+############################################################
+
+##create_and_start_master_volume
+TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
+TEST $CLI volume start $GMV0
+
+##create_and_start_slave_volume
+TEST $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
+TEST $CLI volume start $GSV0
+
+##Mount master
+TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+##Mount slave
+TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+#Create geo-rep session
+TEST create_georep_session $master $slave
+
+echo n | $GEOREP_CLI $master $slave config ignore-deletes true >/dev/null 2>&1
+EXPECT "false" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+echo y | $GEOREP_CLI $master $slave config ignore-deletes true
+EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Delete Geo-rep
+TEST $GEOREP_CLI $master $slave delete
+
+#Cleanup authorized keys
+sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys
+sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/00-geo-rep/georep-basic-dr-rsync.t b/tests/00-geo-rep/georep-basic-dr-rsync.t
index b6fbf1875f0..d785aa59fc9 100644
--- a/tests/00-geo-rep/georep-basic-dr-rsync.t
+++ b/tests/00-geo-rep/georep-basic-dr-rsync.t
@@ -71,6 +71,19 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_num_rows "Created"
#Config gluster-command-dir
TEST $GEOREP_CLI $master $slave config gluster-command-dir ${GLUSTER_CMD_DIR}
+#Negative test for ssh-port
+#Port should be an integer in the 1-65535 range
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port -22
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port abc
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port 6875943
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port 4.5
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port 22a
+
#Config Set ssh-port to validate int validation
TEST $GEOREP_CLI $master $slave config ssh-port 22
diff --git a/tests/00-geo-rep/georep-upgrade.t b/tests/00-geo-rep/georep-upgrade.t
new file mode 100644
index 00000000000..7523068ed50
--- /dev/null
+++ b/tests/00-geo-rep/georep-upgrade.t
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+SCRIPT_TIMEOUT=500
+
+###############################################################################################
+#Before upgrade
+###############################################################################################
+brick=/bricks/brick1
+epoch1=$(date '+%s')
+sleep 1
+epoch2=$(date '+%s')
+mkdir -p /bricks/brick1/.glusterfs/changelogs/htime
+mkdir -p /bricks/brick1/.glusterfs/changelogs
+
+#multiple htime files(changelog enable/disable scenario)
+TEST touch /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1
+TEST touch /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2
+
+#changelog files
+TEST touch /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch1
+TEST touch /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch2
+
+htime_file1=/bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1
+htime_file2=/bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2
+
+#data inside htime files before upgrade
+data1=/bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch1
+data2=/bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch2
+
+#data inside htime files after upgrade
+updated_data1=/bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')`/CHANGELOG.$epoch1
+updated_data2=/bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')`/CHANGELOG.$epoch2
+
+echo -n $data1>$htime_file1
+echo -n $data2>$htime_file2
+
+echo "Before upgrade:"
+EXPECT '1' echo $(grep $data1 $htime_file1 | wc -l)
+EXPECT '1' echo $(grep $data2 $htime_file2 | wc -l)
+
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch1 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch2 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2 | wc -l)
+###############################################################################################
+#Upgrade
+###############################################################################################
+### FIXME: searching all of '/' for the script by name is fragile, since
+### multiple files with the same name can exist;
+### as a temporary fix, only the first result is used.
+TEST upgrade_script=$(find / -type f -name glusterfs-georep-upgrade.py -print | head -n 1)
+TEST python3 $upgrade_script $brick
+
+###############################################################################################
+#After upgrade
+###############################################################################################
+echo "After upgrade:"
+EXPECT '1' echo $(grep $updated_data1 $htime_file1 | wc -l)
+EXPECT '1' echo $(grep $updated_data2 $htime_file2 | wc -l)
+
+#Check directory structure inside changelogs
+TEST ! ls /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch1
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1.bak | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y')` | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m')` | wc -l)
+EXPECT '2' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')` | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')`/CHANGELOG.$epoch1 | wc -l)
+
+TEST ! ls /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch2
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2.bak | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y')` | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m')`| wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')`/CHANGELOG.$epoch2 | wc -l)
+
+TEST rm -rf /bricks
diff --git a/tests/basic/afr/split-brain-favorite-child-policy.t b/tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t
index c268c125610..77d82a4996f 100644
--- a/tests/basic/afr/split-brain-favorite-child-policy.t
+++ b/tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t
@@ -1,8 +1,8 @@
#!/bin/bash
#Test the split-brain resolution CLI commands.
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
cleanup;
diff --git a/tests/basic/changelog/changelog-snapshot.t b/tests/000-flaky/basic_changelog_changelog-snapshot.t
index 7742db48cdd..f6cd0b04d47 100644
--- a/tests/basic/changelog/changelog-snapshot.t
+++ b/tests/000-flaky/basic_changelog_changelog-snapshot.t
@@ -1,7 +1,7 @@
#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../snapshot.rc
cleanup;
ROLLOVER_TIME=3
diff --git a/tests/basic/distribute/rebal-all-nodes-migrate.t b/tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t
index acc4ffefecc..eb5d3305ac1 100644
--- a/tests/basic/distribute/rebal-all-nodes-migrate.t
+++ b/tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t
@@ -1,8 +1,8 @@
#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-. $(dirname $0)/../../dht.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../dht.rc
# Check if every single rebalance process migrated some files
@@ -10,11 +10,9 @@
function cluster_rebal_all_nodes_migrated_files {
val=0
a=$($CLI_1 volume rebalance $V0 status | grep "completed" | awk '{print $2}');
-# echo $a
b=($a)
for i in "${b[@]}"
do
-# echo "$i";
if [ "$i" -eq "0" ]; then
echo "false";
val=1;
diff --git a/tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t b/tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t
new file mode 100644
index 00000000000..42808ce0c0e
--- /dev/null
+++ b/tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This test checks that a partial failure of a fop results only in failure of the main fop
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0..5}
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=/$V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+TEST dd if=/dev/urandom of=$M0/a bs=12347 count=1
+TEST dd if=/dev/urandom of=$M0/b bs=12347 count=1
+TEST cp $M0/b $M0/c
+TEST fallocate -p -l 101 $M0/c
+TEST $CLI volume stop $V0
+TEST $CLI volume set $V0 debug.delay-gen posix;
+TEST $CLI volume set $V0 delay-gen.delay-duration 10000000;
+TEST $CLI volume set $V0 delay-gen.enable WRITE;
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 disperse.quorum-count 6
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+cksum=$(dd if=$M0/a bs=12345 count=1 | md5sum | awk '{print $1}')
+truncate -s 12345 $M0/a & #While write is waiting for 5 seconds, introduce failure
+fallocate -p -l 101 $M0/b &
+sleep 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST wait
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+EXPECT "12345" stat --format=%s $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
+cksum_after_heal=$(dd if=$M0/a | md5sum | awk '{print $1}')
+TEST [[ $cksum == $cksum_after_heal ]]
+cksum=$(dd if=$M0/c | md5sum | awk '{print $1}')
+cksum_after_heal=$(dd if=$M0/b | md5sum | awk '{print $1}')
+TEST [[ $cksum == $cksum_after_heal ]]
+
+cleanup;
diff --git a/tests/basic/mount-nfs-auth.t b/tests/000-flaky/basic_mount-nfs-auth.t
index 3d4a9cff00b..3d4a9cff00b 100755..100644
--- a/tests/basic/mount-nfs-auth.t
+++ b/tests/000-flaky/basic_mount-nfs-auth.t
diff --git a/tests/bugs/core/multiplex-limit-issue-151.t b/tests/000-flaky/bugs_core_multiplex-limit-issue-151.t
index dc9013061b0..5a88f97d726 100644
--- a/tests/bugs/core/multiplex-limit-issue-151.t
+++ b/tests/000-flaky/bugs_core_multiplex-limit-issue-151.t
@@ -1,8 +1,8 @@
#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../traps.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../traps.rc
+. $(dirname $0)/../volume.rc
function count_up_bricks {
$CLI --xml volume status all | grep '<status>1' | wc -l
diff --git a/tests/bugs/distribute/bug-1117851.t b/tests/000-flaky/bugs_distribute_bug-1117851.t
index 62cb6b66ab4..5980bf2fd4b 100755..100644
--- a/tests/bugs/distribute/bug-1117851.t
+++ b/tests/000-flaky/bugs_distribute_bug-1117851.t
@@ -2,8 +2,8 @@
SCRIPT_TIMEOUT=250
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
create_files () {
for i in {1..1000}; do
diff --git a/tests/bugs/distribute/bug-1122443.t b/tests/000-flaky/bugs_distribute_bug-1122443.t
index 906be7072bd..abd37082b33 100644
--- a/tests/bugs/distribute/bug-1122443.t
+++ b/tests/000-flaky/bugs_distribute_bug-1122443.t
@@ -1,8 +1,8 @@
#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../dht.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
make_files() {
mkdir $1 && \
@@ -42,8 +42,8 @@ TEST glusterfs -s $H0 --volfile-id $V0 $M0
TEST make_files $M0/subdir
# Get mtime before migration
-BEFORE="$(stat -c %n:%Y $M0/subdir/* | tr '\n' ',')"
-
+BEFORE="$(stat -c %n:%Y $M0/subdir/* | sort | tr '\n' ',')"
+echo $BEFORE
# Migrate brick
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 start
@@ -51,9 +51,10 @@ EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 commit
# Get mtime after migration
-EXPECT_WITHIN 5 RECONNECTED bug_1113050_workaround $M0/subdir/*
-AFTER="$(stat -c %n:%Y $M0/subdir/* | tr '\n' ',')"
-
+EXPECT_WITHIN 30 RECONNECTED bug_1113050_workaround $M0/subdir/symlink
+sleep 3
+AFTER="$(stat -c %n:%Y $M0/subdir/* | sort | tr '\n' ',')"
+echo $AFTER
# Check if mtime is unchanged
TEST [ "$AFTER" == "$BEFORE" ]
diff --git a/tests/bugs/glusterd/bug-857330/common.rc b/tests/000-flaky/bugs_glusterd_bug-857330/common.rc
index d0aa4b1a640..bd122eff18c 100644
--- a/tests/bugs/glusterd/bug-857330/common.rc
+++ b/tests/000-flaky/bugs_glusterd_bug-857330/common.rc
@@ -1,4 +1,4 @@
-. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../include.rc
UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
diff --git a/tests/bugs/glusterd/bug-857330/normal.t b/tests/000-flaky/bugs_glusterd_bug-857330/normal.t
index ad0c8844fae..6c1cf54ec3c 100755
--- a/tests/bugs/glusterd/bug-857330/normal.t
+++ b/tests/000-flaky/bugs_glusterd_bug-857330/normal.t
@@ -1,7 +1,7 @@
#!/bin/bash
. $(dirname $0)/common.rc
-. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
@@ -14,7 +14,7 @@ TEST $CLI volume start $V0;
TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
+TEST $PYTHON $(dirname $0)/../../utils/create-files.py \
--multi -b 10 -d 10 -n 10 $M0;
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
diff --git a/tests/bugs/glusterd/bug-857330/xml.t b/tests/000-flaky/bugs_glusterd_bug-857330/xml.t
index 8383d2a0711..11785adacdb 100755
--- a/tests/bugs/glusterd/bug-857330/xml.t
+++ b/tests/000-flaky/bugs_glusterd_bug-857330/xml.t
@@ -1,7 +1,7 @@
#!/bin/bash
. $(dirname $0)/common.rc
-. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../volume.rc
cleanup;
@@ -15,7 +15,7 @@ TEST $CLI volume start $V0;
TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
+TEST $PYTHON $(dirname $0)/../../utils/create-files.py \
--multi -b 10 -d 10 -n 10 $M0;
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
diff --git a/tests/bugs/glusterd/quorum-value-check.t b/tests/000-flaky/bugs_glusterd_quorum-value-check.t
index aaf636274b6..a431b8c4fd4 100755..100644
--- a/tests/bugs/glusterd/quorum-value-check.t
+++ b/tests/000-flaky/bugs_glusterd_quorum-value-check.t
@@ -1,7 +1,9 @@
#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
function check_quorum_nfs() {
local qnfs="$(less /var/lib/glusterd/nfs/nfs-server.vol | grep "quorum-count"| awk '{print $3}')"
diff --git a/tests/bugs/nfs/bug-1116503.t b/tests/000-flaky/bugs_nfs_bug-1116503.t
index dd3998df150..fc50021acc7 100644
--- a/tests/bugs/nfs/bug-1116503.t
+++ b/tests/000-flaky/bugs_nfs_bug-1116503.t
@@ -3,9 +3,9 @@
# Verify that mounting NFS over UDP (MOUNT service only) works.
#
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
diff --git a/tests/features/lock-migration/lkmigration-set-option.t b/tests/000-flaky/features_lock-migration_lkmigration-set-option.t
index 4340438591f..1327ef3579f 100644
--- a/tests/features/lock-migration/lkmigration-set-option.t
+++ b/tests/000-flaky/features_lock-migration_lkmigration-set-option.t
@@ -1,7 +1,7 @@
#!/bin/bash
# Test to check
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
#Check lock-migration set option sanity
cleanup;
diff --git a/tests/afr.rc b/tests/afr.rc
index 35f352df78f..241789903ba 100644
--- a/tests/afr.rc
+++ b/tests/afr.rc
@@ -105,3 +105,19 @@ function get_quorum_type()
local repl_id="$3"
cat $m/.meta/graphs/active/$v-replicate-$repl_id/private|grep quorum-type|awk '{print $3}'
}
+
+function afr_private_key_value()
+{
+ local v=$1
+ local m=$2
+ local replica_id=$3
+ local key=$4
+#xargs at the end will strip leading spaces
+ grep -E "^${key} = " $m/.meta/graphs/active/${v}-replicate-${replica_id}/private | cut -f2 -d'=' | xargs
+}
+
+function afr_anon_entry_count()
+{
+ local b=$1
+ ls $b/.glusterfs-anonymous-inode* | wc -l
+}
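
As a usage sketch, these invocations mirror how the tests added below exercise the new afr.rc helpers ($V0, $M0, $B0 and the timeouts come from the test harness):

    # read a key out of the replicate xlator's .meta private dump on a mount
    EXPECT "^1$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"

    # count entries under a brick's anonymous-inode directory;
    # "^0$" means all anon-inode entries have been cleaned up
    EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}0
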
diff --git a/tests/basic/afr/afr-anon-inode-no-quorum.t b/tests/basic/afr/afr-anon-inode-no-quorum.t
new file mode 100644
index 00000000000..896ba0c9b2c
--- /dev/null
+++ b/tests/basic/afr/afr-anon-inode-no-quorum.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+#Test that anon-inode entry is not cleaned up as long as there exists at least
+#one valid entry
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.readdir-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST touch $M0/a $M0/b
+
+gfid_a=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/a))
+gfid_b=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/b))
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST mv $M0/a $M0/a-new
+TEST mv $M0/b $M0/b-new
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST ! ls $M0/a
+TEST ! ls $M0/b
+anon_inode_name=$(ls -a $B0/${V0}0 | grep glusterfs-anonymous-inode)
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_a
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_b
+#Make sure index heal doesn't happen after enabling heal
+TEST setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1
+TEST rm -f $B0/${V0}1/.glusterfs/indices/xattrop/*
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+#Allow time for a scan
+sleep 5
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_a
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_b
+inum_b=$(STAT_INO $B0/${V0}0/$anon_inode_name/$gfid_b)
+TEST rm -f $M0/a-new
+TEST stat $M0/b-new
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}1
+EXPECT "$inum_b" STAT_INO $B0/${V0}0/b-new
+
+cleanup
diff --git a/tests/basic/afr/afr-anon-inode.t b/tests/basic/afr/afr-anon-inode.t
new file mode 100644
index 00000000000..f4cf37a2fa0
--- /dev/null
+++ b/tests/basic/afr/afr-anon-inode.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+#Tests that the afr-anon-inode cases work as expected
+#These are cases where entry-heal/name-heal doesn't know the entry for an inode,
+#so these inodes are kept in a special directory
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+EXPECT "^1$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
+TEST $CLI volume set $V0 cluster.use-anonymous-inode no
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
+TEST $CLI volume set $V0 cluster.use-anonymous-inode yes
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
+TEST mkdir -p $M0/d1/b $M0/d2/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST mv $M0/d2/a $M0/d1
+TEST mv $M0/d1/b $M0/d2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+anon_inode_name=$(ls -a $B0/${V0}0 | grep glusterfs-anonymous-inode)
+TEST [[ -d $B0/${V0}1/$anon_inode_name ]]
+TEST [[ -d $B0/${V0}2/$anon_inode_name ]]
+anon_gfid=$(gf_get_gfid_xattr $B0/${V0}0/$anon_inode_name)
+EXPECT "$anon_gfid" gf_get_gfid_xattr $B0/${V0}1/$anon_inode_name
+EXPECT "$anon_gfid" gf_get_gfid_xattr $B0/${V0}2/$anon_inode_name
+
+TEST ! ls $M0/$anon_inode_name
+EXPECT "^4$" echo $(ls -a $M0 | wc -l)
+
+#Test purging code path by shd
+TEST $CLI volume heal $V0 disable
+TEST mkdir $M0/l0 $M0/l1 $M0/l2
+TEST touch $M0/del-file $M0/del-file-nolink $M0/l0/file
+TEST ln $M0/del-file $M0/del-file-link
+TEST ln $M0/l0/file $M0/l1/file-link1
+TEST ln $M0/l0/file $M0/l2/file-link2
+TEST mkdir -p $M0/del-recursive-dir/d1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST rm -f $M0/del-file $M0/del-file-nolink
+TEST rm -rf $M0/del-recursive-dir
+TEST mv $M0/d1/a $M0/d2
+TEST mv $M0/l0/file $M0/l0/renamed-file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 0
+
+nolink_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-file-nolink))
+link_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-file))
+dir_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-recursive-dir))
+rename_dir_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/d1/a))
+rename_file_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/l0/file))
+TEST ! stat $M0/del-file
+TEST stat $B0/${V0}0/$anon_inode_name/$link_gfid
+TEST ! stat $M0/del-file-nolink
+TEST ! stat $B0/${V0}0/$anon_inode_name/$nolink_gfid
+TEST ! stat $M0/del-recursive-dir
+TEST stat $B0/${V0}0/$anon_inode_name/$dir_gfid
+TEST ! stat $M0/d1/a
+TEST stat $B0/${V0}0/$anon_inode_name/$rename_dir_gfid
+TEST ! stat $M0/l0/file
+TEST stat $B0/${V0}0/$anon_inode_name/$rename_file_gfid
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mv $M0/l1/file-link1 $M0/l1/renamed-file-link1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 1
+TEST ! stat $M0/l1/file-link1
+TEST stat $B0/${V0}1/$anon_inode_name/$rename_file_gfid
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST mv $M0/l2/file-link2 $M0/l2/renamed-file-link2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 2
+TEST ! stat $M0/l2/file-link2
+TEST stat $B0/${V0}2/$anon_inode_name/$rename_file_gfid
+
+#Simulate only anon-inodes present in all bricks
+TEST rm -f $M0/l0/renamed-file $M0/l1/renamed-file-link1 $M0/l2/renamed-file-link2
+
+#Test that shd doesn't clean up anon-inodes when some bricks are down
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+$CLI volume heal $V0
+sleep 5 #Allow time for completion of one scan
+TEST stat $B0/${V0}0/$anon_inode_name/$link_gfid
+TEST stat $B0/${V0}0/$anon_inode_name/$rename_dir_gfid
+TEST stat $B0/${V0}0/$anon_inode_name/$dir_gfid
+rename_dir_inum=$(STAT_INO $B0/${V0}0/$anon_inode_name/$rename_dir_gfid)
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 1
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}1
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}2
+
+#Test that rename indeed happened instead of rmdir/mkdir
+renamed_dir_inum=$(STAT_INO $B0/${V0}0/d2/a)
+EXPECT "$rename_dir_inum" echo $renamed_dir_inum
+cleanup;
diff --git a/tests/basic/afr/afr-seek.t b/tests/basic/afr/afr-seek.t
new file mode 100644
index 00000000000..c12ee011660
--- /dev/null
+++ b/tests/basic/afr/afr-seek.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+SEEK=$(dirname $0)/seek
+build_tester $(dirname $0)/../seek.c -o ${SEEK}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST mkdir -p $B0/${V0}{0..2}
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+
+TEST $CLI volume start $V0
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST ${SEEK} create ${M0}/test 0 1 1048576 1
+# Determine underlying filesystem allocation block size
+BSIZE="$(($(${SEEK} scan ${M0}/test hole 0) * 2))"
+
+TEST ${SEEK} create ${M0}/test 0 ${BSIZE} $((${BSIZE} * 4 + 512)) ${BSIZE}
+
+EXPECT "^0$" ${SEEK} scan ${M0}/test data 0
+EXPECT "^$((${BSIZE} / 2))$" ${SEEK} scan ${M0}/test data $((${BSIZE} / 2))
+EXPECT "^$((${BSIZE} - 1))$" ${SEEK} scan ${M0}/test data $((${BSIZE} - 1))
+EXPECT "^$((${BSIZE} * 4))$" ${SEEK} scan ${M0}/test data ${BSIZE}
+EXPECT "^$((${BSIZE} * 4))$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 4))
+EXPECT "^$((${BSIZE} * 5))$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 5))
+EXPECT "^$((${BSIZE} * 5 + 511))$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 5 + 511))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 5 + 512))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 6))
+
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole 0
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole $((${BSIZE} / 2))
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole $((${BSIZE} - 1))
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole ${BSIZE}
+EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 4))
+EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5))
+EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5 + 511))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5 + 512))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 6))
+
+rm -f ${SEEK}
+cleanup
+
+# Centos6 regression slaves seem to not support SEEK_DATA/SEEK_HOLE
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/basic/afr/durability-off.t b/tests/basic/afr/durability-off.t
index 155ffa09ef0..6e0f18b88f8 100644
--- a/tests/basic/afr/durability-off.t
+++ b/tests/basic/afr/durability-off.t
@@ -26,6 +26,8 @@ TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
EXPECT "^0$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
#Test that fsyncs happen when durability is on
TEST $CLI volume set $V0 cluster.ensure-durability on
TEST $CLI volume set $V0 performance.strict-write-ordering on
diff --git a/tests/basic/afr/entry-self-heal-anon-dir-off.t b/tests/basic/afr/entry-self-heal-anon-dir-off.t
new file mode 100644
index 00000000000..7bb6ee14193
--- /dev/null
+++ b/tests/basic/afr/entry-self-heal-anon-dir-off.t
@@ -0,0 +1,459 @@
+#!/bin/bash
+
+#This file checks if missing entry self-heal and entry self-heal are working
+#as expected.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function get_file_type {
+ stat -c "%a:%F:%g:%t:%T:%u" $1
+}
+
+function diff_dirs {
+ diff <(ls $1 | sort) <(ls $2 | sort)
+}
+
+function heal_status {
+ local f1_path="${1}/${3}"
+ local f2_path="${2}/${3}"
+ local insync=""
+ diff_dirs $f1_path $f2_path
+ if [ $? -eq 0 ];
+ then
+ insync="Y"
+ else
+ insync="N"
+ fi
+ local xattr11=$(get_hex_xattr trusted.afr.$V0-client-0 $f1_path)
+ local xattr12=$(get_hex_xattr trusted.afr.$V0-client-1 $f1_path)
+ local xattr21=$(get_hex_xattr trusted.afr.$V0-client-0 $f2_path)
+ local xattr22=$(get_hex_xattr trusted.afr.$V0-client-1 $f2_path)
+ local dirty1=$(get_hex_xattr trusted.afr.dirty $f1_path)
+ local dirty2=$(get_hex_xattr trusted.afr.dirty $f2_path)
+ if [ -z $xattr11 ]; then xattr11="000000000000000000000000"; fi
+ if [ -z $xattr12 ]; then xattr12="000000000000000000000000"; fi
+ if [ -z $xattr21 ]; then xattr21="000000000000000000000000"; fi
+ if [ -z $xattr22 ]; then xattr22="000000000000000000000000"; fi
+ if [ -z $dirty1 ]; then dirty1="000000000000000000000000"; fi
+ if [ -z $dirty2 ]; then dirty2="000000000000000000000000"; fi
+ echo ${insync}${xattr11}${xattr12}${xattr21}${xattr22}${dirty1}${dirty2}
+}
+
+function is_heal_done {
+ local zero_xattr="000000000000000000000000"
+ if [ "$(heal_status $@)" == "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function print_pending_heals {
+ local result=":"
+ for i in "$@";
+ do
+ if [ "N" == $(is_heal_done $B0/${V0}0 $B0/${V0}1 $i) ];
+ then
+ result="$result:$i"
+ fi
+ done
+#To prevent any match for EXPECT_WITHIN, print a char non-existent in file-names
+ if [ $result == ":" ]; then result="~"; fi
+ echo $result
+}
+
+zero_xattr="000000000000000000000000"
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 cluster.use-anonymous-inode off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.readdir-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --use-readdirp=no $M0
+cd $M0
+#_me_ marks dirs on which missing-entry self-heal happens, _heal marks dirs where dir self-heal happens
+#spb is split-brain, fool is all fool
+
+#source_self_accusing means there exists a source and a sink which self-accuses.
+#This simulates failures where fops failed on the bricks without the brick going down.
+#Something like EACCES/EDQUOT etc.
+
+TEST mkdir spb_heal spb spb_me_heal spb_me fool_heal fool_me v1_fool_heal v1_fool_me source_creations_heal source_deletions_heal source_creations_me source_deletions_me v1_dirty_me v1_dirty_heal source_self_accusing
+TEST mkfifo source_deletions_heal/fifo
+TEST mknod source_deletions_heal/block b 4 5
+TEST mknod source_deletions_heal/char c 1 5
+TEST touch source_deletions_heal/file
+TEST ln -s source_deletions_heal/file source_deletions_heal/slink
+TEST mkdir source_deletions_heal/dir1
+TEST mkdir source_deletions_heal/dir1/dir2
+
+TEST mkfifo source_deletions_me/fifo
+TEST mknod source_deletions_me/block b 4 5
+TEST mknod source_deletions_me/char c 1 5
+TEST touch source_deletions_me/file
+TEST ln -s source_deletions_me/file source_deletions_me/slink
+TEST mkdir source_deletions_me/dir1
+TEST mkdir source_deletions_me/dir1/dir2
+
+TEST mkfifo source_self_accusing/fifo
+TEST mknod source_self_accusing/block b 4 5
+TEST mknod source_self_accusing/char c 1 5
+TEST touch source_self_accusing/file
+TEST ln -s source_self_accusing/file source_self_accusing/slink
+TEST mkdir source_self_accusing/dir1
+TEST mkdir source_self_accusing/dir1/dir2
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST touch spb_heal/0 spb/0 spb_me_heal/0 spb_me/0 fool_heal/0 fool_me/0 v1_fool_heal/0 v1_fool_me/0 v1_dirty_heal/0 v1_dirty_me/0
+TEST rm -rf source_deletions_heal/fifo source_deletions_heal/block source_deletions_heal/char source_deletions_heal/file source_deletions_heal/slink source_deletions_heal/dir1
+TEST rm -rf source_deletions_me/fifo source_deletions_me/block source_deletions_me/char source_deletions_me/file source_deletions_me/slink source_deletions_me/dir1
+TEST rm -rf source_self_accusing/fifo source_self_accusing/block source_self_accusing/char source_self_accusing/file source_self_accusing/slink source_self_accusing/dir1
+
+#Test that the files are deleted
+TEST ! stat $B0/${V0}1/source_deletions_heal/fifo
+TEST ! stat $B0/${V0}1/source_deletions_heal/block
+TEST ! stat $B0/${V0}1/source_deletions_heal/char
+TEST ! stat $B0/${V0}1/source_deletions_heal/file
+TEST ! stat $B0/${V0}1/source_deletions_heal/slink
+TEST ! stat $B0/${V0}1/source_deletions_heal/dir1
+TEST ! stat $B0/${V0}1/source_deletions_me/fifo
+TEST ! stat $B0/${V0}1/source_deletions_me/block
+TEST ! stat $B0/${V0}1/source_deletions_me/char
+TEST ! stat $B0/${V0}1/source_deletions_me/file
+TEST ! stat $B0/${V0}1/source_deletions_me/slink
+TEST ! stat $B0/${V0}1/source_deletions_me/dir1
+TEST ! stat $B0/${V0}1/source_self_accusing/fifo
+TEST ! stat $B0/${V0}1/source_self_accusing/block
+TEST ! stat $B0/${V0}1/source_self_accusing/char
+TEST ! stat $B0/${V0}1/source_self_accusing/file
+TEST ! stat $B0/${V0}1/source_self_accusing/slink
+TEST ! stat $B0/${V0}1/source_self_accusing/dir1
+
+
+TEST mkfifo source_creations_heal/fifo
+TEST mknod source_creations_heal/block b 4 5
+TEST mknod source_creations_heal/char c 1 5
+TEST touch source_creations_heal/file
+TEST ln -s source_creations_heal/file source_creations_heal/slink
+TEST mkdir source_creations_heal/dir1
+TEST mkdir source_creations_heal/dir1/dir2
+
+TEST mkfifo source_creations_me/fifo
+TEST mknod source_creations_me/block b 4 5
+TEST mknod source_creations_me/char c 1 5
+TEST touch source_creations_me/file
+TEST ln -s source_creations_me/file source_creations_me/slink
+TEST mkdir source_creations_me/dir1
+TEST mkdir source_creations_me/dir1/dir2
+
+$CLI volume stop $V0
+
+#simulate fool fool scenario for fool_* dirs
+setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1/{fool_heal,fool_me}
+setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/{fool_heal,fool_me}
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}1/{v1_fool_heal,v1_fool_me}
+
+#Simulate v1-dirty(self-accusing but no pending ops on others) scenario for v1-dirty
+setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1/v1_dirty_{heal,me}
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}1/v1_dirty_{heal,me}
+
+$CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+TEST touch spb_heal/1 spb/0 spb_me_heal/1 spb_me/0 fool_heal/1 fool_me/1 v1_fool_heal/1 v1_fool_me/1
+
+$CLI volume stop $V0
+
+#simulate fool fool scenario for fool_* dirs
+setfattr -x trusted.afr.$V0-client-1 $B0/${V0}0/{fool_heal,fool_me}
+setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/{fool_heal,fool_me}
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/{v1_fool_heal,v1_fool_me}
+
+#simulate self-accusing for source_self_accusing
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000006 $B0/${V0}0/source_self_accusing
+
+$CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+# Check if conservative merges happened correctly on _me_ dirs
+TEST stat spb_me_heal/1
+TEST stat $B0/${V0}0/spb_me_heal/1
+TEST stat $B0/${V0}1/spb_me_heal/1
+
+TEST stat spb_me_heal/0
+TEST stat $B0/${V0}0/spb_me_heal/0
+TEST stat $B0/${V0}1/spb_me_heal/0
+
+TEST stat fool_me/1
+TEST stat $B0/${V0}0/fool_me/1
+TEST stat $B0/${V0}1/fool_me/1
+
+TEST stat fool_me/0
+TEST stat $B0/${V0}0/fool_me/0
+TEST stat $B0/${V0}1/fool_me/0
+
+TEST stat v1_fool_me/0
+TEST stat $B0/${V0}0/v1_fool_me/0
+TEST stat $B0/${V0}1/v1_fool_me/0
+
+TEST stat v1_fool_me/1
+TEST stat $B0/${V0}0/v1_fool_me/1
+TEST stat $B0/${V0}1/v1_fool_me/1
+
+TEST stat v1_dirty_me/0
+TEST stat $B0/${V0}0/v1_dirty_me/0
+TEST stat $B0/${V0}1/v1_dirty_me/0
+
+#Check if files that have gfid-mismatches in _me_ are giving EIO
+TEST ! stat spb_me/0
+
+#Check if stale files are deleted on access
+TEST ! stat source_deletions_me/fifo
+TEST ! stat $B0/${V0}0/source_deletions_me/fifo
+TEST ! stat $B0/${V0}1/source_deletions_me/fifo
+TEST ! stat source_deletions_me/block
+TEST ! stat $B0/${V0}0/source_deletions_me/block
+TEST ! stat $B0/${V0}1/source_deletions_me/block
+TEST ! stat source_deletions_me/char
+TEST ! stat $B0/${V0}0/source_deletions_me/char
+TEST ! stat $B0/${V0}1/source_deletions_me/char
+TEST ! stat source_deletions_me/file
+TEST ! stat $B0/${V0}0/source_deletions_me/file
+TEST ! stat $B0/${V0}1/source_deletions_me/file
+TEST ! stat source_deletions_me/file
+TEST ! stat $B0/${V0}0/source_deletions_me/file
+TEST ! stat $B0/${V0}1/source_deletions_me/file
+TEST ! stat source_deletions_me/dir1/dir2
+TEST ! stat $B0/${V0}0/source_deletions_me/dir1/dir2
+TEST ! stat $B0/${V0}1/source_deletions_me/dir1/dir2
+TEST ! stat source_deletions_me/dir1
+TEST ! stat $B0/${V0}0/source_deletions_me/dir1
+TEST ! stat $B0/${V0}1/source_deletions_me/dir1
+
+#Test if the files created as part of access are healed correctly
+r=$(get_file_type source_creations_me/fifo)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/fifo
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/fifo
+TEST [ -p source_creations_me/fifo ]
+
+r=$(get_file_type source_creations_me/block)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/block
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/block
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}1/source_creations_me/block
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}0/source_creations_me/block
+TEST [ -b source_creations_me/block ]
+
+r=$(get_file_type source_creations_me/char)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/char
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}1/source_creations_me/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}0/source_creations_me/char
+TEST [ -c source_creations_me/char ]
+
+r=$(get_file_type source_creations_me/file)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/file
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/file
+TEST [ -f source_creations_me/file ]
+
+r=$(get_file_type source_creations_me/slink)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/slink
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/slink
+TEST [ -h source_creations_me/slink ]
+
+r=$(get_file_type source_creations_me/dir1/dir2)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/dir1/dir2
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/dir1/dir2
+TEST [ -d source_creations_me/dir1/dir2 ]
+
+r=$(get_file_type source_creations_me/dir1)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/dir1
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/dir1
+TEST [ -d source_creations_me/dir1 ]
+
+#Trigger heal and check _heal dirs are healed properly
+#Trigger a change in the event generation number so that inodes get refreshed during lookup
+TEST kill_brick $V0 $H0 $B0/${V0}1
+$CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST stat spb_heal
+TEST stat spb_me_heal
+TEST stat fool_heal
+TEST stat fool_me
+TEST stat v1_fool_heal
+TEST stat v1_fool_me
+TEST stat source_deletions_heal
+TEST stat source_deletions_me
+TEST stat source_self_accusing
+TEST stat source_creations_heal
+TEST stat source_creations_me
+TEST stat v1_dirty_heal
+TEST stat v1_dirty_me
+TEST $CLI volume stop $V0
+TEST rm -rf $B0/${V0}{0,1}/.glusterfs/indices/xattrop/*
+
+$CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+rm -f $M0/FILE
+EXPECT "1" count_index_entries $B0/${V0}0
+EXPECT "1" count_index_entries $B0/${V0}1
+
+TEST $CLI volume stop $V0;
+
+#Create entries for fool_heal and fool_me to ensure they are fully healed and dirty xattrs erased, before triggering index heal
+create_brick_xattrop_entry $B0/${V0}0 fool_heal fool_me source_creations_heal/dir1
+
+$CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+$CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0;
+EXPECT_WITHIN $HEAL_TIMEOUT "~" print_pending_heals spb_heal spb_me_heal fool_heal fool_me v1_fool_heal v1_fool_me source_deletions_heal source_deletions_me source_creations_heal source_creations_me v1_dirty_heal v1_dirty_me source_self_accusing
+
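+#Each heal_status check expects heal to have completed ("Y") with the afr pending and dirty xattrs reset to all zeroes on both bricks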
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 spb_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 spb_me_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 fool_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 fool_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_fool_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_fool_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_deletions_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_deletions_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_self_accusing
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_creations_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_creations_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_dirty_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_dirty_me
+
+#Don't access the files/dirs from mount point as that may cause self-heals
+# Check if conservative merges happened correctly on heal dirs
+TEST stat $B0/${V0}0/spb_heal/1
+TEST stat $B0/${V0}1/spb_heal/1
+
+TEST stat $B0/${V0}0/spb_heal/0
+TEST stat $B0/${V0}1/spb_heal/0
+
+TEST stat $B0/${V0}0/fool_heal/1
+TEST stat $B0/${V0}1/fool_heal/1
+
+TEST stat $B0/${V0}0/fool_heal/0
+TEST stat $B0/${V0}1/fool_heal/0
+
+TEST stat $B0/${V0}0/v1_fool_heal/0
+TEST stat $B0/${V0}1/v1_fool_heal/0
+
+TEST stat $B0/${V0}0/v1_fool_heal/1
+TEST stat $B0/${V0}1/v1_fool_heal/1
+
+TEST stat $B0/${V0}0/v1_dirty_heal/0
+TEST stat $B0/${V0}1/v1_dirty_heal/0
+
+#Check if files that have gfid-mismatches in spb are giving EIO
+TEST ! stat spb/0
+
+#Check if stale files are deleted on access
+TEST ! stat $B0/${V0}0/source_deletions_heal/fifo
+TEST ! stat $B0/${V0}1/source_deletions_heal/fifo
+TEST ! stat $B0/${V0}0/source_deletions_heal/block
+TEST ! stat $B0/${V0}1/source_deletions_heal/block
+TEST ! stat $B0/${V0}0/source_deletions_heal/char
+TEST ! stat $B0/${V0}1/source_deletions_heal/char
+TEST ! stat $B0/${V0}0/source_deletions_heal/file
+TEST ! stat $B0/${V0}1/source_deletions_heal/file
+TEST ! stat $B0/${V0}0/source_deletions_heal/file
+TEST ! stat $B0/${V0}1/source_deletions_heal/file
+TEST ! stat $B0/${V0}0/source_deletions_heal/dir1/dir2
+TEST ! stat $B0/${V0}1/source_deletions_heal/dir1/dir2
+TEST ! stat $B0/${V0}0/source_deletions_heal/dir1
+TEST ! stat $B0/${V0}1/source_deletions_heal/dir1
+
+#Check if stale files are deleted on access
+TEST ! stat $B0/${V0}0/source_self_accusing/fifo
+TEST ! stat $B0/${V0}1/source_self_accusing/fifo
+TEST ! stat $B0/${V0}0/source_self_accusing/block
+TEST ! stat $B0/${V0}1/source_self_accusing/block
+TEST ! stat $B0/${V0}0/source_self_accusing/char
+TEST ! stat $B0/${V0}1/source_self_accusing/char
+TEST ! stat $B0/${V0}0/source_self_accusing/file
+TEST ! stat $B0/${V0}1/source_self_accusing/file
+TEST ! stat $B0/${V0}0/source_self_accusing/file
+TEST ! stat $B0/${V0}1/source_self_accusing/file
+TEST ! stat $B0/${V0}0/source_self_accusing/dir1/dir2
+TEST ! stat $B0/${V0}1/source_self_accusing/dir1/dir2
+TEST ! stat $B0/${V0}0/source_self_accusing/dir1
+TEST ! stat $B0/${V0}1/source_self_accusing/dir1
+
+#Test if the files created as part of full self-heal are healed correctly
+r=$(get_file_type $B0/${V0}0/source_creations_heal/fifo)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/fifo
+TEST [ -p $B0/${V0}0/source_creations_heal/fifo ]
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}1/source_creations_heal/block
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}0/source_creations_heal/block
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/block)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/block
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/char)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}1/source_creations_heal/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}0/source_creations_heal/char
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/file)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/file
+TEST [ -f $B0/${V0}0/source_creations_heal/file ]
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/slink)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/slink
+TEST [ -h $B0/${V0}0/source_creations_heal/slink ]
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/dir1/dir2)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/dir1/dir2
+TEST [ -d $B0/${V0}0/source_creations_heal/dir1/dir2 ]
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/dir1)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/dir1
+TEST [ -d $B0/${V0}0/source_creations_heal/dir1 ]
+
+cd -
+
+#Anonymous directory shouldn't be created
+TEST mkdir $M0/rename-dir
+before_rename=$(STAT_INO $B0/${V0}1/rename-dir)
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mv $M0/rename-dir $M0/new-name
+TEST $CLI volume start $V0 force
+#'spb' is in split-brain so pending-heal-count will be 2
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+after_rename=$(STAT_INO $B0/${V0}1/new-name)
+EXPECT "0" echo $(ls -a $B0/${V0}0/ | grep anonymous-inode | wc -l)
+EXPECT "0" echo $(ls -a $B0/${V0}1/ | grep anonymous-inode | wc -l)
+EXPECT_NOT "$before_rename" echo $after_rename
+cleanup
diff --git a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
index f4aa351e461..35e295dc170 100644
--- a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
+++ b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
@@ -168,8 +168,8 @@ TEST [ "$gfid_1" != "$gfid_2" ]
#We know that second brick has the bigger size file
BIGGER_FILE_MD5=$(md5sum $B0/${V0}1/f3 | cut -d\ -f1)
-TEST ls $M0/f3
-TEST cat $M0/f3
+TEST ls $M0 #Trigger entry heal via readdir inode refresh
+TEST cat $M0/f3 #Trigger data heal via readv inode refresh
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#gfid split-brain should be resolved
@@ -215,8 +215,8 @@ TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
-TEST ls $M0/f4
-TEST cat $M0/f4
+TEST ls $M0 #Trigger entry heal via readdir inode refresh
+TEST cat $M0/f4 #Trigger data heal via readv inode refresh
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#gfid split-brain should be resolved
@@ -227,4 +227,3 @@ HEALED_MD5=$(md5sum $B0/${V0}2/f4 | cut -d\ -f1)
TEST [ "$MAJORITY_MD5" == "$HEALED_MD5" ]
cleanup;
-#G_TESTDEF_TEST_STATUS_NETBSD7=1501390
diff --git a/tests/basic/afr/halo.t b/tests/basic/afr/halo.t
new file mode 100644
index 00000000000..3f61f5a0402
--- /dev/null
+++ b/tests/basic/afr/halo.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+#Tests that basic halo functionality works as expected
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
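+#Echo the index (0 or 1) of the child that is currently marked up on the mount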
+function get_up_child()
+{
+ if [ "1" == $(afr_private_key_value $V0 $M0 0 "child_up\[0\]") ];
+ then
+ echo 0
+ elif [ "1" == $(afr_private_key_value $V0 $M0 0 "child_up\[1\]") ]
+ then
+ echo 1
+ fi
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.halo-enabled yes
+TEST $CLI volume set $V0 cluster.halo-max-replicas 1
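+#With halo-max-replicas set to 1, only one child should be marked up for I/O at a time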
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+EXPECT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[0\]"
+EXPECT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[1\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[0\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[1\]"
+
+up_id=$(get_up_child)
+TEST [[ ! -z "$up_id" ]]
+
+down_id=$((1-up_id))
+
+TEST kill_brick $V0 $H0 $B0/${V0}${up_id}
+#As halo-max-replicas is configured to be 1, the previously down child should come up now
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${up_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "child_up\[${up_id}\]"
+EXPECT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[${up_id}\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[${down_id}\]"
+
+#Bring the brick back up and the state should be restored
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${up_id}\]"
+
+up_id=$(get_up_child)
+TEST [[ ! -z "$up_id" ]]
+down_id=$((1-up_id))
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${up_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "child_up\[${up_id}\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[0\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[1\]"
+
+cleanup;
diff --git a/tests/basic/afr/rename-data-loss.t b/tests/basic/afr/rename-data-loss.t
new file mode 100644
index 00000000000..256ee2aafce
--- /dev/null
+++ b/tests/basic/afr/rename-data-loss.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+#Self-heal tests
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 write-behind off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+cd $M0
+TEST `echo "line1" >> file1`
+TEST mkdir dir1
+TEST mkdir dir2
+TEST mkdir -p dir1/dira/dirb
+TEST `echo "line1">>dir1/dira/dirb/file1`
+TEST mkdir delete_me
+TEST `echo "line1" >> delete_me/file1`
+
+#brick0 witnesses the second writes while brick1 is down.
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST `echo "line2" >> file1`
+TEST `echo "line2" >> dir1/dira/dirb/file1`
+TEST `echo "line2" >> delete_me/file1`
+
+#Toggle the bricks that are up/down.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/brick0
+
+#Rename while brick0, the 'source' for data self-heals, is down.
+mv file1 file2
+mv dir1/dira dir2
+
+#Delete a dir when brick0 is down.
+rm -rf delete_me
+cd -
+
+#Bring everything up and trigger heal
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/brick0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/brick1
+
+#Remount to avoid reading from caches
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+EXPECT "line2" tail -1 $M0/file2
+EXPECT "line2" tail -1 $M0/dir2/dira/dirb/file1
+TEST ! stat $M0/delete_me/file1
+TEST ! stat $M0/delete_me
+
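+#The hidden anonymous-inode directory itself should still exist on both bricks even though its entries have been cleaned up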
+anon_inode_name=$(ls -a $B0/brick0 | grep glusterfs-anonymous-inode)
+TEST [[ -d $B0/brick0/$anon_inode_name ]]
+TEST [[ -d $B0/brick1/$anon_inode_name ]]
+cleanup
diff --git a/tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t b/tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t
new file mode 100644
index 00000000000..7c249c4bcbd
--- /dev/null
+++ b/tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+#Test client-side split-brain resolution
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+
+count_files () {
+ ls $1 | wc -l
+}
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST mkdir $M0/data
+TEST touch $M0/data/file
+
+
+############ Client side healing using favorite-child-policy = mtime #################
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/data/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/data/file bs=1024 count=1024
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
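+#Record the md5sum of the copy with the newer mtime; the mtime policy should pick it as the heal source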
+mtime1=$(get_mtime $B0/${V0}0/data/file)
+mtime2=$(get_mtime $B0/${V0}1/data/file)
+if (( $(echo "$mtime1 > $mtime2" | bc -l) )); then
+ LATEST_MTIME_MD5=$(md5sum $B0/${V0}0/data/file | cut -d\ -f1)
+else
+ LATEST_MTIME_MD5=$(md5sum $B0/${V0}1/data/file | cut -d\ -f1)
+fi
+
+#file will be in split-brain
+cat $M0/data/file > /dev/null
+EXPECT "1" echo $?
+
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" afr_get_split_brain_count $V0
+cat $M0/data/file > /dev/null
+EXPECT "0" echo $?
+M0_MD5=$(md5sum $M0/data/file | cut -d\ -f1)
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_get_split_brain_count $V0
+TEST [ "$LATEST_MTIME_MD5" == "$M0_MD5" ]
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_MD5=$(md5sum $B0/${V0}0/data/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/data/file | cut -d\ -f1)
+TEST [ "$LATEST_MTIME_MD5" == "$B0_MD5" ]
+TEST [ "$LATEST_MTIME_MD5" == "$B1_MD5" ]
+
+############ Client side directory conservative merge #################
+TEST $CLI volume reset $V0 cluster.favorite-child-policy
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/data/test
+files=$(count_files $M0/data)
+EXPECT "2" echo $files
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/data/test1
+files=$(count_files $M0/data)
+EXPECT "2" echo $files
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+#data dir will be in entry split-brain
+ls $M0/data > /dev/null
+EXPECT "2" echo $?
+
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" afr_get_split_brain_count $V0
+
+
+ls $M0/data > /dev/null
+EXPECT "0" echo $?
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_get_split_brain_count $V0
+#Entry split-brain is gone, but data self-heal is pending on the files
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+cat $M0/data/test > /dev/null
+cat $M0/data/test1 > /dev/null
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+files=$(count_files $M0/data)
+EXPECT "3" echo $files
+
+TEST force_umount $M0
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+cleanup
diff --git a/tests/basic/changelog/changelog-history.t b/tests/basic/changelog/changelog-history.t
index 106e5db9211..ea952619652 100644
--- a/tests/basic/changelog/changelog-history.t
+++ b/tests/basic/changelog/changelog-history.t
@@ -5,6 +5,7 @@
cleanup;
+SCRIPT_TIMEOUT=300
HISTORY_BIN_PATH=$(dirname $0)/../../utils/changelog
build_tester $HISTORY_BIN_PATH/get-history.c -lgfchangelog
@@ -68,18 +69,21 @@ TEST $CLI volume set $V0 changelog.changelog off
sleep 3
time_after_disable=$(date '+%s')
+TEST $CLI volume set $V0 changelog.changelog on
+sleep 5
+
#Passes, gives the changelogs till continuous changelogs are available
# but returns 1
-EXPECT "1" $HISTORY_BIN_PATH/get-history $time_after_enable1 $time_in_sec_htime2
+EXPECT_WITHIN 10 "1" $HISTORY_BIN_PATH/get-history $time_after_enable1 $time_in_sec_htime2
#Fails as start falls between htime files
-EXPECT "-3" $HISTORY_BIN_PATH/get-history $time_between_htime $time_in_sec_htime1
+EXPECT_WITHIN 10 "-3" $HISTORY_BIN_PATH/get-history $time_between_htime $time_in_sec_htime1
#Passes as start and end falls in same htime file
-EXPECT "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime1 $time_in_sec_htime2
+EXPECT_WITHIN 10 "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime1 $time_in_sec_htime2
#Passes, gives the changelogs till continuous changelogs are available
-EXPECT "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime2 $time_after_disable
+EXPECT_WITHIN 10 "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime2 $time_after_disable
TEST rm $HISTORY_BIN_PATH/get-history
rm -rf /tmp/scratch_v1/*
diff --git a/tests/basic/ctime/ctime-utimesat.t b/tests/basic/ctime/ctime-utimesat.t
new file mode 100644
index 00000000000..540e57aec83
--- /dev/null
+++ b/tests/basic/ctime/ctime-utimesat.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.read-after-open off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+touch $M0/FILE
+
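+#A freshly created file should report identical atime, mtime and ctime (checked with subsecond precision)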
+atime=$(stat -c "%.X" $M0/FILE)
+EXPECT $atime stat -c "%.Y" $M0/FILE
+EXPECT $atime stat -c "%.Z" $M0/FILE
+
+cleanup
diff --git a/tests/basic/distribute/file-rename.t b/tests/basic/distribute/file-rename.t
new file mode 100644
index 00000000000..63111b8ad8f
--- /dev/null
+++ b/tests/basic/distribute/file-rename.t
@@ -0,0 +1,1021 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../common-utils.rc
+
+# Test overview:
+# Test all combinations of src-hashed/src-cached/dst-hashed/dst-cached
+
+hashdebugxattr="dht.file.hashed-subvol."
+
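+#Derive the brick index from the trusted.glusterfs.pathinfo xattr of the given path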
+function get_brick_index {
+ local inpath=$1
+ brickroot=$(getfattr -m . -n trusted.glusterfs.pathinfo $inpath | tr ' ' '\n' | sed -n 's/<POSIX(\(.*\)):.*:.*>.*/\1/p')
+ echo ${brickroot:(-1)}
+}
+
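+#Map a client subvolume name to its brick path via the mount's .meta interface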
+function get_brick_path_for_subvol {
+ local in_subvol=$1
+ local in_brickpath
+
+ in_brickpath=$(cat "$M0/.meta/graphs/active/$in_subvol/options/remote-subvolume")
+ echo $in_brickpath
+
+}
+
+#Checks that the file exists only on the hashed and/or cached subvols
+function file_existence_check
+{
+ local in_file_path=$1
+ local in_hashed=$2
+ local in_cached=$3
+ local in_client_subvol
+ local in_brickpath
+ local ret
+
+ for i in {0..3}
+ do
+ in_client_subvol="$V0-client-$i"
+ in_brickpath=$(cat "$M0/.meta/graphs/active/$in_client_subvol/options/remote-subvolume")
+ stat "$in_brickpath/$in_file_path" 2>/dev/null
+ ret=$?
+ # Either the linkto or the data file must exist on the hashed
+ if [ "$in_client_subvol" == "$in_hashed" ]; then
+ if [ $ret -ne 0 ]; then
+ return 1
+ fi
+ continue
+ fi
+
+ # If the cached is non-null, we expect the file to exist on it
+ if [ "$in_client_subvol" == "$in_cached" ]; then
+ if [ $ret -ne 0 ]; then
+ return 1
+ fi
+ continue
+ fi
+
+ if [ $ret -eq 0 ]; then
+ return 2
+ fi
+ done
+ return 0
+}
+
+
+# Check that the file does not exist on any of the bricks of the volume
+function file_does_not_exist
+{
+ local inpath=$1
+ for i in `seq 0 3`
+ do
+ file_path=$B0/$V0-$i/$inpath
+ if [ -f "$file_path" ]; then
+ echo "1"
+ return 1
+ fi
+ done
+ return 0
+}
+
+
+# Input: filename dirpath
+function get_hash_subvol
+{
+ hash_subvol=$(getfattr --only-values -n "$hashdebugxattr$1" $2 2>/dev/null)
+}
+
+
+
+# Find the first filename that hashes to a subvol
+# other than $1
+
+function first_filename_with_diff_hashsubvol
+{
+ local in_subvol=$1
+ local in_path=$2
+ local file_pattern=$3
+ local in_hash_subvol
+
+ for i in {1..100}
+ do
+ dstfilename="$file_pattern$i"
+        get_hash_subvol "$dstfilename" "$in_path"
+        in_hash_subvol=$hash_subvol
+ if [ "$in_subvol" != "$in_hash_subvol" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+# Find the first filename that hashes to the same subvol
+# as $1
+function first_filename_with_same_hashsubvol
+{
+ local in_subvol=$1
+ local in_path=$2
+ local in_hash_subvol
+ local file_pattern=$3
+
+ for i in {1..100}
+ do
+ dstfilename="$file_pattern$i"
+ get_hash_subvol "$dstfilename" "$in_path"
+ in_hash_subvol=$hash_subvol
+# echo $in_hash_subvol
+ if [ "$in_subvol" == "$in_hash_subvol" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
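+#Echoes "1" if the given brick path is a dht linkto file, "0" if it is a data file, "2" if it does not exist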
+function file_is_linkto
+{
+ local brick_filepath=$1
+
+ test=$(stat $brick_filepath 2>&1)
+ if [ $? -ne 0 ]; then
+ echo "2"
+ return
+ fi
+
+ test=$(getfattr -n trusted.glusterfs.dht.linkto -e text $brick_filepath 2>&1)
+
+ if [ $? -eq 0 ]; then
+ echo "1"
+ else
+ echo "0"
+ fi
+}
+
+
+
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+
+# We need at least 4 bricks to test all combinations of hashed and
+# cached files
+
+TEST $CLI volume create $V0 $H0:$B0/$V0-{0..3}
+TEST $CLI volume start $V0
+
+# Mount using FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+
+################################################################
+# The first set of tests are those where the Dst file does not exist
+# dst-cached = NULL
+#
+###############################################################
+
+################### Test 1 ####################################
+#
+# src-hashed = src-cached = dst-hashed
+# dst-cached = null
+# src-file = src-1
+
+echo " **** Test 1 **** "
+
+src_file="src-1"
+
+TEST mkdir $M0/test-1
+TEST touch $M0/test-1/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-1
+src_hashed=$hash_subvol
+#echo "Hashed subvol for $src_file: " $src_hashed
+
+# Find a file name that hashes to the same subvol as $src_file
+TEST first_filename_with_same_hashsubvol "$src_hashed" "$M0/test-1" "dst-"
+#echo "dst-file name: " $dstfilename
+dst_hashed=$src_hashed
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-1/$src_file $M0/test-1/$dstfilename
+
+# Expected:
+# dst file is accessible from the mount point
+# dst file exists only on the hashed brick.
+# no linkto files on any bricks
+# src files do not exist
+
+
+TEST stat $M0/test-1/$dstfilename 2>/dev/null
+TEST file_existence_check test-1/$dstfilename $src_hashed
+TEST file_does_not_exist test-1/$src_file
+EXPECT "0" file_is_linkto $src_hash_brick/test-1/$dstfilename
+
+
+################### Test 2 ####################################
+
+# src-hashed = src-cached != dst-hashed
+# dst-cached = null
+
+echo " **** Test 2 **** "
+
+src_file="src-1"
+
+TEST mkdir $M0/test-2
+TEST touch $M0/test-2/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-2
+src_hashed=$hash_subvol
+#echo "Hashed subvol for $src_file: " $src_hashed
+
+# Find a file name that hashes to a different subvol than $src_file
+TEST first_filename_with_diff_hashsubvol "$src_hashed" "$M0/test-2" "dst-"
+echo "dst-file name: " $dstfilename
+TEST get_hash_subvol $dstfilename $M0/test-2
+dst_hashed=$hash_subvol
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+dst_hash_brick=$(get_brick_path_for_subvol $dst_hashed)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-2/$src_file $M0/test-2/$dstfilename
+
+
+# Expected:
+# dst file is accessible from the mount point
+# dst data file on src_hashed and dst linkto file on dst_hashed
+# src files do not exist
+
+
+TEST stat $M0/test-2/$dstfilename 2>/dev/null
+TEST file_existence_check test-2/$dstfilename $dst_hashed $src_hashed
+TEST file_does_not_exist test-2/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-2/$dstfilename
+EXPECT "0" file_is_linkto $src_hash_brick/test-2/$dstfilename
+
+################### Test 3 ####################################
+
+# src-hashed = dst-hashed != src-cached
+
+echo " **** Test 3 **** "
+
+src_file0="abc-1"
+
+# 1. Create src file with src_cached != src_hashed
+TEST mkdir $M0/test-3
+TEST touch $M0/test-3/$src_file0
+
+TEST get_hash_subvol $src_file0 $M0/test-3
+src_cached=$hash_subvol
+#echo "Hashed subvol for $src_file0: " $src_cached
+
+# Find a file name that hashes to a different subvol than $src_file0
+TEST first_filename_with_diff_hashsubvol "$src_cached" "$M0/test-3" "src-"
+echo "dst-file name: " $dstfilename
+src_file=$dstfilename
+
+TEST mv $M0/test-3/$src_file0 $M0/test-3/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-3
+src_hashed=$hash_subvol
+
+
+# 2. Rename src to dst
+TEST first_filename_with_same_hashsubvol "$src_hashed" "$M0/test-3" "dst-"
+#echo "dst-file name: " $dstfilename
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+src_cached_brick=$(get_brick_path_for_subvol $src_cached)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-3/$src_file $M0/test-3/$dstfilename
+
+
+# Expected:
+# dst file is accessible from the mount point
+TEST stat $M0/test-3/$dstfilename 2>/dev/null
+
+# src file does not exist
+TEST file_does_not_exist test-3/$src_file
+
+# dst linkto file on src_hashed and dst data file on src_cached
+TEST file_existence_check test-3/$dstfilename $src_hashed $src_cached
+
+EXPECT "1" file_is_linkto $src_hash_brick/test-3/$dstfilename
+EXPECT "0" file_is_linkto $src_cached_brick/test-3/$dstfilename
+
+
+
+################### Test 4 ####################################
+
+# src-cached = dst-hashed != src-hashed
+
+echo " **** Test 4 **** "
+
+src_file0="abc-1"
+
+# 1. Create src file with src_cached != src_hashed
+TEST mkdir $M0/test-4
+TEST touch $M0/test-4/$src_file0
+
+TEST get_hash_subvol $src_file0 $M0/test-4
+src_cached=$hash_subvol
+#echo "Hashed subvol for $src_file0: " $src_cached
+
+# Find a file name that hashes to a different subvol than $src_file0
+TEST first_filename_with_diff_hashsubvol "$src_cached" "$M0/test-4" "src-"
+src_file=$dstfilename
+
+TEST mv $M0/test-4/$src_file0 $M0/test-4/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-4
+src_hashed=$hash_subvol
+
+
+# 2. Rename src to dst
+TEST first_filename_with_same_hashsubvol "$src_cached" "$M0/test-4" "dst-"
+#echo "dst-file name: " $dstfilename
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+src_cached_brick=$(get_brick_path_for_subvol $src_cached)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-4/$src_file $M0/test-4/$dstfilename
+
+# Expected:
+# dst file is accessible from the mount point
+TEST stat $M0/test-4/$dstfilename 2>/dev/null
+
+# src file does not exist
+TEST file_does_not_exist test-4/$src_file
+
+# dst data file on src_cached, which is also the dst hashed subvol, so no linkto file is expected
+TEST file_existence_check test-4/$dstfilename $src_cached
+
+EXPECT "0" file_is_linkto $src_cached_brick/test-4/$dstfilename
+
+
+################### Test 5 ####################################
+
+# src-cached != src-hashed
+# src-hashed != dst-hashed
+# src-cached != dst-hashed
+
+
+echo " **** Test 5 **** "
+
+# 1. Create src and dst files
+
+TEST mkdir $M0/test-5
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-5" "abc-"
+src_file0=$dstfilename
+
+TEST touch $M0/test-5/$src_file0
+
+TEST get_hash_subvol $src_file0 $M0/test-5
+src_cached=$hash_subvol
+#echo "Hashed subvol for $src_file0: " $src_cached
+
+# Find a src file name that hashes to $V0-client-1, so src_hashed differs from src_cached
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-5" "src-"
+src_file=$dstfilename
+
+TEST mv $M0/test-5/$src_file0 $M0/test-5/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-5
+src_hashed=$hash_subvol
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-5" "dst-"
+#echo "dst-file name: " $dstfilename
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+src_cached_brick=$(get_brick_path_for_subvol $src_cached)
+
+
+# 2. Rename src to dst
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-5/$src_file $M0/test-5/$dstfilename
+
+
+# 3. Validate
+
+# Expected:
+# dst file is accessible from the mount point
+TEST stat $M0/test-5/$dstfilename 2>/dev/null
+
+# src file does not exist
+TEST file_does_not_exist test-5/$src_file
+
+# dst linkto file on dst_hashed and dst data file on src_cached
+
+EXPECT "0" file_is_linkto $src_cached_brick/test-5/$dstfilename
+EXPECT "1" file_is_linkto $dst_hash_brick/test-5/$dstfilename
+
+
+########################################################################
+#
+# The Dst file exists
+#
+########################################################################
+
+################### Test 6 ####################################
+
+# src_hash = src_cached
+# dst_hash = dst_cached
+# dst_hash = src_hash
+
+
+TEST mkdir $M0/test-6
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-6" "src-"
+src_file=$dstfilename
+
+TEST touch $M0/test-6/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-6" "dst-"
+dst_file=$dstfilename
+
+TEST touch $M0/test-6/$dst_file
+
+
+# 2. Rename src to dst
+
+TEST mv $M0/test-6/$src_file $M0/test-6/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-6/$dst_file 2>/dev/null
+TEST file_existence_check test-6/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-6/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-6/$dst_file
+
+
+################### Test 7 ####################################
+
+# src_hash = src_cached
+# dst_hash = dst_cached
+# dst_hash != src_hash
+
+
+echo " **** Test 7 **** "
+
+TEST mkdir $M0/test-7
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-7" "src-"
+src_file=$dstfilename
+
+TEST touch $M0/test-7/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-7" "dst-"
+dst_file=$dstfilename
+
+TEST touch $M0/test-7/$dst_file
+
+
+# 2. Rename src to dst
+
+TEST mv $M0/test-7/$src_file $M0/test-7/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+src_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-7/$dst_file 2>/dev/null
+TEST file_existence_check test-7/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-7/$src_file
+
+EXPECT "0" file_is_linkto $src_hash_brick/test-7/$dst_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-7/$dst_file
+
+
+################### Test 8 ####################################
+
+# src_hash = src_cached
+# dst_hash != dst_cached
+# dst_hash != src_hash
+# dst_cached != src_hash
+
+echo " **** Test 8 **** "
+
+TEST mkdir $M0/test-8
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-8" "src-"
+src_file=$dstfilename
+TEST touch $M0/test-8/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-8" "dst0-"
+dst_file0=$dstfilename
+TEST touch $M0/test-8/$dst_file0
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-8" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-8/$dst_file0 $M0/test-8/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-8/$src_file $M0/test-8/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+src_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-8/$dst_file 2>/dev/null
+TEST file_existence_check test-8/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-8/$src_file
+
+EXPECT "0" file_is_linkto $src_hash_brick/test-8/$dst_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-8/$dst_file
+
+################### Test 9 ####################################
+
+# src_hash = src_cached = dst_hash
+# dst_hash != dst_cached
+
+echo " **** Test 9 **** "
+
+TEST mkdir $M0/test-9
+
+
+# 1. Create src and dst files
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-9" "src-"
+src_file=$dstfilename
+TEST touch $M0/test-9/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-9" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-9/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-9" "dst-"
+dst_file=$dstfilename
+
+TEST mv $M0/test-9/$dst0_file $M0/test-9/$dst_file
+
+# 2. Rename the file
+
+mv $M0/test-9/$src_file $M0/test-9/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-9/$dst_file 2>/dev/null
+TEST file_existence_check test-9/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-9/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-9/$dst_file
+
+
+################### Test 10 ####################################
+
+# src_hash = src_cached = dst_cached
+# dst_hash != dst_cached
+
+echo " **** Test 10 **** "
+
+TEST mkdir $M0/test-10
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-10" "src-"
+src_file=$dstfilename
+TEST touch $M0/test-10/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-10" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-10/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-10" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-10/$dst0_file $M0/test-10/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-10/$src_file $M0/test-10/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-10/$dst_file 2>/dev/null
+TEST file_existence_check test-10/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-10/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-10/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-10/$dst_file
+
+
+################### Test 11 ####################################
+
+# src_hash != src_cached
+# dst_hash = dst_cached = src_cached
+
+echo " **** Test 11 **** "
+
+TEST mkdir $M0/test-11
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-11" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-11/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-11" "src-"
+src_file=$dstfilename
+
+mv $M0/test-11/$src0_file $M0/test-11/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-11" "dst-"
+dst_file=$dstfilename
+TEST touch $M0/test-11/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-11/$src_file $M0/test-11/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-11/$dst_file 2>/dev/null
+TEST file_existence_check test-11/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-11/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-11/$dst_file
+
+
+################### Test 12 ####################################
+
+# src_hash != src_cached
+# dst_hash = dst_cached = src_hash
+
+echo " **** Test 12 **** "
+
+TEST mkdir $M0/test-12
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-12" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-12/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-12" "src-"
+src_file=$dstfilename
+
+mv $M0/test-12/$src0_file $M0/test-12/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-12" "dst-"
+dst_file=$dstfilename
+TEST touch $M0/test-12/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-12/$src_file $M0/test-12/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-12/$dst_file 2>/dev/null
+TEST file_existence_check test-12/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-12/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-12/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-12/$dst_file
+
+################### Test 13 ####################################
+
+# src_hash != src_cached
+# dst_hash = dst_cached
+# dst_hash != src_cached
+# dst_hash != src_hash
+
+echo " **** Test 13 **** "
+
+TEST mkdir $M0/test-13
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-13" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-13/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-13" "src-"
+src_file=$dstfilename
+
+mv $M0/test-13/$src0_file $M0/test-13/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-13" "dst-"
+dst_file=$dstfilename
+TEST touch $M0/test-13/$dst_file
+
+# 2. Rename the file
+
+mv $M0/test-13/$src_file $M0/test-13/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-13/$dst_file 2>/dev/null
+TEST file_existence_check test-13/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-13/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-13/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-13/$dst_file
+
+
+################### Test 14 ####################################
+
+# src_hash != src_cached
+# dst_hash = src_hash
+# dst_cached = src_cached
+
+echo " **** Test 14 **** "
+
+TEST mkdir $M0/test-14
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-14" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-14/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-14" "src-"
+src_file=$dstfilename
+
+mv $M0/test-14/$src0_file $M0/test-14/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-14" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-14/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-14" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-14/$dst0_file $M0/test-14/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-14/$src_file $M0/test-14/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-14/$dst_file 2>/dev/null
+TEST file_existence_check test-14/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-14/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-14/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-14/$dst_file
+
+################### Test 15 ####################################
+
+# src_hash != src_cached
+# dst_hash != src_hash
+# dst_hash != src_cached
+# dst_cached = src_cached
+
+echo " **** Test 15 **** "
+
+TEST mkdir $M0/test-15
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-15" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-15/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-15" "src-"
+src_file=$dstfilename
+
+mv $M0/test-15/$src0_file $M0/test-15/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-15" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-15/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-15" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-15/$dst0_file $M0/test-15/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-15/$src_file $M0/test-15/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-15/$dst_file 2>/dev/null
+TEST file_existence_check test-15/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-15/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-15/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-15/$dst_file
+
+
+
+################### Test 16 ####################################
+
+# src_hash != src_cached
+# dst_hash = src_cached
+# dst_cached = src_hash
+
+echo " **** Test 16 **** "
+
+TEST mkdir $M0/test-16
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-16" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-16/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-16" "src-"
+src_file=$dstfilename
+
+mv $M0/test-16/$src0_file $M0/test-16/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-16" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-16/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-16" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-16/$dst0_file $M0/test-16/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-16/$src_file $M0/test-16/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-16/$dst_file 2>/dev/null
+TEST file_existence_check test-16/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-16/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-16/$dst_file
+
+
+################### Test 17 ####################################
+
+# src_hash != src_cached
+# dst_hash != dst_cached
+# dst_hash != src_hash != src_cached
+# dst_cached = src_hash
+
+
+echo " **** Test 17 **** "
+
+TEST mkdir $M0/test-17
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-17" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-17/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-17" "src-"
+src_file=$dstfilename
+
+mv $M0/test-17/$src0_file $M0/test-17/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-17" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-17/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-17" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-17/$dst0_file $M0/test-17/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-17/$src_file $M0/test-17/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-17/$dst_file 2>/dev/null
+TEST file_existence_check test-17/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-17/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-17/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-17/$dst_file
+
+
+################### Test 18 ####################################
+
+# src_hash != src_cached
+# dst_hash != dst_cached
+# dst_hash != src_hash != src_cached != dst_cached
+
+
+echo " **** Test 18 **** "
+
+TEST mkdir $M0/test-18
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-18" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-18/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-18" "src-"
+src_file=$dstfilename
+
+mv $M0/test-18/$src0_file $M0/test-18/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-18" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-18/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-3" "$M0/test-18" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-18/$dst0_file $M0/test-18/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-18/$src_file $M0/test-18/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-3")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-18/$dst_file 2>/dev/null
+TEST file_existence_check test-18/$dst_file "$V0-client-3" "$V0-client-0"
+TEST file_does_not_exist test-18/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-18/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-18/$dst_file
+
+
+# Cleanup
+cleanup
+
diff --git a/tests/basic/distribute/spare_file_rebalance.t b/tests/basic/distribute/spare_file_rebalance.t
new file mode 100644
index 00000000000..061c02f7392
--- /dev/null
+++ b/tests/basic/distribute/spare_file_rebalance.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+# Verify volume creation
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#------------------------------------------------------------
+
+# Test case - Create sparse files on the mount point and verify
+# file info after rebalance
+#------------------------------------------------------------
+
+# Create some sparse files
+TEST cd $M0;
+dd if=/dev/urandom of=sparse_file bs=10k count=1 seek=2M
+cp --sparse=always sparse_file sparse_file_3;
+
+# Add a 3rd brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3;
+
+# Trigger rebalance
+TEST $CLI volume rebalance $V0 start force;
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed;
+
+# Compare original and rebalanced files
+TEST cd $B0/${V0}2
+TEST cmp sparse_file $B0/${V0}3/sparse_file_3
+EXPECT_WITHIN 30 "";
+
+cleanup;
diff --git a/tests/basic/ec/ec-badfd.c b/tests/basic/ec/ec-badfd.c
new file mode 100644
index 00000000000..8be23c10eaf
--- /dev/null
+++ b/tests/basic/ec/ec-badfd.c
@@ -0,0 +1,124 @@
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+int
+fill_iov(struct iovec *iov, char fillchar, int count)
+{
+ int ret = -1;
+
+ iov->iov_base = malloc(count + 1);
+ if (iov->iov_base == NULL) {
+ return ret;
+ } else {
+ iov->iov_len = count;
+ ret = 0;
+ }
+ memset(iov->iov_base, fillchar, count);
+ memset(iov->iov_base + count, '\0', 1);
+
+ return ret;
+}
+
+int
+write_sync(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ struct iovec iov = {0};
+
+ ret = fill_iov(&iov, 'a', char_count);
+ if (ret) {
+ fprintf(stderr, "failed to create iov");
+ goto out;
+ }
+
+ ret = glfs_pwritev(glfd, &iov, 1, 0, flags);
+out:
+ if (ret < 0) {
+ fprintf(stderr, "glfs_pwritev failed, %d", errno);
+ }
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+ int ret = 1;
+ char volume_cmd[4096] = {0};
+
+ if (argc != 4) {
+ fprintf(stderr, "Syntax: %s <host> <volname> <file>\n", argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_set_logging(fs, "/tmp/ec-badfd.log", 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ fd = glfs_open(fs, argv[3], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ ret = write_sync(fs, fd, 16);
+ if (ret < 0) {
+ fprintf(stderr, "write_sync failed\n");
+ }
+
+ snprintf(volume_cmd, sizeof(volume_cmd),
+ "gluster --mode=script volume stop %s", argv[2]);
+ /*Stop the volume so that update-size-version fails*/
+ system(volume_cmd);
+ sleep(8); /* 3 seconds more than eager-lock-timeout*/
+ snprintf(volume_cmd, sizeof(volume_cmd),
+ "gluster --mode=script volume start %s", argv[2]);
+ system(volume_cmd);
+ sleep(8); /*wait for bricks to come up*/
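+    /* The volume stop/start should have marked the fd bad, so fsync and close below must fail */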
+ ret = glfs_fsync(fd, NULL, NULL);
+ if (ret == 0) {
+ fprintf(stderr, "fsync succeeded on a BADFD\n");
+ exit(1);
+ }
+
+ ret = glfs_close(fd);
+ if (ret == 0) {
+ fprintf(stderr, "flush succeeded on a BADFD\n");
+ exit(1);
+ }
+ ret = 0;
+
+out:
+ unlink("/tmp/ec-badfd.log");
+ glfs_fini(fs);
+
+ return ret;
+}
diff --git a/tests/basic/ec/ec-badfd.t b/tests/basic/ec/ec-badfd.t
new file mode 100755
index 00000000000..56feb47f115
--- /dev/null
+++ b/tests/basic/ec/ec-badfd.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{1..6}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 disperse.eager-lock-timeout 5
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+TEST touch $M0/file
+
+TEST build_tester $(dirname $0)/ec-badfd.c -lgfapi -Wall -O2
+TEST $(dirname $0)/ec-badfd $H0 $V0 /file
+cleanup_tester $(dirname ${0})/ec-badfd
+
+cleanup;
diff --git a/tests/basic/ec/ec-quorum-count.t b/tests/basic/ec/ec-quorum-count.t
new file mode 100644
index 00000000000..9310ebbb8f2
--- /dev/null
+++ b/tests/basic/ec/ec-quorum-count.t
@@ -0,0 +1,167 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../ec.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0..5}
+TEST $CLI volume set $V0 disperse.eager-lock-timeout 5
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume set $V0 disperse.background-heals 0
+TEST $CLI volume set $V0 disperse.heal-wait-qlength 0
+
+#Should fail on non-disperse volume
+TEST ! $CLI volume set $V1 disperse.quorum-count 5
+
+#Should fail for invalid values and succeed only within the valid range
+TEST ! $CLI volume set $V0 disperse.quorum-count 0
+TEST ! $CLI volume set $V0 disperse.quorum-count -0
+TEST ! $CLI volume set $V0 disperse.quorum-count abc
+TEST ! $CLI volume set $V0 disperse.quorum-count 10abc
+TEST ! $CLI volume set $V0 disperse.quorum-count 1
+TEST ! $CLI volume set $V0 disperse.quorum-count 2
+TEST ! $CLI volume set $V0 disperse.quorum-count 3
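+#The minimum accepted quorum-count equals the number of data bricks (4 for this 4+2 volume)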
+TEST $CLI volume set $V0 disperse.quorum-count 4
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+#Test that the option is reflected in the mount
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^4$" ec_option_value $V0 $M0 0 quorum-count
+TEST $CLI volume reset $V0 disperse.quorum-count
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" ec_option_value $V0 $M0 0 quorum-count
+TEST $CLI volume set $V0 disperse.quorum-count 6
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^6$" ec_option_value $V0 $M0 0 quorum-count
+
+TEST touch $M0/a
+TEST touch $M0/data
+TEST setfattr -n trusted.def -v def $M0/a
+TEST touch $M0/src
+TEST touch $M0/del-me
+TEST mkdir $M0/dir1
+TEST dd if=/dev/zero of=$M0/read-file bs=1M count=1 oflag=direct
+TEST dd if=/dev/zero of=$M0/del-file bs=1M count=1 oflag=direct
+TEST gf_rm_file_and_gfid_link $B0/${V0}0 del-file
+#modify operations should fail as the file is not in quorum
+TEST ! dd if=/dev/zero of=$M0/del-file bs=1M count=1 oflag=direct
+TEST kill_brick $V0 $H0 $B0/${V0}0
+#Read should succeed even when quorum-count is not met
+TEST dd if=$M0/read-file of=/dev/null iflag=direct
+TEST ! touch $M0/a2
+TEST ! mkdir $M0/dir2
+TEST ! mknod $M0/b2 b 4 5
+TEST ! ln -s $M0/a $M0/symlink
+TEST ! ln $M0/a $M0/link
+TEST ! mv $M0/src $M0/dst
+TEST ! rm -f $M0/del-me
+TEST ! rmdir $M0/dir1
+TEST ! dd if=/dev/zero of=$M0/a bs=1M count=1 conv=notrunc
+TEST ! dd if=/dev/zero of=$M0/data bs=1M count=1 conv=notrunc
+TEST ! truncate -s 0 $M0/a
+TEST ! setfattr -n trusted.abc -v abc $M0/a
+TEST ! setfattr -x trusted.def $M0/a
+TEST ! chmod +x $M0/a
+TEST ! fallocate -l 2m -n $M0/a
+TEST ! fallocate -p -l 512k $M0/a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+
+# Reset the option and check that the default quorum behaviour (which
+# tolerates up to redundancy-count brick failures) is restored.
+TEST $CLI volume reset $V0 disperse.quorum-count
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" ec_option_value $V0 $M0 0 quorum-count
+TEST touch $M0/a1
+TEST touch $M0/data1
+TEST setfattr -n trusted.def -v def $M0/a1
+TEST touch $M0/src1
+TEST touch $M0/del-me1
+TEST mkdir $M0/dir11
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/a21
+TEST mkdir $M0/dir21
+TEST mknod $M0/b21 b 4 5
+TEST ln -s $M0/a1 $M0/symlink1
+TEST ln $M0/a1 $M0/link1
+TEST mv $M0/src1 $M0/dst1
+TEST rm -f $M0/del-me1
+TEST rmdir $M0/dir11
+TEST dd if=/dev/zero of=$M0/a1 bs=1M count=1 conv=notrunc
+TEST dd if=/dev/zero of=$M0/data1 bs=1M count=1 conv=notrunc
+TEST truncate -s 0 $M0/a1
+TEST setfattr -n trusted.abc -v abc $M0/a1
+TEST setfattr -x trusted.def $M0/a1
+TEST chmod +x $M0/a1
+TEST fallocate -l 2m -n $M0/a1
+TEST fallocate -p -l 512k $M0/a1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+TEST touch $M0/a2
+TEST touch $M0/data2
+TEST setfattr -n trusted.def -v def $M0/a1
+TEST touch $M0/src2
+TEST touch $M0/del-me2
+TEST mkdir $M0/dir12
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST ! touch $M0/a22
+TEST ! mkdir $M0/dir22
+TEST ! mknod $M0/b22 b 4 5
+TEST ! ln -s $M0/a2 $M0/symlink2
+TEST ! ln $M0/a2 $M0/link2
+TEST ! mv $M0/src2 $M0/dst2
+TEST ! rm -f $M0/del-me2
+TEST ! rmdir $M0/dir12
+TEST ! dd if=/dev/zero of=$M0/a2 bs=1M count=1 conv=notrunc
+TEST ! dd if=/dev/zero of=$M0/data2 bs=1M count=1 conv=notrunc
+TEST ! truncate -s 0 $M0/a2
+TEST ! setfattr -n trusted.abc -v abc $M0/a2
+TEST ! setfattr -x trusted.def $M0/a2
+TEST ! chmod +x $M0/a2
+TEST ! fallocate -l 2m -n $M0/a2
+TEST ! fallocate -p -l 512k $M0/a2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+
+# Set quorum-count to 5 and kill 1 brick and the fops should pass
+TEST $CLI volume set $V0 disperse.quorum-count 5
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^5$" ec_option_value $V0 $M0 0 quorum-count
+TEST touch $M0/a3
+TEST touch $M0/data3
+TEST setfattr -n trusted.def -v def $M0/a3
+TEST touch $M0/src3
+TEST touch $M0/del-me3
+TEST mkdir $M0/dir13
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/a31
+TEST mkdir $M0/dir31
+TEST mknod $M0/b31 b 4 5
+TEST ln -s $M0/a3 $M0/symlink3
+TEST ln $M0/a3 $M0/link3
+TEST mv $M0/src3 $M0/dst3
+TEST rm -f $M0/del-me3
+TEST rmdir $M0/dir13
+TEST dd if=/dev/zero of=$M0/a3 bs=1M count=1 conv=notrunc
+TEST dd if=/dev/zero of=$M0/data3 bs=1M count=1 conv=notrunc
+TEST truncate -s 0 $M0/a3
+TEST setfattr -n trusted.abc -v abc $M0/a3
+TEST setfattr -x trusted.def $M0/a3
+TEST chmod +x $M0/a3
+TEST fallocate -l 2m -n $M0/a3
+TEST fallocate -p -l 512k $M0/a3
+TEST dd if=/dev/urandom of=$M0/heal-file bs=1M count=1 oflag=direct
+cksum_before_heal="$(md5sum $M0/heal-file | awk '{print $1}')"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick $V0 $H0 $B0/${V0}5
+cksum_after_heal=$(dd if=$M0/heal-file iflag=direct | md5sum | awk '{print $1}')
+TEST [[ $cksum_before_heal == $cksum_after_heal ]]
+cleanup;
diff --git a/tests/basic/ec/ec-read-mask.t b/tests/basic/ec/ec-read-mask.t
new file mode 100644
index 00000000000..ddb556f2973
--- /dev/null
+++ b/tests/basic/ec/ec-read-mask.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../ec.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+
+#Empty read-mask should fail
+TEST ! $GFS --xlator-option=*.ec-read-mask="" -s $H0 --volfile-id $V0 $M0
+
+#Fewer than 4 brick ids should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0" -s $H0 --volfile-id $V0 $M0
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1" -s $H0 --volfile-id $V0 $M0
+TEST ! $GFS --xlator-option=*.ec-read-mask="0:1:2" -s $H0 --volfile-id $V0 $M0
+
+#ids greater than 5 should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1:2:6" -s $H0 --volfile-id $V0 $M0
+
+#ids less than 0 should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:-1:2:5" -s $H0 --volfile-id $V0 $M0
+
+#read-mask containing non-numeric characters should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1:2:5:abc" -s $H0 --volfile-id $V0 $M0
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1:2:5a" -s $H0 --volfile-id $V0 $M0
+
+#mount with at least 4 read-mask-ids and all of them valid should pass
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5:4:3" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^111111$" ec_option_value $V0 $M0 0 read-mask
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+
+TEST dd if=/dev/urandom of=$M0/a bs=1M count=1
+md5=$(md5sum $M0/a | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Read on the file should fail if any of the read-mask bricks is down and the
+#number of ids equals the data count
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}5
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+#Read on file should succeed when non-read-mask bricks are down
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}4
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}3
+TEST kill_brick $V0 $H0 $B0/${V0}4
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+#Deliberately corrupt the chunks on bricks 3 and 4 and check that reads still return correct data
+TEST dd if=/dev/zero of=$B0/${V0}3/a bs=256k count=1
+TEST dd if=/dev/zero of=$B0/${V0}4/a bs=256k count=1
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup;
diff --git a/tests/basic/ec/ec-reset-brick.t b/tests/basic/ec/ec-reset-brick.t
new file mode 100644
index 00000000000..f1a625df4ff
--- /dev/null
+++ b/tests/basic/ec/ec-reset-brick.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
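+# Count the lines of `ls -l`, which includes the "total" header line, so the
+# expected values below are entry-count + 1.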
+function num_entries {
+ ls -l $1 | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+mkdir $M0/dir
+touch $M0/dir/{1..10}
+
+mkdir $M0/dir/dir1
+touch $M0/dir/dir1/{1..10}
+
+#kill brick process
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}5 start
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "5" ec_child_up_count $V0 0
+
+#reset the brick by removing all its data and recreating the brick directory
+rm -rf $B0/${V0}5
+mkdir $B0/${V0}5
+
+#start the brick process and trigger heal by committing the reset-brick
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}5 $H0:$B0/${V0}5 commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count_shd $V0 0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+
+EXPECT "^12$" num_entries $B0/${V0}5/dir
+EXPECT "^11$" num_entries $B0/${V0}5/dir/dir1
+
+ec_version=$(get_hex_xattr trusted.ec.version $B0/${V0}0)
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}1
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}2
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}3
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}4
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}5
+
+cleanup;
diff --git a/tests/basic/ec/ec-seek.t b/tests/basic/ec/ec-seek.t
index 6a0060870c8..5a7d31b9f8f 100644
--- a/tests/basic/ec/ec-seek.t
+++ b/tests/basic/ec/ec-seek.t
@@ -6,7 +6,7 @@
cleanup
SEEK=$(dirname $0)/seek
-build_tester $(dirname $0)/seek.c -o ${SEEK}
+build_tester $(dirname $0)/../seek.c -o ${SEEK}
TEST glusterd
TEST pidof glusterd
@@ -51,6 +51,7 @@ EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5
EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5 + 512))
EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 6))
+rm -f ${SEEK}
cleanup
# Centos6 regression slaves seem to not support SEEK_DATA/SEEK_HOLE
diff --git a/tests/basic/ec/gfapi-ec-open-truncate.c b/tests/basic/ec/gfapi-ec-open-truncate.c
index 22f17f436b6..fb16807003a 100644
--- a/tests/basic/ec/gfapi-ec-open-truncate.c
+++ b/tests/basic/ec/gfapi-ec-open-truncate.c
@@ -150,7 +150,8 @@ main(int argc, char *argv[])
for (i = 0; i < 20; i++) {
ret = system(
"[ $(for i in $(pgrep glusterfsd); do ls -l /proc/$i/fd | grep "
- "\"[.]glusterfs\" | grep -v health_check; done | wc -l) == 3 ]");
+ "\"[.]glusterfs\" | grep -v \".glusterfs/[0-9a-f][0-9a-f]\" | grep "
+ "-v health_check; done | wc -l) == 3 ]");
if (WIFEXITED(ret) && WEXITSTATUS(ret)) {
printf("Ret value of system: %d\n, ifexited: %d, exitstatus: %d",
ret, WIFEXITED(ret), WEXITSTATUS(ret));
diff --git a/tests/basic/ec/self-heal.t b/tests/basic/ec/self-heal.t
index d217559db1a..6329bb60248 100644
--- a/tests/basic/ec/self-heal.t
+++ b/tests/basic/ec/self-heal.t
@@ -131,6 +131,8 @@ TEST $CLI volume create $V0 redundancy 2 $H0:$B0/${V0}{0..5}
TEST $CLI volume set $V0 client-log-level DEBUG
#Write-behind has a bug where lookup can race over write which leads to size mismatch on the mount after a 'cp'
TEST $CLI volume set $V0 performance.write-behind off
+#md-cache can return stale stat due to default timeout being 1 sec
+TEST $CLI volume set $V0 performance.stat-prefetch off
EXPECT "Created" volinfo_field $V0 'Status'
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Started" volinfo_field $V0 'Status'
diff --git a/tests/basic/fencing/afr-lock-heal-advanced.c b/tests/basic/fencing/afr-lock-heal-advanced.c
new file mode 100644
index 00000000000..e202ccd5b29
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-advanced.c
@@ -0,0 +1,227 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define GF_ENFORCE_MANDATORY_LOCK "trusted.glusterfs.enforce-mandatory-lock"
+
+FILE *logfile_fp;
+
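+/* Log the failing call and errno to the test log file when it is open,
+ * falling back to stderr otherwise. */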
+#define LOG_ERR(func, err) \
+ do { \
+ if (!logfile_fp) { \
+ fprintf(stderr, "\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(stderr); \
+ } else { \
+ fprintf(logfile_fp, "\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(logfile_fp); \
+ } \
+ } while (0)
+
+glfs_t *
+setup_client(char *hostname, char *volname, char *log_file)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(logfile_fp, "\nglfs_new: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_volfile_server failed ret:%d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_logging failed with ret: %d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
+glfs_fd_t *
+open_file(glfs_t *fs, char *fname)
+{
+ glfs_fd_t *fd = NULL;
+
+ fd = glfs_creat(fs, fname, O_CREAT, 0644);
+ if (!fd) {
+ LOG_ERR("glfs_creat", errno);
+ goto out;
+ }
+out:
+ return fd;
+}
+
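+/* Mark the fd for mandatory locking and take a blocking mandatory write
+ * lock on the first 100 bytes of the file. */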
+int
+acquire_mandatory_lock(glfs_t *fs, glfs_fd_t *fd)
+{
+ struct flock lock;
+ int ret = 0;
+
+ /* initialize lock */
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 100;
+
+ ret = glfs_fsetxattr(fd, GF_ENFORCE_MANDATORY_LOCK, "set", 8, 0);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsetxattr", errno);
+ ret = -1;
+ goto out;
+ }
+
+ /* take a write mandatory lock */
+ ret = glfs_file_lock(fd, F_SETLKW, &lock, GLFS_LK_MANDATORY);
+ if (ret) {
+ LOG_ERR("glfs_file_lock", errno);
+ ret = -1;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+int
+perform_test(glfs_t *fs, char *file1, char *file2)
+{
+ int ret = 0;
+ glfs_fd_t *fd1 = NULL;
+ glfs_fd_t *fd2 = NULL;
+ char *buf = "0123456789";
+
+ fd1 = open_file(fs, file1);
+ if (!fd1) {
+ ret = -1;
+ goto out;
+ }
+ fd2 = open_file(fs, file2);
+ if (!fd2) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Wait here while the .t kills one brick. */
+ pause();
+
+ ret = acquire_mandatory_lock(fs, fd1);
+ if (ret) {
+ goto out;
+ }
+ ret = acquire_mandatory_lock(fs, fd2);
+ if (ret) {
+ goto out;
+ }
+
+ /* Bring the brick up and let the locks heal. */
+ pause();
+ /* At this point, the .t would have killed and brought back 2 bricks,
+  * marking the fd bad. */
+
+ ret = glfs_write(fd1, buf, 10, 0);
+ if (ret > 0) {
+ /* Write is supposed to fail with EBADFD*/
+ LOG_ERR("glfs_write", ret);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (fd1)
+ glfs_close(fd1);
+ if (fd2)
+ glfs_close(fd2);
+ return ret;
+}
+
+static void
+sigusr1_handler(int signo)
+{
+ /*Signal caught. Just continue with the execution.*/
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+ char *volname = NULL;
+ char log_file[100];
+ char *hostname = NULL;
+ char *fname1 = NULL;
+ char *fname2 = NULL;
+
+ if (argc != 7) {
+ fprintf(stderr,
+ "Usage: %s <host> <volname> <file1> <file2> "
+ "<log-file-location> <log-file-suffix>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ fname1 = argv[3];
+ fname2 = argv[4];
+
+ /* Use SIGUSR1 and pause() as a means of hitting break-points in this
+  * program when signalled from the .t test case. */
+ if (signal(SIGUSR1, sigusr1_handler) == SIG_ERR) {
+ LOG_ERR("SIGUSR1 handler error", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[5], "lock-heal.c", argv[6], "log");
+ logfile_fp = fopen(log_file, "w");
+ if (!logfile_fp) {
+ fprintf(stderr, "\nfailed to open %s\n", log_file);
+ fflush(stderr);
+ return -1;
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[5], "glfs-client", argv[6], "log");
+ fs = setup_client(hostname, volname, log_file);
+ if (!fs) {
+ LOG_ERR("setup_client", errno);
+ return -1;
+ }
+
+ ret = perform_test(fs, fname1, fname2);
+
+error:
+ if (fs) {
+ /*glfs_fini(fs)*/; // glfs fini path is racy and crashes the program
+ }
+
+ fclose(logfile_fp);
+
+ return ret;
+}
diff --git a/tests/basic/fencing/afr-lock-heal-advanced.t b/tests/basic/fencing/afr-lock-heal-advanced.t
new file mode 100644
index 00000000000..8a5b5989b5e
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-advanced.t
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+PROCESS_UP_TIMEOUT=90
+
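+# Echo "Y" if the gfapi test program with the given pid is still running,
+# "N" otherwise.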
+function is_gfapi_program_alive()
+{
+ pid=$1
+ ps -p $pid
+ if [ $? -eq 0 ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
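+# Count the ACTIVE locks granted on the given brick for the two inodes, as
+# reported in the brick statedump.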
+function get_active_lock_count {
+ brick=$1
+ i1=$2
+ i2=$3
+ pattern="ACTIVE.*client-${brick: -1}"
+
+ sdump=$(generate_brick_statedump $V0 $H0 $brick)
+ lock_count1="$(egrep "$i1" $sdump -A3| egrep "$pattern"|uniq|wc -l)"
+ lock_count2="$(egrep "$i2" $sdump -A3| egrep "$pattern"|uniq|wc -l)"
+ echo "$((lock_count1+lock_count2))"
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 locks.mandatory-locking forced
+TEST $CLI volume set $V0 enforce-mandatory-lock on
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+TEST build_tester $(dirname $0)/afr-lock-heal-advanced.c -lgfapi -ggdb
+
+#------------------------------------------------------------------------------
+# Use more than 1 fd from the same client so that list_for_each_* loops are executed more than once.
+$(dirname $0)/afr-lock-heal-advanced $H0 $V0 "/FILE1" "/FILE2" $logdir C1&
+client_pid=$!
+TEST [ $client_pid ]
+
+TEST sleep 5 # By now, the client would have opened an fd on FILE1 and FILE2 and is waiting for a SIGUSR1.
+EXPECT "Y" is_gfapi_program_alive $client_pid
+
+gfid_str1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE1))
+inode1="FILE1|gfid:$gfid_str1"
+gfid_str2=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE2))
+inode2="FILE2|gfid:$gfid_str2"
+
+# Kill brick-3 and let client-1 take lock on both files.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill -SIGUSR1 $client_pid
+# If program is still alive, glfs_file_lock() was a success.
+EXPECT "Y" is_gfapi_program_alive $client_pid
+
+# Check lock is present on brick-1 and brick-2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}0 $inode1 $inode2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}1 $inode1 $inode2
+
+# Restart brick-3 and check that the lock has healed on it.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST sleep 10 #Needed for client to re-open fd? Otherwise client_pre_lk_v2() fails with EBADFD for remote-fd.
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}2 $inode1 $inode2
+
+#------------------------------------------------------------------------------
+# Kill same brick before heal completes the first time and check it completes the second time.
+TEST $CLI volume set $V0 delay-gen locks
+TEST $CLI volume set $V0 delay-gen.delay-duration 5000000
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.enable finodelk
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume reset $V0 delay-gen
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}0 $inode1 $inode2
+
+#------------------------------------------------------------------------------
+# Kill 2 bricks and bring it back. The fds must be marked bad.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+# TODO: `gluster v statedump $V0 client localhost:$client_pid` is not working,
+# so sleep for $CHILD_UP_TIMEOUT seconds for the client to connect to the bricks.
+TEST sleep $CHILD_UP_TIMEOUT
+
+# Try to write to FILE1 from the .c; it must fail.
+TEST kill -SIGUSR1 $client_pid
+wait $client_pid
+ret=$?
+TEST [ $ret == 0 ]
+
+cleanup_tester $(dirname $0)/afr-lock-heal-advanced
+cleanup;
diff --git a/tests/basic/fencing/afr-lock-heal-basic.c b/tests/basic/fencing/afr-lock-heal-basic.c
new file mode 100644
index 00000000000..768c9e57181
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-basic.c
@@ -0,0 +1,182 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define GF_ENFORCE_MANDATORY_LOCK "trusted.glusterfs.enforce-mandatory-lock"
+
+FILE *logfile_fp;
+
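+/* Log the failing call and errno to the test log file when it is open,
+ * falling back to stderr otherwise. */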
+#define LOG_ERR(func, err) \
+ do { \
+ if (!logfile_fp) { \
+ fprintf(stderr, "\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(stderr); \
+ } else { \
+ fprintf(logfile_fp, "\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(logfile_fp); \
+ } \
+ } while (0)
+
+glfs_t *
+setup_client(char *hostname, char *volname, char *log_file)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(logfile_fp, "\nglfs_new: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_volfile_server failed ret:%d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_logging failed with ret: %d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
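+/* Create (or open) the file, mark it for mandatory locking and take a
+ * blocking mandatory write lock on its first 100 bytes. pause() is called
+ * before and after locking so the .t can drive the sequence via SIGUSR1. */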
+int
+acquire_mandatory_lock(glfs_t *fs, char *fname)
+{
+ struct flock lock;
+ int ret = 0;
+ glfs_fd_t *fd = NULL;
+
+ fd = glfs_creat(fs, fname, O_CREAT, 0644);
+ if (!fd) {
+ if (errno != EEXIST) {
+ LOG_ERR("glfs_creat", errno);
+ ret = -1;
+ goto out;
+ }
+ fd = glfs_open(fs, fname, O_RDWR | O_NONBLOCK);
+ if (!fd) {
+ LOG_ERR("glfs_open", errno);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ /* initialize lock */
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 100;
+
+ ret = glfs_fsetxattr(fd, GF_ENFORCE_MANDATORY_LOCK, "set", 8, 0);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsetxattr", errno);
+ ret = -1;
+ goto out;
+ }
+
+ pause();
+
+ /* take a write mandatory lock */
+ ret = glfs_file_lock(fd, F_SETLKW, &lock, GLFS_LK_MANDATORY);
+ if (ret) {
+ LOG_ERR("glfs_file_lock", errno);
+ goto out;
+ }
+
+ pause();
+
+out:
+ if (fd) {
+ glfs_close(fd);
+ }
+
+ return ret;
+}
+
+static void
+sigusr1_handler(int signo)
+{
+ /*Signal caught. Just continue with the execution.*/
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+ char *volname = NULL;
+ char log_file[100];
+ char *hostname = NULL;
+ char *fname = NULL;
+
+ if (argc != 6) {
+ fprintf(stderr,
+ "Usage: %s <host> <volname> <file> <log-file-location> "
+ "<log-file-suffix>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ fname = argv[3];
+
+ /* Use SIGUSR1 and pause() as a means of hitting break-points in this
+  * program when signalled from the .t test case. */
+ if (signal(SIGUSR1, sigusr1_handler) == SIG_ERR) {
+ LOG_ERR("SIGUSR1 handler error", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[4], "lock-heal-basic.c", argv[5],
+ "log");
+ logfile_fp = fopen(log_file, "w");
+ if (!logfile_fp) {
+ fprintf(stderr, "\nfailed to open %s\n", log_file);
+ fflush(stderr);
+ return -1;
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[4], "glfs-client", argv[5], "log");
+ fs = setup_client(hostname, volname, log_file);
+ if (!fs) {
+ LOG_ERR("setup_client", errno);
+ return -1;
+ }
+
+ ret = acquire_mandatory_lock(fs, fname);
+
+error:
+ if (fs) {
+ /*glfs_fini(fs)*/; // glfs fini path is racy and crashes the program
+ }
+
+ fclose(logfile_fp);
+
+ return ret;
+}
diff --git a/tests/basic/fencing/afr-lock-heal-basic.t b/tests/basic/fencing/afr-lock-heal-basic.t
new file mode 100644
index 00000000000..69131af085d
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-basic.t
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
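+# Echo "Y" if the gfapi test program with the given pid is still running,
+# "N" otherwise.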
+function is_gfapi_program_alive()
+{
+ pid=$1
+ ps -p $pid
+ if [ $? -eq 0 ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 locks.mandatory-locking forced
+TEST $CLI volume set $V0 enforce-mandatory-lock on
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+TEST build_tester $(dirname $0)/afr-lock-heal-basic.c -lgfapi -ggdb
+
+$(dirname $0)/afr-lock-heal-basic $H0 $V0 "/FILE" $logdir C1&
+client1_pid=$!
+TEST [ $client1_pid ]
+
+$(dirname $0)/afr-lock-heal-basic $H0 $V0 "/FILE" $logdir C2&
+client2_pid=$!
+TEST [ $client2_pid ]
+
+TEST sleep 5 # By now, the 2 clients would have opened an fd on FILE and are waiting for a SIGUSR1.
+EXPECT "Y" is_gfapi_program_alive $client1_pid
+EXPECT "Y" is_gfapi_program_alive $client2_pid
+
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE))
+inode="FILE|gfid:$gfid_str"
+
+# Kill brick-3 and let client-1 take lock on the file.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill -SIGUSR1 $client1_pid
+# If program is still alive, glfs_file_lock() was a success.
+EXPECT "Y" is_gfapi_program_alive $client1_pid
+
+# Check lock is present on brick-1 and brick-2
+b1_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}0)
+c1_lock_on_b1="$(egrep "$inode" $b1_sdump -A3| egrep 'ACTIVE.*client-0'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+b2_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}1)
+c1_lock_on_b2="$(egrep "$inode" $b2_sdump -A3| egrep 'ACTIVE.*client-1'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+TEST [ "$c1_lock_on_b1" == "$c1_lock_on_b2" ]
+
+# Restart brick-3 and check that the lock has healed on it.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST sleep 10 #Needed for client to re-open fd? Otherwise client_pre_lk_v2() fails with EBADFD for remote-fd. Also wait for lock heal.
+
+b3_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}2)
+c1_lock_on_b3="$(egrep "$inode" $b3_sdump -A3| egrep 'ACTIVE.*client-2'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+TEST [ "$c1_lock_on_b1" == "$c1_lock_on_b3" ]
+
+# Kill brick-1 and let client-2 preempt the lock on bricks 2 and 3.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill -SIGUSR1 $client2_pid
+# If program is still alive, glfs_file_lock() was a success.
+EXPECT "Y" is_gfapi_program_alive $client2_pid
+
+# Restart brick-1 and let lock healing complete.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+TEST sleep 10 #Needed for client to re-open fd? Otherwise client_pre_lk_v2() fails with EBADFD for remote-fd. Also wait for lock heal.
+
+# Check that all bricks now have locks from client 2 only.
+b1_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}0)
+c2_lock_on_b1="$(egrep "$inode" $b1_sdump -A3| egrep 'ACTIVE.*client-0'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+b2_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}1)
+c2_lock_on_b2="$(egrep "$inode" $b2_sdump -A3| egrep 'ACTIVE.*client-1'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+b3_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}2)
+c2_lock_on_b3="$(egrep "$inode" $b3_sdump -A3| egrep 'ACTIVE.*client-2'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+TEST [ "$c2_lock_on_b1" == "$c2_lock_on_b2" ]
+TEST [ "$c2_lock_on_b1" == "$c2_lock_on_b3" ]
+TEST [ "$c2_lock_on_b1" != "$c1_lock_on_b1" ]
+
+#Let the client programs run and exit.
+TEST kill -SIGUSR1 $client1_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" is_gfapi_program_alive $client1_pid
+TEST kill -SIGUSR1 $client2_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" is_gfapi_program_alive $client2_pid
+
+cleanup_tester $(dirname $0)/afr-lock-heal-basic
+cleanup;
diff --git a/tests/basic/fuse/active-io-graph-switch.t b/tests/basic/fuse/active-io-graph-switch.t
new file mode 100644
index 00000000000..6ec3e1fcbfa
--- /dev/null
+++ b/tests/basic/fuse/active-io-graph-switch.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+TESTS_EXPECTED_IN_LOOP=12
+
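+# Keep overwriting the given file on the mount until the lock file is
+# removed from the mount.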
+function perform_io_on_mount {
+ local m="$1"
+ local f="$2"
+ local lockfile="$3"
+ while [ -f "$m/$lockfile" ];
+ do
+ dd if=/dev/zero of=$m/$f bs=1M count=1
+ done
+}
+
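+# Toggle stat-prefetch a few times to regenerate the client volfile and
+# force graph switches while I/O is in progress.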
+function perform_graph_switch {
+ for i in {1..3}
+ do
+ TEST_IN_LOOP $CLI volume set $V0 performance.stat-prefetch off
+ sleep 3
+ TEST_IN_LOOP $CLI volume set $V0 performance.stat-prefetch on
+ sleep 3
+ done
+}
+
+function count_files {
+ ls $M0 | wc -l
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/lock
+for i in {1..100}; do perform_io_on_mount $M0 $i lock & done
+EXPECT_WITHIN 5 "101" count_files
+
+perform_graph_switch
+TEST rm -f $M0/lock
+wait
+EXPECT "100" count_files
+TEST rm -f $M0/{1..100}
+EXPECT "0" count_files
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Repeat the tests with reader-thread-count
+TEST $GFS --reader-thread-count=10 --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/lock
+for i in {1..100}; do perform_io_on_mount $M0 $i lock & done
+EXPECT_WITHIN 5 "101" count_files
+
+perform_graph_switch
+TEST rm -f $M0/lock
+wait
+EXPECT "100" count_files
+TEST rm -f $M0/{1..100}
+EXPECT "0" count_files
+
+cleanup
diff --git a/tests/basic/gfapi/bug-1507896.c b/tests/basic/gfapi/bug-1507896.c
new file mode 100644
index 00000000000..1cc20849c2b
--- /dev/null
+++ b/tests/basic/gfapi/bug-1507896.c
@@ -0,0 +1,49 @@
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define VALIDATE_AND_GOTO_LABEL_ON_ERROR(func, ret, label) \
+ do { \
+ if (ret < 0) { \
+ fprintf(stderr, "%s : returned error %d (%s)\n", func, ret, \
+ strerror(errno)); \
+ goto label; \
+ } \
+ } while (0)
+
+int
+main(int argc, char *argv[])
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ char *hostname = NULL;
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = glfs_new(volname);
+ if (!fs)
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_new(fs)", ret, out);
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_volfile_server(fs)", ret, out);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_logging(fs)", ret, out);
+
+ ret = glfs_init(fs);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_init(fs)", ret, out);
+
+out:
+ if (fs) {
+ ret = glfs_fini(fs);
+ if (ret)
+ fprintf(stderr, "glfs_fini(fs) returned %d\n", ret);
+ }
+ return ret;
+}
diff --git a/tests/basic/gfapi/bug-1507896.t b/tests/basic/gfapi/bug-1507896.t
new file mode 100644
index 00000000000..4764e650232
--- /dev/null
+++ b/tests/basic/gfapi/bug-1507896.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/bug-1507896.c -lgfapi
+
+TEST ./$(dirname $0)/bug-1507896 $H0 $V0 $logdir/bug-1507896.log
+
+#volume name preceded by '/' should fail
+TEST ! ./$(dirname $0)/bug-1507896 $H0 /$V0 $logdir/bug-1507896.log
+
+#volume name containing special characters should fail
+TEST ! ./$(dirname $0)/bug-1507896 $H0 test@_$V0 $logdir/bug-1507896.log
+
+cleanup_tester $(dirname $0)/bug-1507896
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/basic/gfapi/gfapi-copy-file-range.t b/tests/basic/gfapi/gfapi-copy-file-range.t
index c24c1433edf..a56d3a58e07 100644
--- a/tests/basic/gfapi/gfapi-copy-file-range.t
+++ b/tests/basic/gfapi/gfapi-copy-file-range.t
@@ -5,20 +5,22 @@
cleanup;
+mkfs.xfs 2>&1 | grep reflink
+if [ $? -ne 0 ]; then
+ SKIP_TESTS
+ exit
+fi
+
+
TEST glusterd
+TEST truncate -s 2G $B0/xfs_image
# for now, a xfs filesystem with reflink support is created.
# In future, better to make changes in MKFS_LOOP so that,
# once can create a xfs filesystem with reflink enabled in
# generic and simple way, instead of doing below steps each
# time.
-TEST truncate -s 2G $B0/xfs_image
-mkfs.xfs 2>&1 | grep reflink
-if [ $? -eq 0 ]; then
- mkfs.xfs -f -i size=512 -m reflink=1 $B0/xfs_image;
-else
- mkfs.xfs -f -i size=512 $B0/xfs_image;
-fi
+TEST mkfs.xfs -f -i size=512 -m reflink=1 $B0/xfs_image;
TEST mkdir $B0/bricks
TEST mount -t xfs -o loop $B0/xfs_image $B0/bricks
diff --git a/tests/basic/gfapi/gfapi-graph-switch-open-fd.t b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
new file mode 100644
index 00000000000..2e666be7ec7
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{0..2};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/sync
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/gfapi-keep-writing.c -lgfapi
+
+
+#Launch a program to keep doing writes on an fd
+./$(dirname $0)/gfapi-keep-writing ${H0} $V0 $logdir/gfapi-async-calls-test.log sync &
+p=$!
+sleep 1 #Let some writes go through
+#Check that the graph switch does not leave any pending markers behind forever
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+
+TEST rm -f $M0/sync #Make sure the glfd is closed
+TEST wait #Wait for background process to die
+#Goal is to check that no permanent FOOL changelog is left behind
+sleep 5
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick0/glfs_test.txt trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick1/glfs_test.txt trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick2/glfs_test.txt trusted.afr.dirty
+
+cleanup_tester $(dirname $0)/gfapi-keep-writing
+
+cleanup;
diff --git a/tests/basic/gfapi/gfapi-keep-writing.c b/tests/basic/gfapi/gfapi-keep-writing.c
new file mode 100644
index 00000000000..91b59cea02b
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-keep-writing.c
@@ -0,0 +1,129 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile_server failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+int
+glfs_test_function(const char *hostname, const char *volname,
+ const char *logfile, const char *syncfile)
+{
+ int ret = -1;
+ int flags = O_CREAT | O_RDWR;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ const char *buff = "This is from my prog\n";
+ const char *filename = "glfs_test.txt";
+ struct stat buf = {0};
+
+ fs = init_glfs(hostname, volname, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ glfd = glfs_creat(fs, filename, flags, 0644);
+ if (glfd == NULL) {
+ LOG_ERR("glfs_creat failed");
+ goto out;
+ }
+
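+ /* Keep writing until the .t removes the sync file from the mount. */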
+ while (glfs_stat(fs, syncfile, &buf) == 0) {
+ ret = glfs_write(glfd, buff, strlen(buff), flags);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+ }
+
+ ret = glfs_close(glfd);
+ if (ret < 0) {
+ LOG_ERR("glfs_close failed");
+ goto out;
+ }
+
+out:
+ ret = glfs_fini(fs);
+ if (ret) {
+ LOG_ERR("glfs_fini failed");
+ }
+
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ char *syncfile = NULL;
+
+ if (argc != 5) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+ syncfile = argv[4];
+
+ ret = glfs_test_function(hostname, volname, logfile, syncfile);
+ if (ret) {
+ LOG_ERR("glfs_test_function failed");
+ }
+
+ return ret;
+}
diff --git a/tests/basic/gfapi/gfapi-ssl-load-volfile-test.c b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.c
new file mode 100644
index 00000000000..7beb8dd1fe4
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.c
@@ -0,0 +1,127 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *volfile,
+ const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile(fs, volfile);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+int
+glfs_test_function(const char *hostname, const char *volname,
+ const char *volfile, const char *logfile)
+{
+ int ret = -1;
+ int flags = O_CREAT | O_RDWR;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ const char *buff = "This is from my prog\n";
+ const char *filename = "glfs_test.txt";
+
+ fs = init_glfs(hostname, volname, volfile, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ glfd = glfs_creat(fs, filename, flags, 0644);
+ if (glfd == NULL) {
+ LOG_ERR("glfs_creat failed");
+ goto out;
+ }
+
+ ret = glfs_write(glfd, buff, strlen(buff), flags);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+
+ ret = glfs_close(glfd);
+ if (ret < 0) {
+ LOG_ERR("glfs_close failed");
+ goto out;
+ }
+
+out:
+ ret = glfs_fini(fs);
+ if (ret) {
+ LOG_ERR("glfs_fini failed");
+ }
+
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *volfile = NULL;
+ char *logfile = NULL;
+
+ if (argc != 5) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ volfile = argv[3];
+ logfile = argv[4];
+
+ ret = glfs_test_function(hostname, volname, volfile, logfile);
+ if (ret) {
+ LOG_ERR("glfs_test_function failed");
+ }
+
+ return ret;
+}
diff --git a/tests/basic/gfapi/gfapi-ssl-load-volfile-test.t b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.t
new file mode 100755
index 00000000000..8e94df9d321
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../ssl.rc
+
+cleanup;
+
+sed -e "s,@@HOSTNAME@@,${H0},g" -e "s,@@BRICKPATH@@,${B0}/brick1,g" \
+ -e "s,@@SSL@@,off,g" \
+ $(dirname ${0})/protocol-client-ssl.vol.in \
+ > $(dirname ${0})/protocol-client-ssl.vol
+
+TEST create_self_signed_certs
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/gfapi-ssl-load-volfile-test.c -lgfapi
+
+# Run test without I/O or management encryption
+TEST $(dirname $0)/gfapi-ssl-load-volfile-test $H0 $V0 \
+ $(dirname ${0})/protocol-client-ssl.vol \
+ $logdir/gfapi-ssl-load-volfile-test.log
+
+# Enable management encryption
+touch $GLUSTERD_WORKDIR/secure-access
+
+killall_gluster
+
+TEST glusterd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+# Run test with management encryption (No I/O encryption)
+TEST $(dirname $0)/gfapi-ssl-load-volfile-test $H0 $V0 \
+ $(dirname ${0})/protocol-client-ssl.vol \
+ $logdir/gfapi-ssl-load-volfile-test.log
+
+# Enable I/O encryption
+TEST $CLI volume set $V0 server.ssl on
+
+killall_gluster
+
+sed -e "s,@@HOSTNAME@@,${H0},g" -e "s,@@BRICKPATH@@,${B0}/brick1,g" \
+ -e "s,@@SSL@@,on,g" \
+ $(dirname ${0})/protocol-client-ssl.vol.in \
+ > $(dirname ${0})/protocol-client-ssl.vol
+
+TEST glusterd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+# Run test with both I/O and management encryption
+TEST $(dirname $0)/gfapi-ssl-load-volfile-test $H0 $V0 \
+ $(dirname ${0})/protocol-client-ssl.vol \
+ $logdir/gfapi-ssl-load-volfile-test.log
+
+cleanup_tester $(dirname $0)/gfapi-ssl-load-volfile-test
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
+
+# NetBSD build scripts are not up to date, so this test fails on NetBSD.
+# Skip it there for now.
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/gfapi/glfsxmp-coverage.c b/tests/basic/gfapi/glfsxmp-coverage.c
index 11860c526e0..51650023efd 100644
--- a/tests/basic/gfapi/glfsxmp-coverage.c
+++ b/tests/basic/gfapi/glfsxmp-coverage.c
@@ -12,7 +12,7 @@ int
test_dirops(glfs_t *fs)
{
glfs_fd_t *fd = NULL;
- char buf[512];
+ char buf[2048];
struct dirent *entry = NULL;
fd = glfs_opendir(fs, "/");
@@ -26,6 +26,9 @@ test_dirops(glfs_t *fs)
fprintf(stderr, "%s: %lu\n", entry->d_name, glfs_telldir(fd));
}
+ /* Should internally call fsyncdir(), hopefully */
+ glfs_fsync(fd, NULL, NULL);
+
glfs_closedir(fd);
return 0;
}
@@ -1313,7 +1316,7 @@ test_handleops(int argc, char *argv[])
memcpy(writebuf, "abcdefghijklmnopqrstuvwxyz012345", 32);
ret = glfs_write(fd, writebuf, 32, 0);
- glfs_lseek(fd, 0, SEEK_SET);
+ glfs_lseek(fd, 10, SEEK_SET);
ret = glfs_read(fd, readbuf, 32, 0);
if (memcmp(readbuf, writebuf, 32)) {
@@ -1651,6 +1654,13 @@ test_write_apis(glfs_t *fs)
strerror(errno));
}
+ ret = glfs_fsync(fd, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "fsync(%s): %d (%s)\n", filename, ret, strerror(errno));
+ }
+
+ glfs_close(fd);
+
return 0;
}
@@ -1667,7 +1677,7 @@ test_metadata_ops(glfs_t *fs, glfs_t *fs2)
};
struct statvfs sfs;
char readbuf[32];
- char writebuf[32];
+ char writebuf[11] = "helloworld";
char *filename = "/filename2";
int ret;
@@ -1676,12 +1686,26 @@ test_metadata_ops(glfs_t *fs, glfs_t *fs2)
fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
fd = glfs_creat(fs, filename, O_RDWR, 0644);
- fprintf(stderr, "creat(%s): (%p) %s\n", filename, fd, strerror(errno));
+ if (!fd)
+ fprintf(stderr, "creat(%s): (%p) %s\n", filename, fd, strerror(errno));
fd2 = glfs_open(fs2, filename, O_RDWR);
- fprintf(stderr, "open(%s): (%p) %s\n", filename, fd, strerror(errno));
+ if (!fd2)
+ fprintf(stderr, "open(%s): (%p) %s\n", filename, fd, strerror(errno));
- glfs_lseek(fd2, 0, SEEK_SET);
+ ret = glfs_lstat(fs, filename, &sb);
+ if (ret)
+ fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_write(fd, writebuf, 11, 0);
+ if (ret < 0) {
+ fprintf(stderr, "writev(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ }
+
+ glfs_fsync(fd, NULL, NULL);
+
+ glfs_lseek(fd2, 5, SEEK_SET);
ret = glfs_read(fd2, readbuf, 32, 0);
@@ -1689,39 +1713,67 @@ test_metadata_ops(glfs_t *fs, glfs_t *fs2)
/* get stat */
ret = glfs_fstat(fd2, &sb);
+ if (ret)
+ fprintf(stderr, "fstat(%s): %d (%s)\n", filename, ret, strerror(errno));
ret = glfs_access(fs, filename, R_OK);
+ if (ret)
+ fprintf(stderr, "access(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_fallocate(fd2, 1024, 1024, 1024);
+ if (ret)
+ fprintf(stderr, "fallocate(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_discard(fd2, 1024, 512);
+ if (ret)
+ fprintf(stderr, "discard(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_zerofill(fd2, 2048, 1024);
+ if (ret)
+ fprintf(stderr, "zerofill(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
/* set stat */
/* TODO: got some errors, need to fix */
- /* ret = glfs_fsetattr(fd2, &gsb); */
+ ret = glfs_fsetattr(fd2, &gsb);
glfs_close(fd);
glfs_close(fd2);
filename = "/filename3";
ret = glfs_mknod(fs, filename, S_IFIFO, 0);
- fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
+ if (ret)
+ fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
ret = glfs_lstat(fs, filename, &sb);
- fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
+ if (ret)
+ fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
ret = glfs_rename(fs, filename, "/filename4");
- fprintf(stderr, "rename(%s): (%d) %s\n", filename, ret, strerror(errno));
+ if (ret)
+ fprintf(stderr, "rename(%s): (%d) %s\n", filename, ret,
+ strerror(errno));
ret = glfs_unlink(fs, "/filename4");
- fprintf(stderr, "unlink(%s): (%d) %s\n", "/filename4", ret,
- strerror(errno));
+ if (ret)
+ fprintf(stderr, "unlink(%s): (%d) %s\n", "/filename4", ret,
+ strerror(errno));
filename = "/dirname2";
ret = glfs_mkdir(fs, filename, 0);
- fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
+ if (ret)
+ fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
ret = glfs_lstat(fs, filename, &sb);
- fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
+ if (ret)
+ fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
ret = glfs_rmdir(fs, filename);
- fprintf(stderr, "rmdir(%s): (%d) %s\n", filename, ret, strerror(errno));
+ if (ret)
+ fprintf(stderr, "rmdir(%s): (%d) %s\n", filename, ret, strerror(errno));
}
int
main(int argc, char *argv[])
@@ -1739,48 +1791,78 @@ main(int argc, char *argv[])
struct statvfs sfs;
char readbuf[32];
char writebuf[32];
+ char volumeid[64];
char *filename = "/filename2";
- if (argc != 3) {
- printf("Expect following args\n\t%s <volname> <hostname>\n", argv[0]);
+ if ((argc < 2) || (argc > 3)) {
+ printf("Usage:\n\t%s <volname> <hostname>\n\t%s <volfile-path>\n",
+ argv[0], argv[0]);
return -1;
}
- fs = glfs_new(argv[1]);
- if (!fs) {
- fprintf(stderr, "glfs_new: returned NULL\n");
- return 1;
+ if (argc == 2) {
+ /* Generally glfs_new() requires volume name as an argument */
+ fs = glfs_new("test-only");
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+ ret = glfs_set_volfile(fs, argv[1]);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile failed\n");
+ } else {
+ fs = glfs_new(argv[1]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+ // ret = glfs_set_volfile_server (fs, "unix", "/tmp/gluster.sock", 0);
+ ret = glfs_set_volfile_server(fs, "tcp", argv[2], 24007);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile_server failed\n");
}
- // ret = glfs_set_volfile (fs, "/tmp/posix.vol");
-
- ret = glfs_set_volfile_server(fs, "tcp", argv[2], 24007);
-
- // ret = glfs_set_volfile_server (fs, "unix", "/tmp/gluster.sock", 0);
-
- ret = glfs_set_logging(fs, "/dev/stderr", 7);
+ /* Change this to relevant file when running locally */
+ ret = glfs_set_logging(fs, "/dev/stderr", 5);
+ if (ret)
+ fprintf(stderr, "glfs_set_logging failed\n");
ret = glfs_init(fs);
-
- fprintf(stderr, "glfs_init: returned %d\n", ret);
+ if (ret)
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
if (ret)
goto out;
- sleep(2);
-
- fs2 = glfs_new(argv[1]);
- if (!fs2) {
- fprintf(stderr, "glfs_new: returned NULL\n");
- return 1;
+ /* no major use for getting the volume id in this test, done for coverage */
+ ret = glfs_get_volumeid(fs, volumeid, 64);
+ if (ret) {
+ fprintf(stderr, "glfs_get_volumeid: returned %d\n", ret);
}
- // ret = glfs_set_volfile (fs2, "/tmp/posix.vol");
-
- ret = glfs_set_volfile_server(fs2, "tcp", argv[2], 24007);
+ sleep(2);
- ret = glfs_set_logging(fs2, "/dev/stderr", 7);
+ if (argc == 2) {
+ /* Generally glfs_new() requires volume name as an argument */
+ fs2 = glfs_new("test_only_volume");
+ if (!fs2) {
+ fprintf(stderr, "glfs_new(fs2): returned NULL\n");
+ return 1;
+ }
+ ret = glfs_set_volfile(fs2, argv[1]);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile failed(fs2)\n");
+ } else {
+ fs2 = glfs_new(argv[1]);
+ if (!fs2) {
+ fprintf(stderr, "glfs_new(fs2): returned NULL\n");
+ return 1;
+ }
+ ret = glfs_set_volfile_server(fs2, "tcp", argv[2], 24007);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile_server failed(fs2)\n");
+ }
ret = glfs_set_statedump_path(fs2, "/tmp");
if (ret) {
@@ -1788,8 +1870,8 @@ main(int argc, char *argv[])
}
ret = glfs_init(fs2);
-
- fprintf(stderr, "glfs_init: returned %d\n", ret);
+ if (ret)
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
test_metadata_ops(fs, fs2);
@@ -1807,6 +1889,8 @@ main(int argc, char *argv[])
glfs_statvfs(fs, "/", &sfs);
+ glfs_unset_volfile_server(fs, "tcp", argv[2], 24007);
+
glfs_fini(fs);
glfs_fini(fs2);
diff --git a/tests/basic/gfapi/glfsxmp.t b/tests/basic/gfapi/glfsxmp.t
index 4f0d90d059f..b3e6645c0f5 100644
--- a/tests/basic/gfapi/glfsxmp.t
+++ b/tests/basic/gfapi/glfsxmp.t
@@ -13,9 +13,14 @@ EXPECT 'Created' volinfo_field $V0 'Status'
TEST $CLI volume start $V0
EXPECT 'Started' volinfo_field $V0 'Status'
+$CLI system getspec $V0 > fubar.vol
+
TEST cp $(dirname $0)/glfsxmp-coverage.c ./glfsxmp.c
TEST build_tester ./glfsxmp.c -lgfapi
TEST ./glfsxmp $V0 $H0
+
+TEST ./glfsxmp fubar.vol
+
TEST cleanup_tester ./glfsxmp
TEST rm ./glfsxmp.c
diff --git a/tests/basic/gfapi/protocol-client-ssl.vol.in b/tests/basic/gfapi/protocol-client-ssl.vol.in
new file mode 100644
index 00000000000..cdc0c9d0671
--- /dev/null
+++ b/tests/basic/gfapi/protocol-client-ssl.vol.in
@@ -0,0 +1,15 @@
+#
+# This .vol file expects that there is
+#
+# 1. GlusterD listening on @@HOSTNAME@@
+# 2. a volume that provides a brick on @@BRICKPATH@@
+# 3. the volume with the brick has been started
+#
+volume test
+ type protocol/client
+ option remote-host @@HOSTNAME@@
+ option remote-subvolume @@BRICKPATH@@
+ option transport-type socket
+ option transport.socket.ssl-enabled @@SSL@@
+end-volume
+
diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
index a50af9dfa57..46d0dac2fce 100644
--- a/tests/basic/glusterd-restart-shd-mux.t
+++ b/tests/basic/glusterd-restart-shd-mux.t
@@ -31,9 +31,9 @@ EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
TEST glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
#Check the thread count become to number of volumes*number of ec subvolume (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
#Check the thread count become to number of volumes*number of afr subvolume (4*6=24)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "afr_shd_index_healer"
shd_pid=$(get_shd_mux_pid $V0)
for i in $(seq 1 3); do
@@ -52,9 +52,9 @@ TEST glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
#Check the thread count become to number of volumes*number of ec subvolume (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
#Check the thread count become to number of volumes*number of afr subvolume (4*6=24)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "afr_shd_index_healer"
shd_pid=$(get_shd_mux_pid $V0)
for i in $(seq 1 3); do
@@ -69,7 +69,7 @@ for i in $(seq 1 3); do
TEST $CLI volume stop ${V0}_ec$i
done
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
diff --git a/tests/basic/glusterd/arbiter-volume.t b/tests/basic/glusterd/arbiter-volume.t
deleted file mode 100644
index e9edf046905..00000000000
--- a/tests/basic/glusterd/arbiter-volume.t
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-# This command tests the volume create command validation for arbiter volumes.
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
-EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
-
-TEST $CLI volume delete $V0
-TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/b{4..9}
-EXPECT "2 x \(2 \+ 1\) = 6" volinfo_field $V0 "Number of Bricks"
-
-TEST $CLI volume delete $V0
-
-TEST rm -rf $B0/b{1..3}
-TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
-EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
-TEST killall -15 glusterd
-TEST glusterd
-TEST pidof glusterd
-EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
-
-#cleanup
diff --git a/tests/basic/glusterd/disperse-create.t b/tests/basic/glusterd/disperse-create.t
index 384c675c882..db8a621d48e 100644
--- a/tests/basic/glusterd/disperse-create.t
+++ b/tests/basic/glusterd/disperse-create.t
@@ -20,6 +20,10 @@ TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/b7 $H0:$B0/b8 $H0:$B
EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
TEST $CLI volume delete $V0
+TEST $CLI volume create $V0 disperse-data 2 $H0:$B0/b10 $H0:$B0/b11 $H0:$B0/b12
+EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
+
+TEST $CLI volume delete $V0
TEST $CLI volume create $V0 redundancy 1 $H0:$B0/b10 $H0:$B0/b11 $H0:$B0/b12
EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
diff --git a/tests/basic/glusterd/volfile_server_switch.t b/tests/basic/glusterd/volfile_server_switch.t
index 309060919b7..e11cfed509a 100644
--- a/tests/basic/glusterd/volfile_server_switch.t
+++ b/tests/basic/glusterd/volfile_server_switch.t
@@ -34,7 +34,7 @@ TEST glusterfs --volfile-id=/$V0 --volfile-server=$H1 --volfile-server=$H2 --vol
TEST kill_glusterd 1
-TEST $CLI_2 volume set $V0 performance.io-cache off
+TEST $CLI_2 volume set $V0 performance.write-behind off
# make sure by this time directory will be created
# TODO: suggest ideal time to wait
diff --git a/tests/basic/glusterd/volume-brick-count.t b/tests/basic/glusterd/volume-brick-count.t
new file mode 100644
index 00000000000..dc1a5278f4f
--- /dev/null
+++ b/tests/basic/glusterd/volume-brick-count.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function test_volume_config()
+{
+ volname=$1
+ type_string=$2
+ brickCount=$3
+ distCount=$4
+ replicaCount=$5
+ arbiterCount=$6
+ disperseCount=$7
+ redundancyCount=$8
+
+ EXPECT "$type_string" volinfo_field $volname "Number of Bricks"
+ EXPECT "$brickCount" get-xml "volume info $volname" "brickCount"
+ EXPECT "$distCount" get-xml "volume info $volname" "distCount"
+ EXPECT "$replicaCount" get-xml "volume info $volname" "replicaCount"
+ EXPECT "$arbiterCount" get-xml "volume info $volname" "arbiterCount"
+ EXPECT "$disperseCount" get-xml "volume info $volname" "disperseCount"
+ EXPECT "$redundancyCount" get-xml "volume info $volname" "redundancyCount"
+}
+
+# This test checks the volume create command and the reported brick counts for different volume types.
+cleanup;
+TESTS_EXPECTED_IN_LOOP=56
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create ${V0}_1 replica 3 arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+test_volume_config "${V0}_1" "1 x \(2 \+ 1\) = 3" "3" "1" "3" "1" "0" "0"
+
+TEST $CLI volume create ${V0}_2 replica 3 arbiter 1 $H0:$B0/b{4..9}
+test_volume_config "${V0}_2" "2 x \(2 \+ 1\) = 6" "6" "2" "3" "1" "0" "0"
+
+
+TEST $CLI volume create ${V0}_3 replica 3 arbiter 1 $H0:$B0/b{10..12}
+test_volume_config "${V0}_3" "1 x \(2 \+ 1\) = 3" "3" "1" "3" "1" "0" "0"
+TEST killall -15 glusterd
+TEST glusterd
+TEST pidof glusterd
+test_volume_config "${V0}_3" "1 x \(2 \+ 1\) = 3" "3" "1" "3" "1" "0" "0"
+
+TEST $CLI volume create ${V0}_4 replica 3 $H0:$B0/b{13..15}
+test_volume_config "${V0}_4" "1 x 3 = 3" "3" "1" "3" "0" "0" "0"
+
+TEST $CLI volume create ${V0}_5 replica 3 $H0:$B0/b{16..21}
+test_volume_config "${V0}_5" "2 x 3 = 6" "6" "2" "3" "0" "0" "0"
+
+TEST $CLI volume create ${V0}_6 disperse 3 redundancy 1 $H0:$B0/b{22..24}
+test_volume_config "${V0}_6" "1 x \(2 \+ 1\) = 3" "3" "1" "1" "0" "3" "1"
+
+TEST $CLI volume create ${V0}_7 disperse 3 redundancy 1 $H0:$B0/b{25..30}
+test_volume_config "${V0}_7" "2 x \(2 \+ 1\) = 6" "6" "2" "1" "0" "3" "1"
+
+TEST $CLI volume create ${V0}_8 $H0:$B0/b{31..33}
+test_volume_config "${V0}_8" "3" "3" "3" "1" "0" "0" "0"
+
+cleanup
diff --git a/tests/basic/graph-cleanup-brick-down-shd-mux.t b/tests/basic/graph-cleanup-brick-down-shd-mux.t
new file mode 100644
index 00000000000..3c621cdcc26
--- /dev/null
+++ b/tests/basic/graph-cleanup-brick-down-shd-mux.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 2); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count becomes number of volumes * number of ec subvolumes (2*6=12)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#kill one brick and test cleanup
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+
+#kill an entire subvol and test cleanup
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+#wait for some time to create a race scenario
+sleep 1
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+
+#kill all bricks and test cleanup
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick $V0 $H0 $B0/${V0}5
+#wait for some time to create a race scenario
+sleep 2
+
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+
+cleanup
diff --git a/tests/basic/metadisp/fsyncdir.c b/tests/basic/metadisp/fsyncdir.c
new file mode 100644
index 00000000000..62b532b9ce4
--- /dev/null
+++ b/tests/basic/metadisp/fsyncdir.c
@@ -0,0 +1,29 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <fcntl.h>
+
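+/* Test helper: opens the directory argv[1], renames argv[2] to argv[3],
+ * then fsyncs the directory fd (exercises the fsyncdir path). */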
+int
+main(int argc, char **argv)
+{
+ int pfd;
+
+ pfd = open(argv[1], O_RDONLY | O_DIRECTORY);
+ if (pfd == (-1)) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (rename(argv[2], argv[3]) == (-1)) {
+ perror("rename");
+ return EXIT_FAILURE;
+ }
+
+ if (fsync(pfd) == (-1)) {
+ perror("fsync");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/basic/metadisp/ftruncate.c b/tests/basic/metadisp/ftruncate.c
new file mode 100644
index 00000000000..c9185212c31
--- /dev/null
+++ b/tests/basic/metadisp/ftruncate.c
@@ -0,0 +1,34 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <fcntl.h>
+
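+/* Test helper: opens argv[1], truncates it to zero, writes "hello" and
+ * fsyncs, all through the same open fd. */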
+int
+main(int argc, char **argv)
+{
+ int pfd;
+
+ pfd = open(argv[1], O_RDWR);
+ if (pfd == (-1)) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (ftruncate(pfd, 0) == (-1)) {
+ perror("ftruncate");
+ return EXIT_FAILURE;
+ }
+
+ if (write(pfd, "hello", 5) == (-1)) {
+ perror("write");
+ return EXIT_FAILURE;
+ }
+
+ if (fsync(pfd) == (-1)) {
+ perror("fsync");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/basic/metadisp/fxattr.c b/tests/basic/metadisp/fxattr.c
new file mode 100644
index 00000000000..e552057778a
--- /dev/null
+++ b/tests/basic/metadisp/fxattr.c
@@ -0,0 +1,107 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/xattr.h>
+
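+/* Drives fsetxattr/fgetxattr/fremovexattr on an already-open fd.
+ * Usage: fxattr <path> set <value> | get <expected-value> | remove */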
+static char MY_XATTR[] = "user.fxtest";
+static char *PROGRAM;
+#define CONSUME(v) \
+ do { \
+ if (!argc) { \
+ fprintf(stderr, "missing argument\n"); \
+ return EXIT_FAILURE; \
+ } \
+ v = argv[0]; \
+ ++argv; \
+ --argc; \
+ } while (0)
+
+static int
+do_get(int argc, char **argv, int fd)
+{
+ char *value;
+ int ret;
+ char buf[1024];
+
+ CONSUME(value);
+
+ ret = fgetxattr(fd, MY_XATTR, buf, sizeof(buf));
+ if (ret == (-1)) {
+ perror("fgetxattr");
+ return EXIT_FAILURE;
+ }
+
+ if (strncmp(buf, value, ret) != 0) {
+ fprintf(stderr, "data mismatch\n");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
+
+static int
+do_set(int argc, char **argv, int fd)
+{
+ char *value;
+ int ret;
+
+ CONSUME(value);
+
+ ret = fsetxattr(fd, MY_XATTR, value, strlen(value), 0);
+ if (ret == (-1)) {
+ perror("fsetxattr");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
+
+static int
+do_remove(int argc, char **argv, int fd)
+{
+ int ret;
+
+ ret = fremovexattr(fd, MY_XATTR);
+ if (ret == (-1)) {
+ perror("femovexattr");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
+
+int
+main(int argc, char **argv)
+{
+ int fd;
+ char *path;
+ char *cmd;
+
+ CONSUME(PROGRAM);
+ CONSUME(path);
+ CONSUME(cmd);
+
+ fd = open(path, O_RDWR);
+ if (fd == (-1)) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (strcmp(cmd, "get") == 0) {
+ return do_get(argc, argv, fd);
+ }
+
+ if (strcmp(cmd, "set") == 0) {
+ return do_set(argc, argv, fd);
+ }
+
+ if (strcmp(cmd, "remove") == 0) {
+ return do_remove(argc, argv, fd);
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/basic/metadisp/gfs-fsetxattr.c b/tests/basic/metadisp/gfs-fsetxattr.c
new file mode 100644
index 00000000000..63578bc528f
--- /dev/null
+++ b/tests/basic/metadisp/gfs-fsetxattr.c
@@ -0,0 +1,141 @@
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int gfapi = 1;
+
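+/* gfapi helper: creates <basename>-dir and sets an xattr through its open
+ * directory fd, then creates <basename>-file, renames it and sets an xattr
+ * on the still-open fd of the renamed file. */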
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ int ret = 0;
+ int i = 0;
+ glfs_fd_t *fd = NULL;
+ char *topdir = "topdir", *filename = "file1";
+ char *buf = NULL;
+ char *logfile = NULL;
+ char *hostname = NULL;
+ char *basename = NULL;
+ char *dir1 = NULL, *dir2 = NULL, *filename1 = NULL, *filename2 = NULL;
+ struct stat sb = {
+ 0,
+ };
+
+ if (argc != 5) {
+ fprintf(
+ stderr,
+ "Expect following args %s <hostname> <Vol> <log file> <basename>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ logfile = argv[3];
+ basename = argv[4];
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL (%s)\n", strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_volfile_server failed ret:%d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_logging failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = asprintf(&dir1, "%s-dir", basename);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ ret = glfs_mkdir(fs, dir1, 0755);
+ if (ret < 0) {
+ fprintf(stderr, "mkdir(%s): %s\n", dir1, strerror(errno));
+ return -1;
+ }
+
+ fd = glfs_opendir(fs, dir1);
+ if (!fd) {
+ fprintf(stderr, "/: %s\n", strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fsetxattr(fd, "user.dirfattr", "fsetxattr", 9, 0);
+ if (ret < 0) {
+ fprintf(stderr, "fsetxattr(%s): %d (%s)\n", dir1, ret, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_closedir(fd);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_closedir failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = asprintf(&filename1, "%s-file", basename);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ ret = asprintf(&filename2, "%s-file-renamed", basename);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ fd = glfs_creat(fs, filename1, O_RDWR, 0644);
+ if (!fd) {
+ fprintf(stderr, "%s: (%p) %s\n", filename1, fd, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_rename(fs, filename1, filename2);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_rename failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_lstat(fs, filename2, &sb);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_lstat failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fsetxattr(fd, "user.filefattr", "fsetxattr", 9, 0);
+ if (ret < 0) {
+ fprintf(stderr, "fsetxattr(%s): %d (%s)\n", dir1, ret, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_close(fd);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_close failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+}
diff --git a/tests/basic/metadisp/metadisp.t b/tests/basic/metadisp/metadisp.t
new file mode 100644
index 00000000000..894ffe07226
--- /dev/null
+++ b/tests/basic/metadisp/metadisp.t
@@ -0,0 +1,316 @@
+#!/usr/bin/env bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+
+# Because `--enable-metadisp` is a `./configure` option that is disabled by
+# default, this test will never pass in regression runs. To see the value of
+# this test, configure with that option and then run:
+# `prove -vmfe '/bin/bash' tests/basic/metadisp/metadisp.t`
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST
+
+cleanup;
+
+TEST mkdir -p $B0/b0/{0,1}
+
+TEST setfattr -n trusted.glusterfs.volume-id -v 0xddab9eece7b64a95b07351a1f748f56f ${B0}/b0/0
+TEST setfattr -n trusted.glusterfs.volume-id -v 0xddab9eece7b64a95b07351a1f748f56f ${B0}/b0/1
+
+TEST $GFS --volfile=$(dirname $0)/metadisp.vol --volfile-id=$V0 $M0;
+
+NUM_FILES=40
+TEST touch $M0/{1..${NUM_FILES}}
+
+# each drive should get 40 files
+TEST [ $(dir -1 $B0/b0/0/ | wc -l) -eq $NUM_FILES ]
+TEST [ $(dir -1 $B0/b0/1/ | wc -l) -eq $NUM_FILES ]
+
+# now write some data to a file
+echo "hello" > $M0/3
+filename=$$
+echo "hello" > /tmp/metadisp-write-${filename}
+checksum=$(md5sum /tmp/metadisp-write-${filename} | awk '{print $1}')
+TEST [ "$(md5sum $M0/3 | awk '{print $1}')" == "$checksum" ]
+
+# check that the backend file exists on b1
+gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/b0/*/3))
+TEST [ $(dir -1 $B0/b0/1/$gfid | wc -l) -eq 1 ]
+
+# check that the backend file matches the frontend
+TEST [ "$(md5sum $B0/b0/1/$gfid | awk '{print $1}')" == "$checksum" ]
+
+# delete the file
+TEST rm $M0/3
+
+# ensure the frontend and backend files are cleaned up
+TEST ! test -e $M0/3
+TEST ! stat $B0/b*/*/$gfid
+
+# Test TRUNCATE + WRITE flow
+echo "hello" | tee $M0/4
+echo "goo" | tee $M0/4
+filename=$$
+echo "goo" | tee /tmp/metadisp-truncate-${filename}
+checksum=$(md5sum /tmp/metadisp-truncate-${filename} | awk '{print $1}')
+TEST [ "$(md5sum $M0/4 | awk '{print $1}')" == "$checksum" ]
+
+# Test mkdir + rmdir.
+TEST mkdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+TEST rmdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+
+# Test rename.
+TEST touch $M0/rename_me
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+TEST mv $M0/rename_me $M0/such_rename
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+
+# Test rename of a file that doesn't exist.
+TEST ! mv $M0/does-not-exist $M0/neither-does-this
+
+
+# cleanup all the other files.
+TEST rm -v $M0/1 $M0/2 $M0/{4..${NUM_FILES}}
+TEST rm $M0/such_rename
+TEST [ $(ls /d/backends/b0/0/ | wc -l) -eq 0 ]
+TEST [ $(ls /d/backends/b0/1/ | wc -l) -eq 0 ]
+
+# Test CREATE flow
+NUM_FILES=40
+TEST touch $M0/{1..${NUM_FILES}}
+TEST [ $(ls /d/backends/b0/0/ | wc -l) -eq $NUM_FILES ]
+TEST [ $(ls /d/backends/b0/1/ | wc -l) -eq $NUM_FILES ]
+
+# Test UNLINK flow
+# No drives should have any files
+TEST rm -v $M0/{1..${NUM_FILES}}
+TEST [ $(ls /d/backends/b0/0/ | wc -l) -eq 0 ]
+TEST [ $(ls /d/backends/b0/1/ | wc -l) -eq 0 ]
+
+# Test CREATE + WRITE + READ flow
+filename=$$
+dd if=/dev/urandom of=/tmp/${filename} bs=1M count=10
+checksum=$(md5sum /tmp/${filename} | awk '{print $1}')
+TEST cp -v /tmp/${filename} $M0/1
+TEST cp -v /tmp/${filename} $M0/2
+TEST cp -v /tmp/${filename} $M0/3
+TEST cp -v /tmp/${filename} $M0/4
+TEST [ "$(md5sum $M0/1 | awk '{print $1}')" == "$checksum" ]
+TEST [ "$(md5sum $M0/2 | awk '{print $1}')" == "$checksum" ]
+TEST [ "$(md5sum $M0/3 | awk '{print $1}')" == "$checksum" ]
+TEST [ "$(md5sum $M0/4 | awk '{print $1}')" == "$checksum" ]
+
+# Test TRUNCATE + WRITE flow
+TEST dd if=/dev/zero of=$M0/1 bs=1M count=20
+
+# Check that readdir stats the files properly and we get the correct sizes
+TEST [ $(find $M0 -size +9M | wc -l) -eq 4 ];
+
+# Test mkdir + rmdir.
+TEST mkdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+TEST rmdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+
+# Test rename.
+# Still flaky, so disabled until it can be debugged.
+TEST touch $M0/rename_me
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+TEST mv $M0/rename_me $M0/such_rename
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+
+# Test rename of a file that doesn't exist.
+TEST ! mv $M0/does-not-exist $M0/neither-does-this
+
+# Test rename over an existing file.
+ok=yes
+for i in $(seq 0 9); do
+ echo foo > $M0/src$i
+ echo bar > $M0/dst$i
+done
+for i in $(seq 0 9); do
+ mv $M0/src$i $M0/dst$i
+done
+for i in $(seq 0 9); do
+ nfiles=$(cat $B0/b0/*/dst$i | wc -l)
+ if [ "$nfiles" = "2" ]; then
+ echo "COLLISION on dst$i"
+ (ls -l $B0/b0/*/dst$i; cat $B0/b0/*/dst$i) | sed "/^/s// /"
+ ok=no
+ fi
+done
+EXPECT "yes" echo $ok
+
+# Test rename of a directory.
+count_copies () {
+ ls -d $B0/b?/?/$1 2> /dev/null | wc -l
+}
+TEST mkdir $M0/foo_dir
+EXPECT 1 count_copies foo_dir
+EXPECT 0 count_copies bar_dir
+TEST mv $M0/foo_dir $M0/bar_dir
+EXPECT 0 count_copies foo_dir
+EXPECT 1 count_copies bar_dir
+
+for x in $(seq 0 99); do
+ touch $M0/target$x
+ ln -s $M0/target$x $M0/link$x
+done
+on_0=$(ls $B0/b*/0/link* | wc -l)
+on_1=$(ls $B0/b*/1/link* | wc -l)
+TEST [ "$on_0" -eq 100 ]
+TEST [ "$on_1" -eq 0 ]
+TEST [ "$(ls -l $M0/link* | wc -l)" = 100 ]
+
+# Test (hard) link.
+_test_hardlink () {
+ local b
+ local has_src
+ local has_dst
+ local src_inum
+ local dst_inum
+ touch $M0/hardsrc$1
+ ln $M0/hardsrc$1 $M0/harddst$1
+    for b in $B0/b0/{0,1}; do
+ [ -f $b/hardsrc$1 ]; has_src=$?
+ [ -f $b/harddst$1 ]; has_dst=$?
+ if [ "$has_src" != "$has_dst" ]; then
+ echo "MISSING $b/hardxxx$1 $has_src $has_dst"
+ return
+ fi
+ if [ "$has_src$has_dst" = "00" ]; then
+ src_inum=$(stat -c '%i' $b/hardsrc$1)
+ dst_inum=$(stat -c '%i' $b/harddst$1)
+ if [ "$dst_inum" != "$src_inum" ]; then
+ echo "MISMATCH $b/hardxx$i $src_inum $dst_inum"
+ return
+ fi
+ fi
+ done
+ echo "OK"
+}
+
+test_hardlink () {
+ local result=$(_test_hardlink $*)
+ # [ "$result" = "OK" ] || echo $result > /dev/tty
+ echo $result
+}
+
+# Do this multiple times to make sure colocation isn't a fluke.
+EXPECT "OK" test_hardlink 0
+EXPECT "OK" test_hardlink 1
+EXPECT "OK" test_hardlink 2
+EXPECT "OK" test_hardlink 3
+EXPECT "OK" test_hardlink 4
+EXPECT "OK" test_hardlink 5
+EXPECT "OK" test_hardlink 6
+EXPECT "OK" test_hardlink 7
+EXPECT "OK" test_hardlink 8
+EXPECT "OK" test_hardlink 9
+
+# Test removing the hardlink source; ensure that deleting one name
+# doesn't delete the data unless the link count is 1
+TEST mkdir $M0/hardlink
+TEST touch $M0/hardlink/fileA
+echo "data" >> $M0/hardlink/fileA
+checksum=$(md5sum $M0/hardlink/fileA | awk '{print $1}')
+TEST ln $M0/hardlink/fileA $M0/hardlink/fileB
+TEST [ $(dir -1 $M0/hardlink/ | wc -l) -eq 2 ]
+TEST rm $M0/hardlink/fileA
+TEST [ $(dir -1 $M0/hardlink/ | wc -l) -eq 1 ]
+TEST [ "$(md5sum $M0/hardlink/fileB | awk '{print $1}')" == "$checksum" ]
+
+#
+# FIXME: statfs values look ok but the test is bad
+#
+# Test statfs. If we're doing it right, the numbers for the mountpoint should be
+# double those for the brick filesystem times the number of bricks,
+# but unless we're on a completely idle
+# system (which never happens) the numbers can change even while this function
+# runs and that would trip us up. Do a sloppy comparison to deal with that.
+#compare_fields () {
+# val1=$(df $1 | grep / | awk "{print \$$3}")
+# val2=$(df $2 | grep / | awk "{print \$$3}")
+# [ "$val2" -gt "$(((val1/(29/10))*19/10))" -a "$val2" -lt "$(((val1/(31/10))*21/10))" ]
+#}
+
+#brick_df=$(df $B0 | grep /)
+#mount_df=$(df $M0 | grep /)
+#TEST compare_fields $B0 $M0 2 # Total blocks
+#TEST compare_fields $B0 $M0 3 # Used
+#TEST compare_fields $B0 $M0 4 # Available
+
+# Test removexattr.
+#RXATTR_FILE=$(get_file_not_on_disk0 rxtest)
+#TEST setfattr -n user.foo -v bar $M0/$RXATTR_FILE
+#TEST getfattr -n user.foo $B0/b0/1/$RXATTR_FILE
+#TEST setfattr -x user.foo $M0/$RXATTR_FILE
+#TEST ! getfattr -n user.foo $B0/b0/1/$RXATTR_FILE
+
+# Test fsyncdir. We can't really test whether it's doing the right thing,
+# but we can test that it doesn't fail and we can hand-check that it's calling
+# down to all of the disks instead of just one.
+#
+# P.S. There's no fsyncdir test in the rest of Gluster, so who even knows if
+# other translators are handling it correctly?
+
+#FSYNCDIR_EXE=$(dirname $0)/fsyncdir
+#build_tester ${FSYNCDIR_EXE}.c
+#TEST touch $M0/fsyncdir_src
+#TEST $FSYNCDIR_EXE $M0 $M0/fsyncdir_src $M0/fsyncdir_dst
+#TEST rm -f $FSYNCDIR_EXE
+
+# Test fsetxattr, fgetxattr, fremovexattr (in that order).
+FXATTR_FILE=$M0/fxfile1
+TEST touch $FXATTR_FILE
+FXATTR_EXE=$(dirname $0)/fxattr
+build_tester ${FXATTR_EXE}.c
+TEST ! getfattr -n user.fxtest $FXATTR_FILE
+TEST $FXATTR_EXE $FXATTR_FILE set value1
+TEST getfattr -n user.fxtest $FXATTR_FILE
+TEST setfattr -n user.fxtest -v value2 $FXATTR_FILE
+TEST $FXATTR_EXE $FXATTR_FILE get value2
+TEST $FXATTR_EXE $FXATTR_FILE remove
+TEST ! getfattr -n user.fxtest $FXATTR_FILE
+TEST rm -f $FXATTR_EXE
+
+# Test ftruncate
+FTRUNCATE_EXE=$(dirname $0)/ftruncate
+build_tester ${FTRUNCATE_EXE}.c
+FTRUNCATE_FILE=$M0/ftfile1
+TEST dd if=/dev/urandom of=$FTRUNCATE_FILE count=1 bs=1MB
+TEST $FTRUNCATE_EXE $FTRUNCATE_FILE
+#gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/b0/*/ftfile1))
+
+# Test fallocate, discard, zerofill. Actually we don't so much check that these
+# *work* as that they don't throw any errors (especially ENOENT because the
+# file's not on disk zero).
+FALLOC_FILE=fatest1
+TEST touch $M0/$FALLOC_FILE
+TEST fallocate -l $((4096*5)) $M0/$FALLOC_FILE
+TEST fallocate -p -o 4096 -l 4096 $M0/$FALLOC_FILE
+# This actually fails with "operation not supported" on most filesystems, so
+# don't leave it enabled except to test changes.
+#TEST fallocate -z -o $((4096*3)) -l 4096 $M0/$FALLOC_FILE
+
+#cleanup;
diff --git a/tests/basic/metadisp/metadisp.vol b/tests/basic/metadisp/metadisp.vol
new file mode 100644
index 00000000000..58ae2f6f2a8
--- /dev/null
+++ b/tests/basic/metadisp/metadisp.vol
@@ -0,0 +1,14 @@
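+# Volume graph used by metadisp.t: a single features/metadisp xlator stacked
+# on two local storage/posix subvolumes.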
+volume posix-0
+ type storage/posix
+ option directory /d/backends/b0/0
+end-volume
+
+volume posix-1
+ type storage/posix
+ option directory /d/backends/b0/1
+end-volume
+
+volume metadisp-0
+ type features/metadisp
+ subvolumes posix-0 posix-1
+end-volume
diff --git a/tests/basic/mount.t b/tests/basic/mount.t
index f4c2df31135..3a3d7cc9d8d 100755
--- a/tests/basic/mount.t
+++ b/tests/basic/mount.t
@@ -69,6 +69,9 @@ TEST rm -f $N0/newfile;
TEST ! stat $M0/newfile;
TEST ! stat $M1/newfile;
+# No need to check for status here right now
+$(dirname $0)/rpc-coverage.sh $N0 >/dev/null
+
## Before killing daemon to avoid deadlocks
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
diff --git a/tests/basic/multiple-volume-shd-mux.t b/tests/basic/multiple-volume-shd-mux.t
new file mode 100644
index 00000000000..d7cfbaec85f
--- /dev/null
+++ b/tests/basic/multiple-volume-shd-mux.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=16
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume start $V0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+for i in $(seq 1 3); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
+
+#Check that the thread count becomes number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+#Delete the volumes
+for i in $(seq 1 3); do
+ TEST $CLI volume stop ${V0}_afr$i
+ TEST $CLI volume stop ${V0}_ec$i
+ TEST $CLI volume delete ${V0}_afr$i
+ TEST $CLI volume delete ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/open-behind/open-behind.t b/tests/basic/open-behind/open-behind.t
new file mode 100644
index 00000000000..5e865d602e2
--- /dev/null
+++ b/tests/basic/open-behind/open-behind.t
@@ -0,0 +1,183 @@
+#!/bin/bash
+
+WD="$(dirname "${0}")"
+
+. ${WD}/../../include.rc
+. ${WD}/../../volume.rc
+
+function assign() {
+ local _assign_var="${1}"
+ local _assign_value="${2}"
+
+ printf -v "${_assign_var}" "%s" "${_assign_value}"
+}
+
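+# Creates an anonymous pipe: makes a temporary FIFO, opens it read/write on a
+# fresh fd and unlinks the name so only the fd remains.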
+function pipe_create() {
+ local _pipe_create_var="${1}"
+ local _pipe_create_name
+ local _pipe_create_fd
+
+ _pipe_create_name="$(mktemp -u)"
+ mkfifo "${_pipe_create_name}"
+ exec {_pipe_create_fd}<>"${_pipe_create_name}"
+ rm "${_pipe_create_name}"
+
+ assign "${_pipe_create_var}" "${_pipe_create_fd}"
+}
+
+function pipe_close() {
+ local _pipe_close_fd="${!1}"
+
+ exec {_pipe_close_fd}>&-
+}
+
+function tester_start() {
+ declare -ag tester
+ local tester_in
+ local tester_out
+
+ pipe_create tester_in
+ pipe_create tester_out
+
+ ${WD}/tester <&${tester_in} >&${tester_out} &
+
+ tester=("$!" "${tester_in}" "${tester_out}")
+}
+
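+# Sends one command line to the tester helper and echoes its reply; succeeds
+# only if the reply starts with "OK".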
+function tester_send() {
+ declare -ag tester
+ local tester_res
+ local tester_extra
+
+ echo "${*}" >&${tester[1]}
+
+ read -t 3 -u ${tester[2]} tester_res tester_extra
+ echo "${tester_res} ${tester_extra}"
+ if [[ "${tester_res}" == "OK" ]]; then
+ return 0
+ fi
+
+ return 1
+}
+
+function tester_stop() {
+ declare -ag tester
+ local tester_res
+
+ tester_send "quit"
+
+ tester_res=0
+ if ! wait ${tester[0]}; then
+ tester_res=$?
+ fi
+
+ unset tester
+
+ return ${tester_res}
+}
+
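+# Counts how many fds of the brick process refer to the given file, by
+# matching inode numbers of the targets of /proc/<brick pid>/fd/*.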
+function count_open() {
+ local file="$(realpath "${B0}/${V0}/${1}")"
+ local count="0"
+ local inode
+ local ref
+
+ inode="$(stat -c %i "${file}")"
+
+ for fd in /proc/${BRICK_PID}/fd/*; do
+ ref="$(readlink "${fd}")"
+ if [[ "${ref}" == "${B0}/${V0}/"* ]]; then
+ if [[ "$(stat -c %i "${ref}")" == "${inode}" ]]; then
+ count="$((${count} + 1))"
+ fi
+ fi
+ done
+
+ echo "${count}"
+}
+
+cleanup
+
+TEST build_tester ${WD}/tester.c ${WD}/tester-fd.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST ${CLI} volume create ${V0} ${H0}:${B0}/${V0}
+TEST ${CLI} volume set ${V0} flush-behind off
+TEST ${CLI} volume set ${V0} write-behind off
+TEST ${CLI} volume set ${V0} quick-read off
+TEST ${CLI} volume set ${V0} stat-prefetch on
+TEST ${CLI} volume set ${V0} io-cache off
+TEST ${CLI} volume set ${V0} open-behind on
+TEST ${CLI} volume set ${V0} lazy-open off
+TEST ${CLI} volume set ${V0} read-after-open off
+TEST ${CLI} volume start ${V0}
+
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+BRICK_PID="$(get_brick_pid ${V0} ${H0} ${B0}/${V0})"
+
+TEST touch "${M0}/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_start
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT_WITHIN 5 "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${CLI} volume set ${V0} lazy-open on
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+sleep 2
+EXPECT "0" count_open "/test"
+TEST tester_send fd write 0 "test"
+EXPECT "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT "0" count_open "/test"
+EXPECT "test" tester_send fd read 0 64
+# Even though read-after-open is disabled, use-anonymous-fd is also disabled,
+# so reads need to open the file first.
+EXPECT "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT "0" count_open "/test"
+TEST tester_send fd open 1 "${M0}/test"
+EXPECT "2" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "1" count_open "/test"
+TEST tester_send fd close 1
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${CLI} volume set ${V0} read-after-open on
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT "0" count_open "/test"
+EXPECT "test" tester_send fd read 0 64
+EXPECT "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST tester_stop
+
+cleanup
diff --git a/tests/basic/open-behind/tester-fd.c b/tests/basic/open-behind/tester-fd.c
new file mode 100644
index 00000000000..00f02bc5b0a
--- /dev/null
+++ b/tests/basic/open-behind/tester-fd.c
@@ -0,0 +1,99 @@
+/*
+ Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "tester.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+static int32_t
+fd_open(context_t *ctx, command_t *cmd)
+{
+ obj_t *obj;
+ int32_t fd;
+
+ obj = cmd->args[0].obj.ref;
+
+ fd = open(cmd->args[1].str.data, O_RDWR);
+ if (fd < 0) {
+ return error(errno, "open() failed");
+ }
+
+ obj->type = OBJ_TYPE_FD;
+ obj->fd = fd;
+
+ out_ok("%d", fd);
+
+ return 0;
+}
+
+static int32_t
+fd_close(context_t *ctx, command_t *cmd)
+{
+ obj_t *obj;
+
+ obj = cmd->args[0].obj.ref;
+ obj->type = OBJ_TYPE_NONE;
+
+ if (close(obj->fd) != 0) {
+ return error(errno, "close() failed");
+ }
+
+ out_ok();
+
+ return 0;
+}
+
+static int32_t
+fd_write(context_t *ctx, command_t *cmd)
+{
+ ssize_t len, ret;
+
+ len = strlen(cmd->args[1].str.data);
+ ret = write(cmd->args[0].obj.ref->fd, cmd->args[1].str.data, len);
+ if (ret < 0) {
+ return error(errno, "write() failed");
+ }
+
+ out_ok("%zd", ret);
+
+ return 0;
+}
+
+static int32_t
+fd_read(context_t *ctx, command_t *cmd)
+{
+ char data[cmd->args[1].num.value + 1];
+ ssize_t ret;
+
+ ret = read(cmd->args[0].obj.ref->fd, data, cmd->args[1].num.value);
+ if (ret < 0) {
+ return error(errno, "read() failed");
+ }
+
+ data[ret] = 0;
+
+ out_ok("%zd %s", ret, data);
+
+ return 0;
+}
+
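+/* "fd" sub-commands understood by the parser in tester.c:
+ *   open <slot> <path>, close <slot>, write <slot> <data>, read <slot> <size> */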
+command_t fd_commands[] = {
+ {"open", fd_open, CMD_ARGS(ARG_VAL(OBJ_TYPE_NONE), ARG_STR(1024))},
+ {"close", fd_close, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD))},
+ {"write", fd_write, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD), ARG_STR(1024))},
+ {"read", fd_read, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD), ARG_NUM(0, 1024))},
+ CMD_END};
diff --git a/tests/basic/open-behind/tester.c b/tests/basic/open-behind/tester.c
new file mode 100644
index 00000000000..b2da71c8385
--- /dev/null
+++ b/tests/basic/open-behind/tester.c
@@ -0,0 +1,444 @@
+/*
+ Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "tester.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+static void *
+mem_alloc(size_t size)
+{
+ void *ptr;
+
+ ptr = malloc(size);
+ if (ptr == NULL) {
+ error(ENOMEM, "Failed to allocate memory (%zu bytes)", size);
+ }
+
+ return ptr;
+}
+
+static void
+mem_free(void *ptr)
+{
+ free(ptr);
+}
+
+static bool
+buffer_create(context_t *ctx, size_t size)
+{
+ ctx->buffer.base = mem_alloc(size);
+ if (ctx->buffer.base == NULL) {
+ return false;
+ }
+
+ ctx->buffer.size = size;
+ ctx->buffer.len = 0;
+ ctx->buffer.pos = 0;
+
+ return true;
+}
+
+static void
+buffer_destroy(context_t *ctx)
+{
+ mem_free(ctx->buffer.base);
+ ctx->buffer.size = 0;
+ ctx->buffer.len = 0;
+}
+
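+/* Returns the next byte read from stdin, refilling the buffer when it runs
+ * empty; returns 0 on EOF and a negative errno-style value on error. */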
+static int32_t
+buffer_get(context_t *ctx)
+{
+ ssize_t len;
+
+ if (ctx->buffer.pos >= ctx->buffer.len) {
+ len = read(0, ctx->buffer.base, ctx->buffer.size);
+ if (len < 0) {
+ return error(errno, "read() failed");
+ }
+ if (len == 0) {
+ return 0;
+ }
+
+ ctx->buffer.len = len;
+ ctx->buffer.pos = 0;
+ }
+
+ return ctx->buffer.base[ctx->buffer.pos++];
+}
+
+static int32_t
+str_skip_spaces(context_t *ctx, int32_t current)
+{
+ while ((current > 0) && (current != '\n') && isspace(current)) {
+ current = buffer_get(ctx);
+ }
+
+ return current;
+}
+
+static int32_t
+str_token(context_t *ctx, char *buffer, uint32_t size, int32_t current)
+{
+ uint32_t len;
+
+ current = str_skip_spaces(ctx, current);
+
+ len = 0;
+ while ((size > 0) && (current > 0) && (current != '\n') &&
+ !isspace(current)) {
+ len++;
+ *buffer++ = current;
+ size--;
+ current = buffer_get(ctx);
+ }
+
+ if (len == 0) {
+ return error(ENODATA, "Expecting a token");
+ }
+
+ if (size == 0) {
+ return error(ENOBUFS, "Token too long");
+ }
+
+ *buffer = 0;
+
+ return current;
+}
+
+static int32_t
+str_number(context_t *ctx, uint64_t min, uint64_t max, uint64_t *value,
+ int32_t current)
+{
+ char text[32], *ptr;
+ uint64_t num;
+
+ current = str_token(ctx, text, sizeof(text), current);
+ if (current > 0) {
+ num = strtoul(text, &ptr, 0);
+ if ((*ptr != 0) || (num < min) || (num > max)) {
+ return error(ERANGE, "Invalid number");
+ }
+ *value = num;
+ }
+
+ return current;
+}
+
+static int32_t
+str_eol(context_t *ctx, int32_t current)
+{
+ current = str_skip_spaces(ctx, current);
+ if (current != '\n') {
+ return error(EINVAL, "Expecting end of command");
+ }
+
+ return current;
+}
+
+static void
+str_skip(context_t *ctx, int32_t current)
+{
+ while ((current > 0) && (current != '\n')) {
+ current = buffer_get(ctx);
+ }
+}
+
+static int32_t
+cmd_parse_obj(context_t *ctx, arg_t *arg, int32_t current)
+{
+ obj_t *obj;
+ uint64_t id;
+
+ current = str_number(ctx, 0, ctx->obj_count, &id, current);
+ if (current <= 0) {
+ return current;
+ }
+
+ obj = &ctx->objs[id];
+ if (obj->type != arg->obj.type) {
+ if (obj->type != OBJ_TYPE_NONE) {
+ return error(EBUSY, "Object is in use");
+ }
+ return error(ENOENT, "Object is not defined");
+ }
+
+ arg->obj.ref = obj;
+
+ return current;
+}
+
+static int32_t
+cmd_parse_num(context_t *ctx, arg_t *arg, int32_t current)
+{
+ return str_number(ctx, arg->num.min, arg->num.max, &arg->num.value,
+ current);
+}
+
+static int32_t
+cmd_parse_str(context_t *ctx, arg_t *arg, int32_t current)
+{
+ return str_token(ctx, arg->str.data, arg->str.size, current);
+}
+
+static int32_t
+cmd_parse_args(context_t *ctx, command_t *cmd, int32_t current)
+{
+ arg_t *arg;
+
+ for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
+ switch (arg->type) {
+ case ARG_TYPE_OBJ:
+ current = cmd_parse_obj(ctx, arg, current);
+ break;
+ case ARG_TYPE_NUM:
+ current = cmd_parse_num(ctx, arg, current);
+ break;
+ case ARG_TYPE_STR:
+ current = cmd_parse_str(ctx, arg, current);
+ break;
+ default:
+ return error(EINVAL, "Unknown argument type");
+ }
+ }
+
+ if (current < 0) {
+ return current;
+ }
+
+ current = str_eol(ctx, current);
+ if (current <= 0) {
+ return error(EINVAL, "Syntax error");
+ }
+
+ return cmd->handler(ctx, cmd);
+}
+
+static int32_t
+cmd_parse(context_t *ctx, command_t *cmds)
+{
+ char text[32];
+ command_t *cmd;
+ int32_t current;
+
+ cmd = cmds;
+ do {
+ current = str_token(ctx, text, sizeof(text), buffer_get(ctx));
+ if (current <= 0) {
+ return current;
+ }
+
+ while (cmd->name != NULL) {
+ if (strcmp(cmd->name, text) == 0) {
+ if (cmd->handler != NULL) {
+ return cmd_parse_args(ctx, cmd, current);
+ }
+ cmd = cmd->cmds;
+ break;
+ }
+ cmd++;
+ }
+ } while (cmd->name != NULL);
+
+ str_skip(ctx, current);
+
+ return error(ENOTSUP, "Unknown command");
+}
+
+static void
+cmd_fini(context_t *ctx, command_t *cmds)
+{
+ command_t *cmd;
+ arg_t *arg;
+
+ for (cmd = cmds; cmd->name != NULL; cmd++) {
+ if (cmd->handler == NULL) {
+ cmd_fini(ctx, cmd->cmds);
+ } else {
+ for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
+ switch (arg->type) {
+ case ARG_TYPE_STR:
+ mem_free(arg->str.data);
+ arg->str.data = NULL;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+}
+
+static bool
+cmd_init(context_t *ctx, command_t *cmds)
+{
+ command_t *cmd;
+ arg_t *arg;
+
+ for (cmd = cmds; cmd->name != NULL; cmd++) {
+ if (cmd->handler == NULL) {
+ if (!cmd_init(ctx, cmd->cmds)) {
+ return false;
+ }
+ } else {
+ for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
+ switch (arg->type) {
+ case ARG_TYPE_STR:
+ arg->str.data = mem_alloc(arg->str.size);
+ if (arg->str.data == NULL) {
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool
+objs_create(context_t *ctx, uint32_t count)
+{
+ uint32_t i;
+
+ ctx->objs = mem_alloc(sizeof(obj_t) * count);
+ if (ctx->objs == NULL) {
+ return false;
+ }
+ ctx->obj_count = count;
+
+ for (i = 0; i < count; i++) {
+ ctx->objs[i].type = OBJ_TYPE_NONE;
+ }
+
+ return true;
+}
+
+static int32_t
+objs_destroy(context_t *ctx)
+{
+ uint32_t i;
+ int32_t err;
+
+ err = 0;
+ for (i = 0; i < ctx->obj_count; i++) {
+ if (ctx->objs[i].type != OBJ_TYPE_NONE) {
+ err = error(ENOTEMPTY, "Objects not destroyed");
+ break;
+ }
+ }
+
+ mem_free(ctx->objs);
+ ctx->objs = NULL;
+ ctx->obj_count = 0;
+
+ return err;
+}
+
+static context_t *
+init(size_t size, uint32_t objs, command_t *cmds)
+{
+ context_t *ctx;
+
+ ctx = mem_alloc(sizeof(context_t));
+ if (ctx == NULL) {
+ goto failed;
+ }
+
+ if (!buffer_create(ctx, size)) {
+ goto failed_ctx;
+ }
+
+ if (!objs_create(ctx, objs)) {
+ goto failed_buffer;
+ }
+
+ if (!cmd_init(ctx, cmds)) {
+ goto failed_objs;
+ }
+
+ ctx->active = true;
+
+ return ctx;
+
+failed_objs:
+ cmd_fini(ctx, cmds);
+ objs_destroy(ctx);
+failed_buffer:
+ buffer_destroy(ctx);
+failed_ctx:
+ mem_free(ctx);
+failed:
+ return NULL;
+}
+
+static int32_t
+fini(context_t *ctx, command_t *cmds)
+{
+ int32_t ret;
+
+ cmd_fini(ctx, cmds);
+ buffer_destroy(ctx);
+
+ ret = objs_destroy(ctx);
+
+ ctx->active = false;
+
+ return ret;
+}
+
+static int32_t
+exec_quit(context_t *ctx, command_t *cmd)
+{
+ ctx->active = false;
+
+ return 0;
+}
+
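+/* Top-level command table: "fd ..." dispatches to fd_commands (tester-fd.c),
+ * "quit" clears ctx->active and ends the main loop. */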
+static command_t commands[] = {{"fd", NULL, CMD_SUB(fd_commands)},
+ {"quit", exec_quit, CMD_ARGS()},
+ CMD_END};
+
+int32_t
+main(int32_t argc, char *argv[])
+{
+ context_t *ctx;
+ int32_t res;
+
+ ctx = init(1024, 16, commands);
+ if (ctx == NULL) {
+ return 1;
+ }
+
+ do {
+ res = cmd_parse(ctx, commands);
+ if (res < 0) {
+ out_err(-res);
+ }
+ } while (ctx->active);
+
+ res = fini(ctx, commands);
+ if (res >= 0) {
+ out_ok();
+ return 0;
+ }
+
+ out_err(-res);
+
+ return 1;
+}
diff --git a/tests/basic/open-behind/tester.h b/tests/basic/open-behind/tester.h
new file mode 100644
index 00000000000..64e940c78fc
--- /dev/null
+++ b/tests/basic/open-behind/tester.h
@@ -0,0 +1,145 @@
+/*
+ Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __TESTER_H__
+#define __TESTER_H__
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdbool.h>
+
+enum _obj_type;
+typedef enum _obj_type obj_type_t;
+
+enum _arg_type;
+typedef enum _arg_type arg_type_t;
+
+struct _buffer;
+typedef struct _buffer buffer_t;
+
+struct _obj;
+typedef struct _obj obj_t;
+
+struct _context;
+typedef struct _context context_t;
+
+struct _arg;
+typedef struct _arg arg_t;
+
+struct _command;
+typedef struct _command command_t;
+
+enum _obj_type { OBJ_TYPE_NONE, OBJ_TYPE_FD };
+
+enum _arg_type { ARG_TYPE_NONE, ARG_TYPE_OBJ, ARG_TYPE_NUM, ARG_TYPE_STR };
+
+struct _buffer {
+ char *base;
+ uint32_t size;
+ uint32_t len;
+ uint32_t pos;
+};
+
+struct _obj {
+ obj_type_t type;
+ union {
+ int32_t fd;
+ };
+};
+
+struct _context {
+ obj_t *objs;
+ buffer_t buffer;
+ uint32_t obj_count;
+ bool active;
+};
+
+struct _arg {
+ arg_type_t type;
+ union {
+ struct {
+ obj_type_t type;
+ obj_t *ref;
+ } obj;
+ struct {
+ uint64_t value;
+ uint64_t min;
+ uint64_t max;
+ } num;
+ struct {
+ uint32_t size;
+ char *data;
+ } str;
+ };
+};
+
+struct _command {
+ const char *name;
+ int32_t (*handler)(context_t *ctx, command_t *cmd);
+ union {
+ arg_t *args;
+ command_t *cmds;
+ };
+};
+
+#define msg(_stream, _fmt, _args...) \
+ do { \
+ fprintf(_stream, _fmt "\n", ##_args); \
+ fflush(_stream); \
+ } while (0)
+
+#define msg_out(_fmt, _args...) msg(stdout, _fmt, ##_args)
+#define msg_err(_err, _fmt, _args...) \
+ ({ \
+ int32_t __msg_err = (_err); \
+ msg(stderr, "[%4u:%-15s] " _fmt, __LINE__, __FUNCTION__, __msg_err, \
+ ##_args); \
+ -__msg_err; \
+ })
+
+#define error(_err, _fmt, _args...) msg_err(_err, "E(%4d) " _fmt, ##_args)
+#define warn(_err, _fmt, _args...) msg_err(_err, "W(%4d) " _fmt, ##_args)
+#define info(_err, _fmt, _args...) msg_err(_err, "I(%4d) " _fmt, ##_args)
+
+#define out_ok(_args...) msg_out("OK " _args)
+#define out_err(_err) msg_out("ERR %d", _err)
+
+#define ARG_END \
+ { \
+ ARG_TYPE_NONE \
+ }
+
+#define CMD_ARGS1(_x, _args...) \
+ .args = (arg_t[]) { _args }
+#define CMD_ARGS(_args...) CMD_ARGS1(, ##_args, ARG_END)
+
+#define CMD_SUB(_cmds) .cmds = _cmds
+
+#define CMD_END \
+ { \
+ NULL, NULL, CMD_SUB(NULL) \
+ }
+
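+/* Argument descriptors used inside CMD_ARGS(): an object slot of the given
+ * type, a number within [_min, _max], or a string of at most _size bytes. */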
+#define ARG_VAL(_type) \
+ { \
+ ARG_TYPE_OBJ, .obj = {.type = _type } \
+ }
+#define ARG_NUM(_min, _max) \
+ { \
+ ARG_TYPE_NUM, .num = {.min = _min, .max = _max } \
+ }
+#define ARG_STR(_size) \
+ { \
+ ARG_TYPE_STR, .str = {.size = _size } \
+ }
+
+extern command_t fd_commands[];
+
+#endif /* __TESTER_H__ */
\ No newline at end of file
diff --git a/tests/basic/posix/shared-statfs.t b/tests/basic/posix/shared-statfs.t
index 33439562ec9..0e4a1bb409f 100644
--- a/tests/basic/posix/shared-statfs.t
+++ b/tests/basic/posix/shared-statfs.t
@@ -20,15 +20,18 @@ TEST mkdir -p $B0/${V0}1 $B0/${V0}2
TEST MOUNT_LOOP $LO1 $B0/${V0}1
TEST MOUNT_LOOP $LO2 $B0/${V0}2
+total_brick_blocks=$(df -P $B0/${V0}1 $B0/${V0}2 | tail -2 | awk '{sum = sum+$2}END{print sum}')
+#Account for rounding error
+brick_blocks_two_percent_less=$((total_brick_blocks*98/100))
# Create a subdir in mountpoint and use that for volume.
TEST $CLI volume create $V0 $H0:$B0/${V0}1/1 $H0:$B0/${V0}2/1;
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" online_brick_count
TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
-total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
+total_mount_blocks=$(df -P $M0 | tail -1 | awk '{ print $2}')
# Keeping the size less than 200M mainly because XFS will use
# some storage in brick to keep its own metadata.
-TEST [ $total_space -gt 194000 -a $total_space -lt 200000 ]
+TEST [ $total_mount_blocks -gt $brick_blocks_two_percent_less -a $total_mount_blocks -lt 200000 ]
TEST force_umount $M0
@@ -41,8 +44,8 @@ TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1/2 $H0:$B0/${V0}2/2 $H0:$B0/${V0}1/
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" online_brick_count
TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
-total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
-TEST [ $total_space -gt 194000 -a $total_space -lt 200000 ]
+total_mount_blocks=$(df -P $M0 | tail -1 | awk '{ print $2}')
+TEST [ $total_mount_blocks -gt $brick_blocks_two_percent_less -a $total_mount_blocks -lt 200000 ]
TEST force_umount $M0
TEST $CLI volume stop $V0
diff --git a/tests/basic/posix/zero-fill-enospace.c b/tests/basic/posix/zero-fill-enospace.c
index 1371ff59a5f..b1f142c6be9 100644
--- a/tests/basic/posix/zero-fill-enospace.c
+++ b/tests/basic/posix/zero-fill-enospace.c
@@ -1,4 +1,5 @@
#include <stdio.h>
+#include <stdlib.h>
#include <glusterfs/api/glfs.h>
#include <glusterfs/api/glfs-handles.h>
@@ -8,7 +9,7 @@ main(int argc, char *argv[])
glfs_t *fs = NULL;
glfs_fd_t *fd = NULL;
int ret = 1;
- int size = 0;
+ off_t size = 0;
if (argc != 6) {
fprintf(stderr,
@@ -45,12 +46,12 @@ main(int argc, char *argv[])
goto out;
}
- size = atoi(argv[5]);
+ size = strtol(argv[5], NULL, 10);
if (size < 0) {
fprintf(stderr, "Wrong size %s", argv[5]);
goto out;
}
- ret = glfs_zerofill(fd, 0, atoi(argv[5]));
+ ret = glfs_zerofill(fd, 0, size);
if (ret <= 0) {
fprintf(stderr, "glfs_zerofill: returned %d\n", ret);
goto out;
diff --git a/tests/basic/quick-read-with-upcall.t b/tests/basic/quick-read-with-upcall.t
index 0eab8e1a9f6..dfb751dfcdb 100644
--- a/tests/basic/quick-read-with-upcall.t
+++ b/tests/basic/quick-read-with-upcall.t
@@ -15,8 +15,8 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
TEST $CLI volume start $V0
# Mount FUSE without selinux:
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
-TEST glusterfs -s $H0 --volfile-id $V0 $M1;
+TEST glusterfs -s $H0 --volfile-id $V0 --direct-io-mode=enable $M0;
+TEST glusterfs -s $H0 --volfile-id $V0 --direct-io-mode=enable $M1;
D0="test-message0";
D1="test-message1";
@@ -37,16 +37,13 @@ TEST write_to "$M0/test.txt" "$D1"
EXPECT "$D1" cat $M0/test.txt
EXPECT "$D0" cat $M1/test.txt
-# This is 3.7 for no good reason. We could have kept this to
-# any number above 2 seconds. Noticed that when it is 2 seconds, or
-# less, there is a possibility of not getting a lookup on the same inode.
-sleep 3.7
+sleep 1
# TODO: This line normally fails
EXPECT "$D1" cat $M1/test.txt
TEST $CLI volume set $V0 features.cache-invalidation on
-TEST $CLI volume set $V0 performance.qr-cache-timeout 15
+TEST $CLI volume set $V0 performance.quick-read-cache-timeout 15
TEST $CLI volume set $V0 performance.md-cache-timeout 15
TEST write_to "$M0/test1.txt" "$D0"
@@ -63,6 +60,7 @@ EXPECT "$D0" cat $M1/test1.txt
sleep 30
EXPECT "$D1" cat $M1/test1.txt
+TEST $CLI volume set $V0 performance.quick-read-cache-invalidation on
TEST $CLI volume set $V0 performance.cache-invalidation on
TEST write_to "$M0/test2.txt" "$D0"
@@ -72,7 +70,3 @@ EXPECT "$D0" cat $M1/test2.txt
TEST write_to "$M0/test2.txt" "$D1"
EXPECT "$D1" cat $M0/test2.txt
EXPECT "$D1" cat $M1/test2.txt
-
-
-#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=1718191
-#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1718191
diff --git a/tests/basic/ec/seek.c b/tests/basic/seek.c
index 54fa6f463af..54fa6f463af 100644
--- a/tests/basic/ec/seek.c
+++ b/tests/basic/seek.c
diff --git a/tests/basic/shd-mux-afr.t b/tests/basic/shd-mux-afr.t
new file mode 100644
index 00000000000..cf300c148bb
--- /dev/null
+++ b/tests/basic/shd-mux-afr.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Create one more volume
+TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_1
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_1 cluster.eager-lock off
+TEST $CLI volume set ${V0}_1 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_1 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_1
+TEST $CLI volume delete ${V0}_1
+
+#Check that the stop succeeded and detached the volume without restarting it
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+cleanup
diff --git a/tests/basic/shd-mux-ec.t b/tests/basic/shd-mux-ec.t
new file mode 100644
index 00000000000..ef4d65018d3
--- /dev/null
+++ b/tests/basic/shd-mux-ec.t
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Now create an ec volume and check that mux works
+TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_2
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_2 cluster.eager-lock off
+TEST $CLI volume set ${V0}_2 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_2 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_2
+TEST $CLI volume delete ${V0}_2
+
+#Check that the stop succeeded and detached the volume without restarting it
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the thread count drops to zero for ec-related threads
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t
deleted file mode 100644
index e42a34ab1f7..00000000000
--- a/tests/basic/shd-mux.t
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=16
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-
-shd_pid=$(get_shd_mux_pid $V0)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-#Create a one more volume
-TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
-TEST $CLI volume start ${V0}_1
-
-#Check whether the shd has multiplexed or not
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
-
-TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
-TEST $CLI volume set ${V0}_1 cluster.eager-lock off
-TEST $CLI volume set ${V0}_1 performance.flush-behind off
-TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
-
-TEST touch $M0/foo{1..100}
-TEST touch $M1/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
-
-TEST $CLI volume start ${V0} force
-TEST $CLI volume start ${V0}_1 force
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
-
-TEST rm -rf $M0/*
-TEST rm -rf $M1/*
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
-
-#Stop the volume
-TEST $CLI volume stop ${V0}_1
-TEST $CLI volume delete ${V0}_1
-
-#Check the stop succeeded and detached the volume with out restarting it
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-
-#Check the thread count become to earlier number after stopping
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-
-#Now create a ec volume and check mux works
-TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
-TEST $CLI volume start ${V0}_2
-
-#Check whether the shd has multiplexed or not
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
-
-TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
-TEST $CLI volume set ${V0}_2 cluster.eager-lock off
-TEST $CLI volume set ${V0}_2 performance.flush-behind off
-TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
-
-TEST touch $M0/foo{1..100}
-TEST touch $M1/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
-
-TEST $CLI volume start ${V0} force
-TEST $CLI volume start ${V0}_2 force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
-
-TEST rm -rf $M0/*
-TEST rm -rf $M1/*
-
-
-#Stop the volume
-TEST $CLI volume stop ${V0}_2
-TEST $CLI volume delete ${V0}_2
-
-#Check the stop succeeded and detached the volume with out restarting it
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-
-#Check the thread count become to zero for ec related threads
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count become to earlier number after stopping
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-for i in $(seq 1 3); do
- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_afr$i
- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_ec$i
-done
-
-#Check the thread count become to number of volumes*number of ec subvolume (3*6=18)
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count become to number of volumes*number of afr subvolume (4*6=24)
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-#Delete the volumes
-for i in $(seq 1 3); do
- TEST $CLI volume stop ${V0}_afr$i
- TEST $CLI volume stop ${V0}_ec$i
- TEST $CLI volume delete ${V0}_afr$i
- TEST $CLI volume delete ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume stop ${V0}
-TEST $CLI volume delete ${V0}
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-cleanup
diff --git a/tests/basic/trace.t b/tests/basic/trace.t
index 3153222df8d..01e7c9e0a25 100755
--- a/tests/basic/trace.t
+++ b/tests/basic/trace.t
@@ -26,8 +26,30 @@ TEST $(dirname $0)/rpc-coverage.sh --no-locks $M0
# Take statedump to get maximum code coverage
pid=$(ps auxww | grep glusterfs | grep -E "template.vol" | awk '{print $2}' | head -1)
+
TEST generate_statedump $pid
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+# Now, use the glusterd way of enabling trace
+TEST glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+TEST $CLI volume set $V0 debug.trace marker
+TEST $CLI volume set $V0 debug.log-file yes
+#TEST $CLI volume set $V0 debug.log-history yes
+
+TEST $CLI volume start $V0;
+
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+TEST $(dirname $0)/rpc-coverage.sh --no-locks $M1
+cp $(dirname ${0})/gfapi/glfsxmp-coverage.c ./glfsxmp.c
+build_tester ./glfsxmp.c -lgfapi
+./glfsxmp $V0 $H0 > /dev/null
+cleanup_tester ./glfsxmp
+rm ./glfsxmp.c
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
cleanup;
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
index 89b833d5ddc..102de22468e 100644
--- a/tests/basic/volume-scale-shd-mux.t
+++ b/tests/basic/volume-scale-shd-mux.t
@@ -23,34 +23,32 @@ for i in $(seq 1 2); do
done
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
#Check the thread count becomes number of volumes * number of ec subvolumes (2*6=12)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
#Check the thread count becomes number of volumes * number of afr subvolumes (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"
TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
#Check the thread count becomes number of volumes * number of afr subvolumes plus 3 additional threads from the newly added bricks (3*6+3=21)
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "afr_shd_index_healer"
#Remove the brick and check the detach is successful
$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "glusterfs_graph_cleanup"
TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
#Check the thread count becomes number of volumes * number of ec subvolumes plus 6 additional threads from the newly added bricks (2*6+6=18)
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
#Remove the brick and check the detach is successful
$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
for i in $(seq 1 2); do
@@ -58,7 +56,7 @@ for i in $(seq 1 2); do
TEST $CLI volume stop ${V0}_ec$i
done
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
@@ -80,28 +78,31 @@ TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
TEST $CLI volume start ${V0}_distribute1
#Creating a non-replicate/non-ec volume should not have any effect in shd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
TEST mkdir $B0/add/
#Now convert the distributed volume to replicate
TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "afr_shd_index_healer"
#scale down the volume
TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Before stopping the process, make sure there are no pending cleanup threads hanging
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "glusterfs_graph_cleanup"
TEST $CLI volume stop ${V0}
TEST $CLI volume delete ${V0}
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-TEST rm -rf $B0/add/
-TEST mkdir $B0/add/
+TEST rm -rf $B0/add/2 $B0/add/3
+
#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait"
+EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "afr_shd_index_healer"
#Now convert the replica volume to distribute again and make sure the shd is now stopped
TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
diff --git a/tests/basic/volume-snap-scheduler.t b/tests/basic/volume-snap-scheduler.t
new file mode 100644
index 00000000000..a638c5cc46a
--- /dev/null
+++ b/tests/basic/volume-snap-scheduler.t
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
+TEST $CLI volume start $V0
+
+## Create, start and mount meta_volume as
+## snap_scheduler expects shared storage to be enabled.
+## This test is very basic in nature, not creating any snapshot;
+## its purpose is to validate the snap scheduling commands.
+
+TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
+TEST $CLI volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
+
+##function to check status
+function check_status_scheduler()
+{
+ local key=$1
+ snap_scheduler.py status | grep -F "$key" | wc -l
+}
+
+##Basic snap_scheduler command test init/enable/disable/list
+
+TEST snap_scheduler.py init
+
+TEST snap_scheduler.py enable
+
+EXPECT 1 check_status_scheduler "Enabled"
+
+TEST snap_scheduler.py disable
+
+EXPECT 1 check_status_scheduler "Disabled"
+
+TEST snap_scheduler.py list
+
+TEST $CLI volume stop $V0;
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/basic/volume-snapshot-xml.t b/tests/basic/volume-snapshot-xml.t
index 3ba25f4ddbb..ff63b54538d 100755
--- a/tests/basic/volume-snapshot-xml.t
+++ b/tests/basic/volume-snapshot-xml.t
@@ -1,13 +1,9 @@
#!/bin/bash
. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
. $(dirname $0)/../snapshot.rc
-function get-xml()
-{
- $CLI $1 --xml | xmllint --format - | grep $2 | sed 's/\(<"$2">\|<\/"$2">\)//g'
-}
-
cleanup;
TEST verify_lvm_version;
TEST glusterd;
diff --git a/tests/basic/volume.t b/tests/basic/volume.t
index 35ad55c3c5c..27fe093d07d 100755..100644
--- a/tests/basic/volume.t
+++ b/tests/basic/volume.t
@@ -11,7 +11,6 @@ TEST $CLI volume info;
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
-
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
EXPECT '6' brick_count $V0
@@ -25,10 +24,37 @@ EXPECT '9' brick_count $V0
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{1,2,3} force;
EXPECT '6' brick_count $V0
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume top $V0 read-perf bs 4096 count 1000
+TEST $CLI volume top $V0 write-perf bs 1048576 count 2
+
+TEST touch $M0/foo
+
+# statedump path should be a directory; setting it to a file path should fail
+
+TEST ! $CLI v set $V0 server.statedump-path $M0/foo;
+EXPECT '/var/run/gluster' $CLI v get $V0 server.statedump-path
+
+#set the statedump path to an existing directory, which should succeed
+TEST mkdir $D0/level;
+TEST $CLI v set $V0 server.statedump-path $D0/level
+EXPECT '/level' volinfo_field $V0 'server.statedump-path'
+
+ret=$(ls $D0/level | wc -l);
+TEST [ $ret == 0 ]
+TEST $CLI v statedump $V0;
+ret=$(ls $D0/level | wc -l);
+TEST ! [ $ret == 0 ]
+
+#set the statedump path to a non-existent directory, which should fail
+TEST ! $CLI v set $V0 server.statedump-path /root/test
+EXPECT '/level' volinfo_field $V0 'server.statedump-path'
+
+TEST rm -rf $D0/level
+
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status'
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
+TEST $CLI volume delete $V0
+TEST ! $CLI volume info $V0
cleanup;
diff --git a/tests/bitrot/br-signer-threads-config-1797869.t b/tests/bitrot/br-signer-threads-config-1797869.t
new file mode 100644
index 00000000000..657ef3eedaf
--- /dev/null
+++ b/tests/bitrot/br-signer-threads-config-1797869.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../cluster.rc
+
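+# Helper functions: count the bitd daemons on each node and the signer threads (glfs_brpobj) inside each bitd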
+function get_bitd_count_1 {
+ ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H1 | wc -l
+}
+
+function get_bitd_count_2 {
+ ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H2 | wc -l
+}
+
+function get_bitd_pid_1 {
+ ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H1 | awk '{print $2}'
+}
+
+function get_bitd_pid_2 {
+ ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H2 | awk '{print $2}'
+}
+
+function get_signer_th_count_1 {
+ ps -eL | grep $(get_bitd_pid_1) | grep glfs_brpobj | wc -l
+}
+
+function get_signer_th_count_2 {
+ ps -eL | grep $(get_bitd_pid_2) | grep glfs_brpobj | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+TEST $CLI_1 volume create $V0 $H1:$B1
+TEST $CLI_1 volume create $V1 $H2:$B2
+EXPECT 'Created' volinfo_field_1 $V0 'Status';
+EXPECT 'Created' volinfo_field_1 $V1 'Status';
+
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume start $V1
+EXPECT 'Started' volinfo_field_1 $V0 'Status';
+EXPECT 'Started' volinfo_field_1 $V1 'Status';
+
+#Enable bitrot
+TEST $CLI_1 volume bitrot $V0 enable
+TEST $CLI_1 volume bitrot $V1 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count_2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" get_signer_th_count_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" get_signer_th_count_2
+
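+# Tuning signer-threads on one volume should restart only that volume's bitd with the new thread count; the other node's bitd must remain untouched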
+old_bitd_pid_1=$(get_bitd_pid_1)
+old_bitd_pid_2=$(get_bitd_pid_2)
+TEST $CLI_1 volume bitrot $V0 signer-threads 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_signer_th_count_1
+EXPECT_NOT "$old_bitd_pid_1" get_bitd_pid_1;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" get_signer_th_count_2
+EXPECT "$old_bitd_pid_2" get_bitd_pid_2;
+
+old_bitd_pid_1=$(get_bitd_pid_1)
+old_bitd_pid_2=$(get_bitd_pid_2)
+TEST $CLI_1 volume bitrot $V1 signer-threads 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_signer_th_count_2
+EXPECT_NOT "$old_bitd_pid_2" get_bitd_pid_2;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_signer_th_count_1
+EXPECT "$old_bitd_pid_1" get_bitd_pid_1;
+
+cleanup;
diff --git a/tests/bugs/bitrot/bug-1227996.t b/tests/bugs/bitrot/bug-1227996.t
index 47ebc4235cf..121c7b5f279 100644
--- a/tests/bugs/bitrot/bug-1227996.t
+++ b/tests/bugs/bitrot/bug-1227996.t
@@ -17,7 +17,6 @@ TEST pidof glusterd;
## Lets create and start the volume
TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
TEST $CLI volume start $V0
-
## Enable bitrot on volume $V0
TEST $CLI volume bitrot $V0 enable
diff --git a/tests/bugs/bitrot/bug-1245981.t b/tests/bugs/bitrot/bug-1245981.t
index 2bed4d980fa..f3955256b01 100644
--- a/tests/bugs/bitrot/bug-1245981.t
+++ b/tests/bugs/bitrot/bug-1245981.t
@@ -47,9 +47,9 @@ touch $M0/5
sleep `expr $SLEEP_TIME \* 2`
backpath=$(get_backend_paths $fname)
-TEST getfattr -m . -n trusted.bit-rot.signature $backpath
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' $backpath
backpath=$(get_backend_paths $M0/new_file)
-TEST getfattr -m . -n trusted.bit-rot.signature $backpath
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' $backpath
cleanup;
diff --git a/tests/bugs/bug-1064147.t b/tests/bugs/bug-1064147.t
new file mode 100755
index 00000000000..27ffde4eb44
--- /dev/null
+++ b/tests/bugs/bug-1064147.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+
+# Verify volume creation
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+#------------------------------------------------------------
+
+# Test case 1 - Subvolume down + Healing
+#------------------------------------------------------------
+# Kill 2nd brick process
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "1" online_brick_count
+
+# Change root permissions
+TEST chmod 444 $M0
+
+# Store permissions for comparison
+TEST permission_new=`stat -c "%A" $M0`
+
+# Bring up the killed brick process
+TEST $CLI volume start $V0 force
+
+# Perform lookup
+sleep 5
+TEST ls $M0
+
+# Check brick permissions
+TEST brick_perm=`stat -c "%A" $B0/${V0}2`
+TEST [ ${brick_perm} = ${permission_new} ]
+#------------------------------------------------------------
+
+# Test case 2 - Add-brick + Healing
+#------------------------------------------------------------
+# Change root permissions
+TEST chmod 777 $M0
+
+# Store permissions for comparison
+TEST permission_new_2=`stat -c "%A" $M0`
+
+# Add a 3rd brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
+
+# Perform lookup
+sleep 5
+TEST ls $M0
+
+# Check permissions on the new brick
+TEST brick_perm2=`stat -c "%A" $B0/${V0}3`
+
+TEST [ ${brick_perm2} = ${permission_new_2} ]
+
+cleanup;
diff --git a/tests/bugs/bug-1371806.t b/tests/bugs/bug-1371806.t
index 7dc1613a4f2..08180525650 100644
--- a/tests/bugs/bug-1371806.t
+++ b/tests/bugs/bug-1371806.t
@@ -28,6 +28,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3,4,5}
TEST $CLI volume start $V0
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
diff --git a/tests/bugs/bug-1371806_acl.t b/tests/bugs/bug-1371806_acl.t
index a2e5af49bb0..c39165628cc 100644
--- a/tests/bugs/bug-1371806_acl.t
+++ b/tests/bugs/bug-1371806_acl.t
@@ -39,6 +39,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3,4,5}
TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
TEST $CLI volume start $V0
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
TEST glusterfs --volfile-id=$V0 --acl --volfile-server=$H0 --entry-timeout=0 $M0;
diff --git a/tests/bugs/bug-1620580.t b/tests/bugs/bug-1620580.t
new file mode 100644
index 00000000000..0c74d4a6089
--- /dev/null
+++ b/tests/bugs/bug-1620580.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Lets create volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+#do some operations on the mount, so that kill_brick is guaranteed to be
+#done _after_ the first lookup on root
+
+TEST ls $M0
+TEST touch $M0/file
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+# Case of Same volume name, but different bricks
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{3,4};
+TEST $CLI volume start $V0;
+
+# Give time for 'reconnect' to happen
+sleep 4
+
+TEST ! ls $M0
+TEST ! touch $M0/file1
+
+# Case of Same brick, but different volume (ie, recreated).
+TEST $CLI volume create $V1 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V1;
+
+# Give time for 'reconnect' to happen
+sleep 4
+TEST ! ls $M0
+TEST ! touch $M0/file2
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST $CLI volume stop $V1
+TEST $CLI volume delete $V1
+
+# Case of Same brick, but different volume (but same volume name)
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0;
+
+# Give time for 'reconnect' to happen
+sleep 4
+TEST ! ls $M0
+TEST ! touch $M0/file3
+
+
+cleanup
diff --git a/tests/bugs/bug-1694920.t b/tests/bugs/bug-1694920.t
new file mode 100644
index 00000000000..5bf93c92f94
--- /dev/null
+++ b/tests/bugs/bug-1694920.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=300
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0};
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+TEST $GFS -s $H0 --volfile-id=$V0 $M0;
+
+TEST touch $M0/a
+
+#When all bricks are up, lock and unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST fd_close $fd1
+
+#When all bricks are down, lock/unlock should fail
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST $CLI volume stop $V0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST fd_close $fd1
+
+#When a brick goes down and comes back up operations on fd which had locks on it should succeed by default
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST $CLI volume stop $V0
+sleep 2
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST fd_write $fd1 "data"
+TEST fd_close $fd1
+
+#When a brick goes down and comes back up operations on fd which had locks on it should fail when client.strict-locks is on
+TEST $CLI volume set $V0 client.strict-locks on
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST $CLI volume stop $V0
+sleep 2
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST ! fd_write $fd1 "data"
+TEST fd_close $fd1
+
+cleanup
diff --git a/tests/bugs/cli/bug-1320388.t b/tests/bugs/cli/bug-1320388.t
index f5ffcbe082a..e719fc59033 100755
--- a/tests/bugs/cli/bug-1320388.t
+++ b/tests/bugs/cli/bug-1320388.t
@@ -21,7 +21,7 @@ cleanup;
rm -f $SSL_BASE/glusterfs.*
touch "$GLUSTERD_WORKDIR"/secure-access
-TEST openssl genrsa -out $SSL_KEY 1024
+TEST openssl genrsa -out $SSL_KEY 2048
TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT
ln $SSL_CERT $SSL_CA
diff --git a/tests/bugs/ctime/issue-832.t b/tests/bugs/ctime/issue-832.t
new file mode 100755
index 00000000000..740f731ab73
--- /dev/null
+++ b/tests/bugs/ctime/issue-832.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+
+#Trigger the trusted.glusterfs.mdata setting codepath and see that things work as expected
+cleanup
+
+TEST_USER=test-ctime-user
+TEST_UID=27341
+
+TEST useradd -o -M -u ${TEST_UID} ${TEST_USER}
+push_trapfunc "userdel --force ${TEST_USER}"
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+
+$GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+echo abc > $M0/test
+TEST chmod 755 $M0/
+TEST chmod 744 $M0/test
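+# Strip the mdata xattr directly on the brick so that the next access has to regenerate it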
+TEST setfattr -x trusted.glusterfs.mdata $B0/$V0/test
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+$GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+su ${TEST_USER} -c "cat $M0/test"
+TEST getfattr -n trusted.glusterfs.mdata $B0/$V0/test
+
+cleanup
diff --git a/tests/bugs/distribute/bug-1600379.t b/tests/bugs/distribute/bug-1600379.t
new file mode 100644
index 00000000000..8d2f6154100
--- /dev/null
+++ b/tests/bugs/distribute/bug-1600379.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+
+# Verify volume creation
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+#------------------------------------------------------------
+
+# Test case - Remove xattr from killed brick on lookup
+#------------------------------------------------------------
+# Create a dir and set custom xattr
+TEST mkdir $M0/testdir
+TEST setfattr -n user.attr -v val $M0/testdir
+xattr_val=`getfattr -d $B0/${V0}2/testdir 2>/dev/null | grep user.attr`;
+TEST [ "${xattr_val}" = 'user.attr="val"' ];
+
+# Kill 2nd brick process
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "1" online_brick_count
+
+# Remove custom xattr
+TEST setfattr -x user.attr $M0/testdir
+
+# Bring up the killed brick process
+TEST $CLI volume start $V0 force
+
+# Perform lookup
+sleep 5
+TEST ls $M0/testdir
+
+# Check brick xattrs
+xattr_val_2=`getfattr -d $B0/${V0}2/testdir 2>/dev/null | grep user.attr`;
+TEST [ -z "${xattr_val_2}" ];
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-1786679.t b/tests/bugs/distribute/bug-1786679.t
new file mode 100755
index 00000000000..219ce51c8a9
--- /dev/null
+++ b/tests/bugs/distribute/bug-1786679.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=250
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+
+# create 2 subvols
+# create a dir
+# create a file
+# change layout
+# remove the file
+# execute create from a different mount
+# Without the patch, the file will be present on both of the bricks
+
+cleanup
+
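+# get_layout/set_layout: read and write the trusted.glusterfs.dht layout xattr on a brick directory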
+function get_layout () {
+
+layout=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1 | grep dht | gawk -F"=" '{print $2}'`
+
+echo $layout
+
+}
+
+function set_layout()
+{
+ setfattr -n "trusted.glusterfs.dht" -v $1 $2
+}
+
+TEST glusterd
+TEST pidof glusterd
+
+BRICK1=$B0/${V0}-0
+BRICK2=$B0/${V0}-1
+
+TEST $CLI volume create $V0 $H0:$BRICK1 $H0:$BRICK2
+TEST $CLI volume start $V0
+
+# Mount FUSE and create symlink
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file
+TEST ! stat "$BRICK1/dir/file"
+TEST stat "$BRICK2/dir/file"
+
+layout1="$(get_layout "$BRICK1/dir")"
+layout2="$(get_layout "$BRICK2/dir")"
+
+TEST set_layout $layout1 "$BRICK2/dir"
+TEST set_layout $layout2 "$BRICK1/dir"
+
+TEST rm $M0/dir/file -f
+TEST gluster v set $V0 client-log-level DEBUG
+
+#Without the patch in place, this client will create the file in $BRICK2,
+#which will lead to the file being present on both bricks when a new client
+#creates the file with the same name
+TEST touch $M0/dir/file
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M1
+TEST touch $M1/dir/file
+
+TEST stat "$BRICK1/dir/file"
+TEST ! stat "$BRICK2/dir/file"
+
+cleanup
diff --git a/tests/bugs/distribute/issue-1327.t b/tests/bugs/distribute/issue-1327.t
new file mode 100755
index 00000000000..acd8c8c6614
--- /dev/null
+++ b/tests/bugs/distribute/issue-1327.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=250
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+BRICK1=$B0/${V0}-0
+BRICK2=$B0/${V0}-1
+
+TEST $CLI volume create $V0 $H0:$BRICK1 $H0:$BRICK2
+TEST $CLI volume start $V0
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST mkdir $M0/dir
+
+#remove dir from one of the bricks
+TEST rmdir $BRICK2/dir
+
+#wait past the cache timeout so that a fresh lookup is triggered
+sleep 2
+
+TEST ls $M0/dir
+
+TEST stat $BRICK2/dir
+
+cleanup
diff --git a/tests/bugs/fuse/bug-985074.t b/tests/bugs/fuse/bug-985074.t
index d10fd9f8b41..ffa6df54144 100644
--- a/tests/bugs/fuse/bug-985074.t
+++ b/tests/bugs/fuse/bug-985074.t
@@ -30,7 +30,7 @@ TEST glusterd
TEST $CLI volume create $V0 $H0:$B0/$V0
TEST $CLI volume start $V0
-TEST $CLI volume set $V0 md-cache-timeout 3
+TEST $CLI volume set $V0 performance.stat-prefetch off
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1 --entry-timeout=0 --attribute-timeout=0
@@ -40,8 +40,7 @@ TEST ln $M0/file $M0/file.link
TEST ls -ali $M0 $M1
TEST rm -f $M1/file.link
TEST ls -ali $M0 $M1
-# expire the md-cache timeout
-sleep 3
+
TEST mv $M0/file $M0/file.link
TEST stat $M0/file.link
TEST ! stat $M0/file
diff --git a/tests/bugs/fuse/many-groups-for-acl.t b/tests/bugs/fuse/many-groups-for-acl.t
index d959f750ee0..a51b1bc7267 100755
--- a/tests/bugs/fuse/many-groups-for-acl.t
+++ b/tests/bugs/fuse/many-groups-for-acl.t
@@ -38,6 +38,13 @@ do
done
TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_USER}-${NEW_GIDS} ${NEW_USER}
+# Linux < 3.8 exports only the first 32 gids of a pid to userspace
+kernel_exports_few_gids=0
+if [ "$OSTYPE" = Linux ] && \
+ su -m ${NEW_USER} -c "grep ^Groups: /proc/self/status | wc -w | xargs -I@ expr @ - 1 '<' $LAST_GID - $NEW_GID + 1" > /dev/null; then
+ kernel_exports_few_gids=1
+fi
+
# preparation done, start the tests
TEST glusterd
@@ -48,6 +55,8 @@ TEST $CLI volume set $V0 nfs.disable off
TEST $CLI volume set ${V0} server.manage-gids off
TEST $CLI volume start ${V0}
+# This is just a synchronization hack to make sure the bricks are
+# up before going on.
EXPECT_WITHIN ${NFS_EXPORT_TIMEOUT} "1" is_nfs_export_available
# mount the volume with POSIX ACL support, without --resolve-gids
@@ -69,8 +78,8 @@ TEST [ $? -eq 0 ]
su -m ${NEW_USER} -c "touch ${M0}/first-32-gids-2/success > /dev/null"
TEST [ $? -eq 0 ]
-su -m ${NEW_USER} -c "touch ${M0}/gid-64/failure > /dev/null"
-TEST [ $? -ne 0 ]
+su -m ${NEW_USER} -c "touch ${M0}/gid-64/success--if-all-gids-exported > /dev/null"
+TEST [ $? -eq $kernel_exports_few_gids ]
su -m ${NEW_USER} -c "touch ${M0}/gid-120/failure > /dev/null"
TEST [ $? -ne 0 ]
diff --git a/tests/bugs/gfapi/bug-1447266/bug-1447266.t b/tests/bugs/gfapi/bug-1447266/bug-1447266.t
index 2bf72f8c6d7..45547f4f0e7 100644
--- a/tests/bugs/gfapi/bug-1447266/bug-1447266.t
+++ b/tests/bugs/gfapi/bug-1447266/bug-1447266.t
@@ -56,5 +56,5 @@ TEST ! $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/.././snap3"
TEST $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/../."
TEST $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/./snap1/./../snap1/dir/."
-cleanup_tester $(dirname $0)/bug-1319374
+cleanup_tester $(dirname $0)/bug-1447266
cleanup;
diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
index 4e570381701..b6af487a791 100644
--- a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
+++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
@@ -7,6 +7,20 @@ function count_brick_processes {
pgrep glusterfsd | wc -l
}
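+# Count the distinct, valid brick pids reported by the xml volume status of all volumes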
+function count_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
+}
+
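+# Count bricks whose pid shows up as -1 (N/A) in the xml volume status output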
+function count_N/A_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -- '\-1' | sort | uniq | wc -l
+}
+
+function check_peers {
+ $CLI_2 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
cleanup;
TEST launch_cluster 3
@@ -48,4 +62,47 @@ TEST $CLI_1 volume stop $V1
EXPECT 3 count_brick_processes
-cleanup
+TEST $CLI_1 volume stop $META_VOL
+
+TEST $CLI_1 volume delete $META_VOL
+TEST $CLI_1 volume delete $V0
+TEST $CLI_1 volume delete $V1
+
+#bug-1773856 - Brick process fails to come up with brickmux on
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1 $H3:$B3/${V0}1 force
+TEST $CLI_1 volume start $V0
+
+
+EXPECT 3 count_brick_processes
+
+#create and start a new volume
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}2 $H2:$B2/${V1}2 $H3:$B3/${V1}2 force
+TEST $CLI_1 volume start $V1
+
+EXPECT 3 count_brick_processes
+
+V2=patchy2
+TEST $CLI_1 volume create $V2 $H1:$B1/${V2}3 $H2:$B2/${V2}3 $H3:$B3/${V2}3 force
+TEST $CLI_1 volume start $V2
+
+EXPECT 3 count_brick_processes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
+
+TEST kill_node 1
+
+sleep 10
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+$CLI_2 volume set $V0 performance.readdir-ahead on
+$CLI_2 volume set $V1 performance.readdir-ahead on
+
+TEST $glusterd_1;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_N/A_brick_pids
+
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux-validation.t b/tests/bugs/glusterd/brick-mux-validation.t
index 03a476823ca..61b0455f9a8 100644
--- a/tests/bugs/glusterd/brick-mux-validation.t
+++ b/tests/bugs/glusterd/brick-mux-validation.t
@@ -24,7 +24,7 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
TEST $CLI volume start $V0
EXPECT 1 count_brick_processes
-EXPECT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
pkill gluster
@@ -101,4 +101,4 @@ TEST $CLI_IGNORE_PARTITION volume reset-brick $V1 $H0:$B0/${V1}1 $H0:$B0/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
EXPECT 1 count_brick_processes
-cleanup; \ No newline at end of file
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux.t b/tests/bugs/glusterd/brick-mux.t
index eeaa3ebfea8..927940534c1 100644
--- a/tests/bugs/glusterd/brick-mux.t
+++ b/tests/bugs/glusterd/brick-mux.t
@@ -39,7 +39,7 @@ TEST glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
EXPECT 1 count_brick_processes
-TEST $CLI volume set $V1 performance.cache-size 32MB
+TEST $CLI volume set $V1 performance.io-cache-size 32MB
TEST $CLI volume stop $V1
TEST $CLI volume start $V1
diff --git a/tests/bugs/glusterd/brick-order-check-add-brick.t b/tests/bugs/glusterd/brick-order-check-add-brick.t
new file mode 100644
index 00000000000..0be31dac768
--- /dev/null
+++ b/tests/bugs/glusterd/brick-order-check-add-brick.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST verify_lvm_version;
+#Create cluster with 3 nodes
+TEST launch_cluster 3 -NO_DEBUG -NO_FORCE
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume create $V0 replica 3 $H1:$L1/$V0 $H2:$L2/$V0 $H3:$L3/$V0
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#add-brick with or without mentioning the replica count should not fail
+TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}_1 $H2:$L2/${V0}_1 $H3:$L3/${V0}_1
+EXPECT '2 x 3 = 6' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_2 $H2:$L2/${V0}_2 $H3:$L3/${V0}_2
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host with force should succeed
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5 force
+EXPECT '4 x 3 = 12' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1/${V0}1 $H2:$L2/${V0}1
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Add-brick with Increasing replica count
+TEST $CLI_1 volume add-brick $V0 replica 3 $H3:$L3/${V0}1
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+#Add-brick with Increasing replica count from same host should fail
+TEST ! $CLI_1 volume add-brick $V0 replica 5 $H1:$L1/${V0}2 $H1:$L1/${V0}3
+
+#adding multiple bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}{4..6} $H2:$L2/${V0}{7..9}
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1720566.t b/tests/bugs/glusterd/bug-1720566.t
new file mode 100644
index 00000000000..99bcf6ff785
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1720566.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+
+cleanup;
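+# Volume names below are deliberately longer than 100 characters to exercise rebalance with long volume names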
+V0="TestLongVolnamec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9abcd"
+V1="TestLongVolname3102bd28a16c49440bd5210e4ec4d5d93102bd28a16c49440bd5210e4ec4d5d933102bd28a16c49440bd5210e4ebbcd"
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+$CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+EXPECT 'Created' cluster_volinfo_field 1 $V1 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V1
+EXPECT 'Started' cluster_volinfo_field 1 $V1 'Status';
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V1 $M1;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST mkdir $M1/dir{1..4};
+TEST touch $M1/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}_1 $H2:$B2/${V0}_1
+TEST $CLI_1 volume add-brick $V1 $H1:$B1/${V1}_1 $H2:$B2/${V1}_1
+
+
+TEST $CLI_1 volume rebalance $V0 start
+TEST $CLI_1 volume rebalance $V1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V1
+
+cleanup;
diff --git a/tests/bugs/glusterd/check_elastic_server.t b/tests/bugs/glusterd/check_elastic_server.t
new file mode 100644
index 00000000000..41d2140aa2b
--- /dev/null
+++ b/tests/bugs/glusterd/check_elastic_server.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
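+# Rebalance status is queried through the second node's CLI, since node 1 gets killed during the test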
+function cluster_rebalance_status {
+ local vol=$1
+ $CLI_2 volume status | grep -iw "Rebalance" -A 5 | grep "Status" | sed 's/.*: //'
+}
+
+cleanup;
+TEST launch_cluster 4;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+TEST $CLI_1 peer probe $H4;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+#Mount invalid volume
+TEST ! glusterfs -s $H1 --volfile-id=${V0}_NA $M0;
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_remove_brick_status_completed_field "$V0 $H1:$B1/$V0"
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 commit
+
+kill_glusterd 1
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H3:$B3/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H4:$B4/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+kill_glusterd 2
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
index fdc0a73f60c..8001359e6b3 100644
--- a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
+++ b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
@@ -4,7 +4,7 @@
. $(dirname $0)/../../cluster.rc
function check_peers {
-$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup
@@ -36,23 +36,35 @@ TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
#bug-948686 - volume sync after bringing up the killed node
TEST $CLI_1 peer probe $H3
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
#kill a node
TEST kill_node 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 2
#modify volume config to see change in volume-sync
TEST $CLI_1 volume set $V0 write-behind off
#add some files to the volume to see effect of volume-heal cmd
TEST touch $M0/{1..100};
TEST $CLI_1 volume stop $V0;
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 'Stopped' volinfo_field_1 $V0 'Status'
+
TEST $glusterd_3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+sleep 5
TEST $CLI_3 volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
TEST $CLI_2 volume stop $V0;
TEST $CLI_2 volume delete $V0;
diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
index 7b73b1e86ea..b89ca22415e 100644
--- a/tests/bugs/glusterd/optimized-basic-testcases.t
+++ b/tests/bugs/glusterd/optimized-basic-testcases.t
@@ -69,6 +69,11 @@ TEST pidof glusterd;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
EXPECT 'Created' volinfo_field $V0 'Status';
+#bug-1786478 - default volume option after volume reset
+addr_family=`volinfo_field $V0 'transport.address-family'`
+TEST $CLI volume reset $V0
+EXPECT $addr_family volinfo_field $V0 'transport.address-family'
+
#bug-955588 - uuid validation
uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
@@ -124,7 +129,8 @@ TEST ! $CLI volume set all $V0 cluster.op-version $OP_VERS_NEW
#bug-1022055 - validate log rotate command
-TEST $CLI volume log rotate $V0;
+TEST ! $CLI volume log rotate $V0;
+TEST $CLI volume log $V0 rotate;
#bug-1092841 - validating barrier enable/disable
diff --git a/tests/bugs/glusterd/quorum-validation.t b/tests/bugs/glusterd/quorum-validation.t
index ff46729e0bd..3cc3351b43b 100644
--- a/tests/bugs/glusterd/quorum-validation.t
+++ b/tests/bugs/glusterd/quorum-validation.t
@@ -34,6 +34,8 @@ TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2
TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
TEST ! $CLI_1 volume set $V0 barrier enable
+#quorum is not met, rebalance/profile start should fail
+TEST ! $CLI_1 volume rebalance $V0 start
TEST ! $CLI_1 volume profile $V0 start
#bug-1690753 - Volume stop when quorum not met is successful
diff --git a/tests/bugs/glusterd/rebalance-in-cluster.t b/tests/bugs/glusterd/rebalance-in-cluster.t
index 9565faef01d..469ec6cd48e 100644
--- a/tests/bugs/glusterd/rebalance-in-cluster.t
+++ b/tests/bugs/glusterd/rebalance-in-cluster.t
@@ -4,6 +4,10 @@
. $(dirname $0)/../../cluster.rc
. $(dirname $0)/../../volume.rc
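+# Extract the rebalance status field from node 1's 'volume rebalance status' output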
+function rebalance_status_field_1 {
+ $CLI_1 volume rebalance $1 status | awk '{print $7}' | sed -n 3p
+}
+
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
@@ -29,6 +33,11 @@ TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
TEST $CLI_1 volume rebalance $V0 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+#bug-1764119 - rebalance status should display detailed info when any of the nodes is down
+TEST kill_glusterd 2
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field_1 $V0
+
+TEST start_glusterd 2
#bug-1245142
$CLI_1 volume rebalance $V0 start &
diff --git a/tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t b/tests/bugs/glusterd/remove-brick-validation.t
index 11ed0d94d79..a0ff4ff6a24 100644
--- a/tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t
+++ b/tests/bugs/glusterd/remove-brick-validation.t
@@ -18,20 +18,6 @@ TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
-#test case for bug 1266818 - disabling enable-shared-storage option
-##should not delete user created volume with name glusterd_shared_storage
-
-## creating a volume with name glusterd_shared_storage
-TEST $CLI_1 volume create glusterd_shared_storage $H1:$B1/${V0}0 $H2:$B2/${V0}1
-TEST $CLI_1 volume start glusterd_shared_storage
-
-## disabling enable-shared-storage should not succeed and should not delete the
-## user created volume with name "glusterd_shared_storage"
-TEST ! $CLI_1 volume all enable-shared-storage disable
-
-## volume with name should exist
-TEST $CLI_1 volume info glusterd_shared_storage
-
#testcase: bug-1245045-remove-brick-validation
TEST $CLI_1 peer probe $H3;
diff --git a/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
index 20c84d26b9c..00beab59137 100644
--- a/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
+++ b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
@@ -49,6 +49,7 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
#Create a 3x3 dist-rep volume
TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{0,1,2,3,4,5,6,7,8};
TEST $CLI volume start $V1
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "9" brick_count ${V1}
# Mount FUSE and create file/directory
TEST glusterfs -s $H0 --volfile-id $V1 $M0
diff --git a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
index 3a27c2a9d1b..a871e112d87 100644
--- a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
+++ b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
@@ -4,7 +4,7 @@
. $(dirname $0)/../../cluster.rc
function check_peers {
-count=`$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l`
+count=`$CLI_3 peer status | grep 'Peer in Cluster (Connected)' | wc -l`
echo $count
}
diff --git a/tests/bugs/glusterd/validating-options-for-replicated-volume.t b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
index ee231338ff1..ddc80b17870 100644
--- a/tests/bugs/glusterd/validating-options-for-replicated-volume.t
+++ b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
@@ -61,10 +61,15 @@ TEST ! $CLI volume set $V0 background-self-heal-count " "
TEST $CLI volume set $V0 background-self-heal-count 10
EXPECT "10" volume_option $V0 cluster.background-self-heal-count
-TEST ! $CLI volume set $V0 cache-size ""
-TEST ! $CLI volume set $V0 cache-size " "
-TEST $CLI volume set $V0 cache-size 512MB
-EXPECT "512MB" volume_option $V0 performance.cache-size
+TEST ! $CLI volume set $V0 io-cache-size ""
+TEST ! $CLI volume set $V0 io-cache-size " "
+TEST $CLI volume set $V0 io-cache-size 64MB
+EXPECT "64MB" volume_option $V0 performance.io-cache-size
+
+TEST ! $CLI volume set $V0 quick-read-cache-size ""
+TEST ! $CLI volume set $V0 quick-read-cache-size " "
+TEST $CLI volume set $V0 quick-read-cache-size 512MB
+EXPECT "512MB" volume_option $V0 performance.quick-read-cache-size
TEST ! $CLI volume set $V0 self-heal-daemon ""
TEST ! $CLI volume set $V0 self-heal-daemon " "
diff --git a/tests/bugs/glusterfs-server/bug-852147.t b/tests/bugs/glusterfs-server/bug-852147.t
index c644cfa62dc..75db2a26e05 100755
--- a/tests/bugs/glusterfs-server/bug-852147.t
+++ b/tests/bugs/glusterfs-server/bug-852147.t
@@ -66,7 +66,7 @@ ren_file=$log_file".*"
rm -rf $ren_file
#Initiating log rotate
-TEST $CLI volume log rotate $V0
+TEST $CLI volume log $V0 rotate
#Capturing new log file's size
new_file_size=`file-size $log_file`
diff --git a/tests/bugs/glusterfs-server/bug-873549.t b/tests/bugs/glusterfs-server/bug-873549.t
index a3b2f9c9bf7..8b5534728fd 100644
--- a/tests/bugs/glusterfs-server/bug-873549.t
+++ b/tests/bugs/glusterfs-server/bug-873549.t
@@ -10,7 +10,7 @@ TEST $CLI volume info;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume set $V0 performance.cache-size 512MB
+TEST $CLI volume set $V0 performance.quick-read-cache-size 512MB
TEST $CLI volume start $V0
TEST $CLI volume statedump $V0 all
diff --git a/tests/bugs/glusterfs-server/bug-887145.t b/tests/bugs/glusterfs-server/bug-887145.t
index 82f7ccacfc1..db2cf3c050b 100755
--- a/tests/bugs/glusterfs-server/bug-887145.t
+++ b/tests/bugs/glusterfs-server/bug-887145.t
@@ -29,7 +29,15 @@ chmod 600 $M0/file;
TEST mount_nfs $H0:/$V0 $N0 nolock;
-chown -R nfsnobody:nfsnobody $M0/dir;
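+# Pick the NFS anonymous user: nfsnobody where it exists, otherwise fall back to nobody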
+grep nfsnobody /etc/passwd > /dev/null
+if [ $? -eq 1 ]; then
+usr=nobody
+grp=nobody
+else
+usr=nfsnobody
+grp=nfsnobody
+fi
+chown -R $usr:$grp $M0/dir;
chown -R tmp_user:tmp_user $M0/other;
TEST $CLI volume set $V0 server.root-squash on;
@@ -38,7 +46,7 @@ EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
# create files and directories in the root of the glusterfs and nfs mount
# which is owned by root and hence the right behavior is getting EACCES
-# as the fops are executed as nfsnobody.
+# as the fops are executed as nfsnobody/nobody.
touch $M0/foo 2>/dev/null;
TEST [ $? -ne 0 ]
touch $N0/foo 2>/dev/null;
@@ -61,7 +69,7 @@ cat $N0/passwd 1>/dev/null;
TEST [ $? -eq 0 ]
# create files and directories should succeed as the fops are being executed
-# inside the directory owned by nfsnobody
+# inside the directory owned by nfsnobody/nobody
TEST touch $M0/dir/file;
TEST touch $N0/dir/foo;
TEST mkdir $M0/dir/new;
diff --git a/tests/bugs/glusterfs/bug-873962-spb.t b/tests/bugs/glusterfs/bug-873962-spb.t
index db84a223089..db71cc0f6fe 100644
--- a/tests/bugs/glusterfs/bug-873962-spb.t
+++ b/tests/bugs/glusterfs/bug-873962-spb.t
@@ -14,6 +14,7 @@ TEST $CLI volume set $V0 performance.io-cache off
TEST $CLI volume set $V0 performance.write-behind off
TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
TEST $CLI volume start $V0
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
diff --git a/tests/bugs/logging/bug-823081.t b/tests/bugs/logging/bug-823081.t
index 0ed8f4c26c1..bd1965d2d49 100755
--- a/tests/bugs/logging/bug-823081.t
+++ b/tests/bugs/logging/bug-823081.t
@@ -22,20 +22,20 @@ function set_tail ()
set_tail $V0;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-`
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
TEST [[ \"$tail\" == \"$tail_success\" ]]
TEST ! $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-`
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
TEST [[ \"$tail\" == \"$tail_failure\" ]]
set_tail $V1;
TEST gluster volume create $V1 $H0:$B0/${V1}{1,2} force;
-tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-`
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
TEST [[ \"$tail\" == \"$tail_success_force\" ]]
TEST ! gluster volume create $V1 $H0:$B0/${V1}{1,2} force;
-tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-`
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
TEST [[ \"$tail\" == \"$tail_failure_force\" ]]
cleanup;
diff --git a/tests/bugs/posix/bug-1651445.t b/tests/bugs/posix/bug-1651445.t
index f6f1833f919..4d08b69b9b0 100644
--- a/tests/bugs/posix/bug-1651445.t
+++ b/tests/bugs/posix/bug-1651445.t
@@ -17,39 +17,35 @@ TEST $CLI volume start $V0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
-TEST $CLI volume set $V0 storage.reserve-size 10MB
+#Setting the size in bytes
+TEST $CLI volume set $V0 storage.reserve 40MB
-#No effect as priority to reserve-size
-TEST $CLI volume set $V0 storage.reserve 20
+#wait 5s to reset disk_space_full flag
+sleep 5
TEST dd if=/dev/zero of=$M0/a bs=100M count=1
-sleep 5
+TEST dd if=/dev/zero of=$M0/b bs=10M count=1
-#Below dd confirms posix is giving priority to reserve-size
-TEST dd if=/dev/zero of=$M0/b bs=40M count=1
+# Wait 5s for the disk_space_full flag to be updated, because the thread
+# checks disk space every 5s
sleep 5
+# setup_lvm creates an lvm partition of 150M and 40M is reserved, so after
+# consuming more than 110M the next dd should fail
TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1
+TEST dd if=/dev/urandom of=$M0/a bs=1022 count=1 oflag=seek_bytes,sync seek=102 conv=notrunc
rm -rf $M0/*
-#Size will reserve from the previously set reserve option = 20%
-TEST $CLI volume set $V0 storage.reserve-size 0
-#Overwrite reserve option
-TEST $CLI volume set $V0 storage.reserve-size 40MB
+#Setting the size in percent and repeating the above steps
+TEST $CLI volume set $V0 storage.reserve 40
-#wait 5s to reset disk_space_full flag
sleep 5
-TEST dd if=/dev/zero of=$M0/a bs=100M count=1
+TEST dd if=/dev/zero of=$M0/a bs=80M count=1
TEST dd if=/dev/zero of=$M0/b bs=10M count=1
-# Wait 5s to update disk_space_full flag because thread check disk space
-# after every 5s
-
sleep 5
-# setup_lvm create lvm partition of 150M and 40M are reserve so after
-# consuming more than 110M next dd should fail
TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1
TEST $CLI volume stop $V0
diff --git a/tests/bugs/protocol/bug-1433815-auth-allow.t b/tests/bugs/protocol/bug-1433815-auth-allow.t
index fa22ad8afd5..a78c0eb7111 100644
--- a/tests/bugs/protocol/bug-1433815-auth-allow.t
+++ b/tests/bugs/protocol/bug-1433815-auth-allow.t
@@ -17,6 +17,7 @@ TEST $CLI volume create $V0 $H0:$B0/$V0
# Set auth.allow so it *doesn't* include ourselves.
TEST $CLI volume set $V0 auth.allow 1.2.3.4
TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
# "System getspec" will include the username and password if the request comes
# from a server (which we are). Unfortunately, this will cause authentication
diff --git a/tests/bugs/replicate/bug-1101647.t b/tests/bugs/replicate/bug-1101647.t
index 8f420eec012..708bc1a1e29 100644
--- a/tests/bugs/replicate/bug-1101647.t
+++ b/tests/bugs/replicate/bug-1101647.t
@@ -12,6 +12,8 @@ TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0;
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
#Create base entry in indices/xattrop
echo "Data">$M0/file
diff --git a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
index 5fe8be07b50..b69a38ae788 100644
--- a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
+++ b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
@@ -25,9 +25,11 @@ iatt=$(stat -c "%g:%u:%A" file)
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT 2 get_pending_heal_count $V0
#Trigger metadataheal
TEST stat file
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#iattrs must be matching
iatt1=$(stat -c "%g:%u:%A" $B0/brick0/file)
diff --git a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
index 0767f47fdda..10ce0131f4f 100644
--- a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
+++ b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
@@ -49,25 +49,15 @@ TEST $CLI volume start $V0 force
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
-#Kill brick 0 and turn on the client side heal and do ls to trigger the heal.
-#The pending xattrs on bricks 1 & 2 should have pending entry on brick 0.
-TEST kill_brick $V0 $H0 $B0/${V0}0
+# We were killing one brick and checking that entry heal does not reset the
+# pending xattrs for the down brick. Now that we need all bricks to be up for
+# entry heal, I'm removing that test from the .t
+
TEST $CLI volume set $V0 cluster.data-self-heal on
TEST $CLI volume set $V0 cluster.metadata-self-heal on
TEST $CLI volume set $V0 cluster.entry-self-heal on
TEST ls $M0
-EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
-EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
-EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
-EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
-
-#Bring back all the bricks and trigger the heal again by doing ls. Now the
-#pending xattrs on all the bricks should be 0.
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
-TEST ls $M0
-
TEST cat $M0/f1
TEST cat $M0/f2
TEST cat $M0/f3
diff --git a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
index 26f90497d6f..49c4dea4e9c 100644
--- a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
+++ b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
@@ -53,8 +53,6 @@ TEST ! ls $B0/${V0}1/file$i
TEST ls $B0/${V0}2/file$i
dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}2)
TEST [ "$dirty" != "000000000000000000000000" ]
-EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2/file$i
-EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2/file$i
TEST $CLI volume set $V0 self-heal-daemon on
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
diff --git a/tests/bugs/replicate/bug-1744548-heal-timeout.t b/tests/bugs/replicate/bug-1744548-heal-timeout.t
index 3cb73bcad52..011535066f9 100644
--- a/tests/bugs/replicate/bug-1744548-heal-timeout.t
+++ b/tests/bugs/replicate/bug-1744548-heal-timeout.t
@@ -4,6 +4,11 @@
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../afr.rc
+function get_cumulative_opendir_count {
+# The sed command prints the content between "Cumulative" and "Interval", keeping only the Cumulative stats
+ $CLI volume profile $V0 info |sed -n '/^Cumulative/,/^Interval/p'|grep OPENDIR| awk '{print $8}'|tr -d '\n'
+}
+
cleanup;
TEST glusterd;
@@ -20,23 +25,23 @@ TEST ! $CLI volume heal $V0
TEST $CLI volume profile $V0 start
TEST $CLI volume profile $V0 info clear
TEST $CLI volume heal $V0 enable
-TEST $CLI volume heal $V0
-# Each brick does 3 opendirs, corresponding to dirty, xattrop and entry-changes
-COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
-TEST [ "$COUNT" == "333" ]
+# Each brick does 4 opendirs, corresponding to dirty, xattrop, entry-changes and anonymous-inode
+EXPECT_WITHIN 4 "^444$" get_cumulative_opendir_count
# Check that a change in heal-timeout is honoured immediately.
TEST $CLI volume set $V0 cluster.heal-timeout 5
sleep 10
-COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
# Two crawls must have happened.
-TEST [ "$COUNT" == "666" ]
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^121212$" get_cumulative_opendir_count
# shd must not heal if it is disabled and heal-timeout is changed.
TEST $CLI volume heal $V0 disable
+#Wait for configuration update and any opendir fops to complete
+sleep 10
TEST $CLI volume profile $V0 info clear
TEST $CLI volume set $V0 cluster.heal-timeout 6
-sleep 6
+#Better to wait for more than 6 seconds to account for configuration updates
+sleep 10
COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
TEST [ -z $COUNT ]
cleanup;
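The get_cumulative_opendir_count helper keeps only the Cumulative block of the profile output before extracting the per-brick OPENDIR call counts. A self-contained sketch of the same pipeline on mock input (the field layout is assumed to match the profile output, where the call count is field 8):

    printf 'Cumulative\n0 0 us 0 us 0 us 4 OPENDIR\n0 0 us 0 us 0 us 4 OPENDIR\nInterval\n' \
        | sed -n '/^Cumulative/,/^Interval/p' | grep OPENDIR | awk '{print $8}' | tr -d '\n'
    # prints "44": one concatenated count per brick, which is why the test expects "444" for 3 bricks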
diff --git a/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
new file mode 100644
index 00000000000..7e24eaec03d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0..2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch $M0/a
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/a
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime0=$(get_mtime $B0/brick0/a)
+mtime1=$(get_mtime $B0/brick1/a)
+TEST [ $mtime0 -eq $mtime1 ]
+
+ctime0=$(get_ctime $B0/brick0/a)
+ctime1=$(get_ctime $B0/brick1/a)
+TEST [ $ctime0 -eq $ctime1 ]
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+
+TEST touch $M0/b
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/b
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime2=$(get_mtime $B0/brick0/b)
+mtime3=$(get_mtime $B0/brick1/b)
+TEST [ $mtime2 -eq $mtime3 ]
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1801624-entry-heal.t b/tests/bugs/replicate/bug-1801624-entry-heal.t
new file mode 100644
index 00000000000..94b465181fa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1801624-entry-heal.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+TEST $CLI volume set $V0 heal-timeout 5
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Re-create the file when a brick is down.
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST rm $M0/FILE
+echo "New Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
+
+# Launching index heal must not reset parent dir afr xattrs or remove granular entry indices.
+$CLI volume heal $V0 # CLI will fail but heal is launched anyway.
+TEST sleep 5 # give index heal a chance to do one run.
+brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick0/)
+brick2_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick2/)
+TEST [ $brick0_pending -eq "000000000000000000000002" ]
+TEST [ $brick2_pending -eq "000000000000000000000002" ]
+EXPECT "FILE" ls $B0/brick0/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+EXPECT "FILE" ls $B0/brick2/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+$CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# No gfid-split-brain (i.e. EIO) must be seen. Try on a fresh mount to avoid cached values.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST cat $M0/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-880898.t b/tests/bugs/replicate/bug-880898.t
index 123e7e16425..660d34ca25f 100644
--- a/tests/bugs/replicate/bug-880898.t
+++ b/tests/bugs/replicate/bug-880898.t
@@ -1,12 +1,19 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
pkill glusterfs
uuid=""
for line in $(cat $GLUSTERD_WORKDIR/glusterd.info)
diff --git a/tests/bugs/replicate/issue-1254-prioritize-enospc.t b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
new file mode 100644
index 00000000000..fab94b71b27
--- /dev/null
+++ b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function create_bricks {
+ TEST truncate -s 100M $B0/brick0
+ TEST truncate -s 100M $B0/brick1
+ TEST truncate -s 20M $B0/brick2
+ LO1=`SETUP_LOOP $B0/brick0`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO1
+ LO2=`SETUP_LOOP $B0/brick1`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO2
+ LO3=`SETUP_LOOP $B0/brick2`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO3
+ TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+ TEST MOUNT_LOOP $LO1 $B0/${V0}0
+ TEST MOUNT_LOOP $LO2 $B0/${V0}1
+ TEST MOUNT_LOOP $LO3 $B0/${V0}2
+}
+
+function create_files {
+ local i=1
+ while (true)
+ do
+ touch $M0/file$i
+ if [ -e $B0/${V0}2/file$i ];
+ then
+ ((i++))
+ else
+ break
+ fi
+ done
+}
+
+TESTS_EXPECTED_IN_LOOP=13
+
+#Arbiter volume: Check for ENOSPC when the arbiter brick becomes full#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$((echo "Test" > $M0/file-3) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
+
+#Replica-3 volume: Check for ENOSPC when one of the bricks becomes full#
+#The third brick is kept smaller to simulate a disk-full scenario#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$((cat /dev/zero > $M0/file1) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
diff --git a/tests/bugs/replicate/mdata-heal-no-xattrs.t b/tests/bugs/replicate/mdata-heal-no-xattrs.t
new file mode 100644
index 00000000000..d3b0c504c80
--- /dev/null
+++ b/tests/bugs/replicate/mdata-heal-no-xattrs.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Change permissions on brick-0: this simulates the case where there is a
+# metadata mismatch but no pending xattrs. This brick will become the source for heal.
+TEST chmod +x $B0/$V0"0"/FILE
+
+# Add gfid to xattrop
+xattrop_b0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_b0`
+gfid_str_FILE=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/FILE))
+TEST ln $xattrop_b0/$base_entry_b0 $xattrop_b0/$gfid_str_FILE
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Brick-0 should contain xattrs blaming the other 2 bricks.
+# The values will be zero because heal is over.
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/FILE
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}0/FILE
+
+# Brick-1 and Brick-2 must not contain any afr xattrs.
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
+
+# check permission bits.
+EXPECT '755' stat -c %a $B0/${V0}0/FILE
+EXPECT '755' stat -c %a $B0/${V0}1/FILE
+EXPECT '755' stat -c %a $B0/${V0}2/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/rpc/bug-954057.t b/tests/bugs/rpc/bug-954057.t
index 65af274f09d..40acdc2fdc7 100755
--- a/tests/bugs/rpc/bug-954057.t
+++ b/tests/bugs/rpc/bug-954057.t
@@ -25,7 +25,15 @@ TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
TEST mkdir $M0/dir
TEST mkdir $M0/nobody
-TEST chown nfsnobody:nfsnobody $M0/nobody
+grep nfsnobody /etc/passwd > /dev/null
+if [ $? -eq 1 ]; then
+usr=nobody
+grp=nobody
+else
+usr=nfsnobody
+grp=nfsnobody
+fi
+TEST chown $usr:$grp $M0/nobody
TEST `echo "file" >> $M0/file`
TEST cp $M0/file $M0/new
TEST chmod 700 $M0/new
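The /etc/passwd check falls back to nobody on distributions without an nfsnobody account. An equivalent check with id(1), which also covers accounts that do not come from /etc/passwd (sketch, not part of the patch):

    if id -u nfsnobody >/dev/null 2>&1; then
        usr=nfsnobody; grp=nfsnobody
    else
        usr=nobody; grp=nobody
    fi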
diff --git a/tests/bugs/shard/bug-1272986.t b/tests/bugs/shard/bug-1272986.t
index 762887051fa..66e896ad0c4 100644
--- a/tests/bugs/shard/bug-1272986.t
+++ b/tests/bugs/shard/bug-1272986.t
@@ -16,16 +16,16 @@ TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1
# Write some data into a file, such that its size crosses the shard block size.
-TEST dd if=/dev/zero of=$M1/file bs=1M count=5 conv=notrunc
+TEST dd if=/dev/urandom of=$M1/file bs=1M count=5 conv=notrunc oflag=direct
md5sum1_reader=$(md5sum $M0/file | awk '{print $1}')
EXPECT "$md5sum1_reader" echo `md5sum $M1/file | awk '{print $1}'`
# Append some more data into the file.
-TEST `echo "abcdefg" >> $M1/file`
+TEST dd if=/dev/urandom of=$M1/file bs=256k count=1 conv=notrunc oflag=direct
-md5sum2_reader=$(md5sum $M0/file | awk '{print $1}')
+md5sum2_reader=$(dd if=$M0/file iflag=direct bs=256k| md5sum | awk '{print $1}')
# Test to see if the reader refreshes its cache correctly as part of the reads
# triggered through md5sum. If it does, then the md5sum on the reader and writer
diff --git a/tests/bugs/shard/bug-1696136.c b/tests/bugs/shard/bug-1696136.c
index b9e8d1375e5..cb650535b09 100644
--- a/tests/bugs/shard/bug-1696136.c
+++ b/tests/bugs/shard/bug-1696136.c
@@ -87,8 +87,9 @@ main(int argc, char *argv[])
goto out;
}
- offset = atoi(argv[4]);
- len = atoi(argv[5]);
+ /* Note that off_t is signed but size_t isn't. */
+ offset = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
fd = glfs_open(fs, argv[6], O_RDWR);
if (fd == NULL) {
diff --git a/tests/bugs/shard/bug-shard-discard.c b/tests/bugs/shard/bug-shard-discard.c
index 15dca6c2181..6fa93fb89d1 100644
--- a/tests/bugs/shard/bug-shard-discard.c
+++ b/tests/bugs/shard/bug-shard-discard.c
@@ -50,8 +50,9 @@ main(int argc, char *argv[])
goto out;
}
- off = atoi(argv[4]);
- len = atoi(argv[5]);
+ /* Note that off_t is signed but size_t isn't. */
+ off = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
ret = glfs_discard(fd, off, len);
if (ret <= 0) {
diff --git a/tests/bugs/shard/issue-1243.t b/tests/bugs/shard/issue-1243.t
new file mode 100644
index 00000000000..ba22d2b74fe
--- /dev/null
+++ b/tests/bugs/shard/issue-1243.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST $CLI volume set $V0 md-cache-timeout 10
+
+# Write data into a file such that its size crosses shard-block-size
+TEST dd if=/dev/zero of=$M0/foo bs=1048576 count=8 oflag=direct
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Execute a setxattr on the file.
+TEST setfattr -n trusted.libvirt -v some-value $M0/foo
+
+# Size of the file should be the aggregated size, not the shard-block-size
+EXPECT '8388608' stat -c %s $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Execute a removexattr on the file.
+TEST setfattr -x trusted.libvirt $M0/foo
+
+# Size of the file should be the aggregated size, not the shard-block-size
+EXPECT '8388608' stat -c %s $M0/foo
+cleanup
diff --git a/tests/bugs/shard/issue-1281.t b/tests/bugs/shard/issue-1281.t
new file mode 100644
index 00000000000..9704caa8944
--- /dev/null
+++ b/tests/bugs/shard/issue-1281.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+#Open a file and store descriptor in fd = 5
+exec 5>$M0/foo
+
+#Unlink the same file which was opened in the previous step
+TEST unlink $M0/foo
+
+#Write something to the file using the open fd = 5
+echo "issue-1281" >&5
+
+#The write on the descriptor should be successful
+EXPECT 0 echo $?
+
+#Close the fd = 5
+exec 5>&-
+
+cleanup
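The test relies on the standard shell idiom of holding a file descriptor open across an unlink; the descriptor stays writable even though the name is gone. The same pattern outside Gluster (path is illustrative):

    exec 5>/tmp/scratch   # open fd 5 for writing
    rm /tmp/scratch       # remove the name; the descriptor remains valid
    echo data >&5         # the write still succeeds
    exec 5>&-             # close fd 5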
diff --git a/tests/bugs/shard/issue-1425.t b/tests/bugs/shard/issue-1425.t
new file mode 100644
index 00000000000..bbe82c0e5b2
--- /dev/null
+++ b/tests/bugs/shard/issue-1425.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+FILE_COUNT_TIME=5
+
+function get_file_count {
+ ls $1* | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST fallocate -l 20M $M0/foo
+gfid_new=$(get_gfid_string $M0/foo)
+
+# Check for the base shard
+TEST stat $M0/foo
+TEST stat $B0/${V0}0/foo
+
+# There should be 4 associated shards
+EXPECT_WITHIN $FILE_COUNT_TIME 4 get_file_count $B0/${V0}0/.shard/$gfid_new
+
+# There should be 1+4 shards and we expect 4 fewer lookups than on a build without this patch
+EXPECT "21" echo `$CLI volume profile $V0 info incremental | grep -w LOOKUP | awk '{print $8}'`
+
+# Delete the base shard and check shards get cleaned up
+TEST unlink $M0/foo
+
+TEST ! stat $M0/foo
+TEST ! stat $B0/${V0}0/foo
+
+# There should be no shards now
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_new
+cleanup
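The expected count of 4 follows from the shard arithmetic: the base file holds the first 4MB and each further 4MB of the 20MB fallocate gets its own shard under .shard. As a one-liner:

    echo $(( (20 - 4) / 4 ))   # MB beyond the base file divided by shard-block-size in MB -> 4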
diff --git a/tests/bugs/shard/shard-fallocate.c b/tests/bugs/shard/shard-fallocate.c
index 45b9ce00509..cb0714e8564 100644
--- a/tests/bugs/shard/shard-fallocate.c
+++ b/tests/bugs/shard/shard-fallocate.c
@@ -87,8 +87,9 @@ main(int argc, char *argv[])
goto out;
}
- offset = atoi(argv[4]);
- len = atoi(argv[5]);
+ /* Note that off_t is signed but size_t isn't. */
+ offset = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
fd = glfs_open(fs, argv[6], O_RDWR);
if (fd == NULL) {
diff --git a/tests/bugs/snapshot/bug-1111041.t b/tests/bugs/snapshot/bug-1111041.t
index f771d64f2a3..efda9688d8b 100755
--- a/tests/bugs/snapshot/bug-1111041.t
+++ b/tests/bugs/snapshot/bug-1111041.t
@@ -11,6 +11,10 @@ function is_snapd_running {
$CLI volume status $1 | grep "Snapshot Daemon" | wc -l;
}
+function snapd_pid {
+ $CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $8'}
+}
+
TEST glusterd;
TEST pidof glusterd;
@@ -25,14 +29,12 @@ TEST $CLI volume set $V0 features.uss enable;
EXPECT "1" is_snapd_running $V0
-SNAPD_PID=$($CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $8'});
+SNAPD_PID=$(snapd_pid);
TEST [ $SNAPD_PID -gt 0 ]
kill -9 $SNAPD_PID
-SNAPD_PID=$($CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $8'});
-
-TEST [ $SNAPD_PID = 'N/A' ]
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^N/A$" snapd_pid
cleanup ;
diff --git a/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t b/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
index f30194b6339..04a85db0c1a 100644
--- a/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
+++ b/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
@@ -130,3 +130,4 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" mounted_snaps ${V1}
cleanup;
# run first!
+#G_TESTDEF_TEST_STATUS_CENTOS6=BRICK_MUX_BAD_TEST,BUG=1743069
diff --git a/tests/bugs/snapshot/bug-1597662.t b/tests/bugs/snapshot/bug-1597662.t
index dc87d17a0ef..f582930476a 100644
--- a/tests/bugs/snapshot/bug-1597662.t
+++ b/tests/bugs/snapshot/bug-1597662.t
@@ -34,12 +34,13 @@ function is_snap_path
EXPECT "1" is_snap_path
$CLI snapshot deactivate snap1;
-
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} 'Stopped' snapshot_status snap1
# snap is deactivated so snap_path should not exist
EXPECT "0" is_snap_path
# activate snap again
$CLI snapshot activate snap1;
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} 'Started' snapshot_status snap1
# snap is active so snap_path should exist
EXPECT "1" is_snap_path
diff --git a/tests/bugs/transport/bug-873367.t b/tests/bugs/transport/bug-873367.t
index d4c07024ed0..8070bc1b83c 100755
--- a/tests/bugs/transport/bug-873367.t
+++ b/tests/bugs/transport/bug-873367.t
@@ -13,7 +13,7 @@ rm -f $SSL_BASE/glusterfs.*
mkdir -p $B0/1
mkdir -p $M0
-TEST openssl genrsa -out $SSL_KEY 1024
+TEST openssl genrsa -out $SSL_KEY 2048
TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT
ln $SSL_CERT $SSL_CA
diff --git a/tests/bugs/write-behind/issue-884.c b/tests/bugs/write-behind/issue-884.c
new file mode 100644
index 00000000000..e9c33b351ad
--- /dev/null
+++ b/tests/bugs/write-behind/issue-884.c
@@ -0,0 +1,267 @@
+
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <assert.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <pthread.h>
+
+#include <glusterfs/api/glfs.h>
+
+/* Based on a reproducer by Stefan Ring. It seems to be quite sensitive to any
+ * timing modification, so the code has been maintained as is, only with minor
+ * changes. */
+
+struct glfs *glfs;
+
+pthread_mutex_t the_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t the_cond = PTHREAD_COND_INITIALIZER;
+
+typedef struct _my_aiocb {
+ int64_t size;
+ volatile int64_t seq;
+ int which;
+} my_aiocb;
+
+typedef struct _worker_data {
+ my_aiocb cb;
+ struct iovec iov;
+ int64_t offset;
+} worker_data;
+
+typedef struct {
+ worker_data wdata[2];
+
+ volatile unsigned busy;
+} all_data_t;
+
+all_data_t all_data;
+
+static void
+completion_fnc(struct glfs_fd *fd, ssize_t ret, struct glfs_stat *pre,
+ struct glfs_stat *post, void *arg)
+{
+ void *the_thread;
+ my_aiocb *cb = (my_aiocb *)arg;
+ long seq = cb->seq;
+
+ assert(ret == cb->size);
+
+ pthread_mutex_lock(&the_mutex);
+ pthread_cond_broadcast(&the_cond);
+
+ all_data.busy &= ~(1 << cb->which);
+ cb->seq = -1;
+
+ the_thread = (void *)pthread_self();
+ printf("worker %d is done from thread %p, seq %ld!\n", cb->which,
+ the_thread, seq);
+
+ pthread_mutex_unlock(&the_mutex);
+}
+
+static void
+init_wdata(worker_data *data, int which)
+{
+ data->cb.which = which;
+ data->cb.seq = -1;
+
+ data->iov.iov_base = malloc(1024 * 1024);
+ memset(data->iov.iov_base, 6,
+ 1024 * 1024); /* tail part never overwritten */
+}
+
+static void
+init()
+{
+ all_data.busy = 0;
+
+ init_wdata(&all_data.wdata[0], 0);
+ init_wdata(&all_data.wdata[1], 1);
+}
+
+static void
+do_write(struct glfs_fd *fd, int content, int size, int64_t seq,
+ worker_data *wdata, const char *name)
+{
+ int ret;
+
+ wdata->cb.size = size;
+ wdata->cb.seq = seq;
+
+ if (content >= 0)
+ memset(wdata->iov.iov_base, content, size);
+ wdata->iov.iov_len = size;
+
+ pthread_mutex_lock(&the_mutex);
+ printf("(%d) dispatching write \"%s\", offset %lx, len %x, seq %ld\n",
+ wdata->cb.which, name, (long)wdata->offset, size, (long)seq);
+ pthread_mutex_unlock(&the_mutex);
+ ret = glfs_pwritev_async(fd, &wdata->iov, 1, wdata->offset, 0,
+ completion_fnc, &wdata->cb);
+ assert(ret >= 0);
+}
+
+#define IDLE 0 // both workers must be idle
+#define ANY 1 // use any worker, other one may be busy
+
+int
+get_worker(int waitfor, int64_t excl_seq)
+{
+ int which;
+
+ pthread_mutex_lock(&the_mutex);
+
+ while (waitfor == IDLE && (all_data.busy & 3) != 0 ||
+ waitfor == ANY &&
+ ((all_data.busy & 3) == 3 ||
+ excl_seq >= 0 && (all_data.wdata[0].cb.seq == excl_seq ||
+ all_data.wdata[1].cb.seq == excl_seq)))
+ pthread_cond_wait(&the_cond, &the_mutex);
+
+ if (!(all_data.busy & 1))
+ which = 0;
+ else
+ which = 1;
+
+ all_data.busy |= (1 << which);
+
+ pthread_mutex_unlock(&the_mutex);
+
+ return which;
+}
+
+static int
+doit(struct glfs_fd *fd)
+{
+ int ret;
+ int64_t seq = 0;
+ int64_t offset = 0; // position in file, in blocks
+ int64_t base = 0x1000; // where to place the data, in blocks
+
+ int async_mode = ANY;
+
+ init();
+
+ for (;;) {
+ int which;
+ worker_data *wdata;
+
+ // for growing to the first offset
+ for (;;) {
+ int gap = base + 0x42 - offset;
+ if (!gap)
+ break;
+ if (gap > 80)
+ gap = 80;
+
+ which = get_worker(IDLE, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = offset << 9;
+ do_write(fd, 0, gap << 9, seq++, wdata, "gap-filling");
+
+ offset += gap;
+ }
+
+ // 8700
+ which = get_worker(IDLE, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0x42) << 9;
+ do_write(fd, 1, 62 << 9, seq++, wdata, "!8700");
+
+ // 8701
+ which = get_worker(IDLE, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0x42) << 9;
+ do_write(fd, 2, 55 << 9, seq++, wdata, "!8701");
+
+ // 8702
+ which = get_worker(async_mode, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0x79) << 9;
+ do_write(fd, 3, 54 << 9, seq++, wdata, "!8702");
+
+ // 8703
+ which = get_worker(async_mode, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0xaf) << 9;
+ do_write(fd, 4, 81 << 9, seq++, wdata, "!8703");
+
+ // 8704
+ // this writes both 5s and 6s
+ // the range of 5s is the one that overwrites 8703
+
+ which = get_worker(async_mode, seq - 1);
+ wdata = &all_data.wdata[which];
+
+ memset(wdata->iov.iov_base, 5, 81 << 9);
+ wdata->offset = (base + 0xaf) << 9;
+ do_write(fd, -1, 1623 << 9, seq++, wdata, "!8704");
+
+ offset = base + 0x706;
+ base += 0x1000;
+ if (base >= 0x100000)
+ break;
+ }
+
+ printf("done!\n");
+ fflush(stdout);
+
+ pthread_mutex_lock(&the_mutex);
+
+ while ((all_data.busy & 3) != 0)
+ pthread_cond_wait(&the_cond, &the_mutex);
+
+ pthread_mutex_unlock(&the_mutex);
+
+ ret = glfs_close(fd);
+ assert(ret >= 0);
+ /*
+ ret = glfs_fini(glfs);
+ assert(ret >= 0);
+ */
+ return 0;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret;
+ int open_flags = O_RDWR | O_DIRECT | O_TRUNC;
+ struct glfs_fd *fd;
+
+ glfs = glfs_new(argv[1]);
+ if (!glfs) {
+ printf("glfs_new!\n");
+ goto out;
+ }
+ ret = glfs_set_volfile_server(glfs, "tcp", "localhost", 24007);
+ if (ret < 0) {
+ printf("set_volfile!\n");
+ goto out;
+ }
+ ret = glfs_init(glfs);
+ if (ret) {
+ printf("init!\n");
+ goto out;
+ }
+ fd = glfs_open(glfs, argv[2], open_flags);
+ if (!fd) {
+ printf("open!\n");
+ goto out;
+ }
+ srand(time(NULL));
+ return doit(fd);
+out:
+ return 1;
+}
diff --git a/tests/bugs/write-behind/issue-884.t b/tests/bugs/write-behind/issue-884.t
new file mode 100755
index 00000000000..2bcf7d15265
--- /dev/null
+++ b/tests/bugs/write-behind/issue-884.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test tries to detect a race condition in write-behind. It's based on a
+# reproducer written by Stefan Ring that is able to hit it sometimes. On my
+# system, it happened around 10% of the runs. This means that if this bug
+# appears again, this test will fail once every 10 runs. Most probably this
+# failure will be hidden by the automatic test retry of the testing framework.
+#
+# If this test fails, please analyze it in detail.
+
+function run() {
+ "${@}" >/dev/null
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+# This makes it easier to hit the issue
+TEST $CLI volume set $V0 client-log-level TRACE
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+build_tester $(dirname $0)/issue-884.c -lgfapi
+
+TEST touch $M0/testfile
+
+# This program generates a file of 535694336 bytes with a fixed pattern
+TEST run $(dirname $0)/issue-884 $V0 testfile
+
+# This is the md5sum of the expected pattern without corruption
+EXPECT "ad105f9349345a70fc697632cbb5eec8" echo "$(md5sum $B0/$V0/testfile | awk '{ print $1; }')"
+
+cleanup
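The reproducer is compiled by build_tester and takes the volume name and a path relative to the volume root (see glfs_new(argv[1]) and glfs_open(..., argv[2]) above). A manual equivalent, with compiler flags illustrative and assuming the gfapi development headers are installed:

    gcc tests/bugs/write-behind/issue-884.c -o issue-884 -lgfapi
    ./issue-884 "$V0" testfile   # $V0 and the testfile created by the .t above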
diff --git a/tests/cluster.rc b/tests/cluster.rc
index 99be8e79c21..34f5b02398f 100644
--- a/tests/cluster.rc
+++ b/tests/cluster.rc
@@ -11,7 +11,7 @@ function launch_cluster() {
define_backends $count;
define_hosts $count;
define_glusterds $count $2;
- define_clis $count;
+ define_clis $count $3;
start_glusterds;
}
@@ -50,15 +50,16 @@ function define_glusterds() {
sopt="management.glusterd-sockfile=${!b}/glusterd/gd.sock"
#Get the logdir
logdir=`gluster --print-logdir`
+ clopt="management.cluster-test-mode=${logdir}/$i";
#Fetch the testcases name and prefix the glusterd log with it
logfile=`echo ${0##*/}`_glusterd$i.log
- lopt="--log-file=$logdir/$logfile"
+ lopt="--log-file=$logdir/$i/$logfile"
if [ "$2" == "-LDEBUG" ]; then
- eval "glusterd_$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt $lopt $popt'";
- eval "glusterd$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt $lopt $popt'";
+ eval "glusterd_$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt --xlator-option $clopt $lopt $popt'";
+ eval "glusterd$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt --xlator-option $clopt $lopt $popt'";
else
- eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt $lopt $popt'";
- eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt $lopt $popt'";
+ eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt --xlator-option $clopt $lopt $popt'";
+ eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt --xlator-option $clopt $lopt $popt'";
fi
done
}
@@ -89,6 +90,20 @@ function kill_glusterd() {
kill `cat $pidfile`;
}
+function restart_glusterd() {
+ local index=$1
+ local b
+ local pidfile
+ local g
+
+ b="B$index"
+ pidfile="${!b}/glusterd.pid"
+
+ kill `cat $pidfile`
+
+ g="glusterd_${index}"
+ ${!g}
+}
function kill_node() {
local index=$1;
@@ -133,8 +148,13 @@ function define_clis() {
lopt1="--log-file=$logdir/$logfile1"
- eval "CLI_$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt'";
- eval "CLI$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt1'";
+ if [ "$2" == "-NO_FORCE" ]; then
+ eval "CLI_$i='$CLI_NO_FORCE --glusterd-sock=${!b}/glusterd/gd.sock $lopt'";
+ eval "CLI$i='$CLI_NO_FORCE --glusterd-sock=${!b}/glusterd/gd.sock $lopt1'";
+ else
+ eval "CLI_$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt'";
+ eval "CLI$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt1'";
+ fi
done
}
@@ -191,3 +211,8 @@ function cluster_brick_up_status {
eval \$CLI_$1 volume status $vol $host:$brick --xml | sed -ne 's/.*<status>\([01]\)<\/status>/\1/p'
}
+function cluster_remove_brick_status_completed_field {
+ local vol=$1
+ local brick_list=$2
+ $CLI_1 volume remove-brick $vol $brick_list status | awk '{print $7}' | sed -n 3p
+}
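define_clis now takes an optional -NO_FORCE argument, forwarded through launch_cluster's third parameter, and restart_glusterd bounces a single node's daemon. Usage sketch (arguments are illustrative):

    launch_cluster 3                 # three nodes, CLIs built from $CLI as before
    launch_cluster 3 '' -NO_FORCE    # same, but CLIs built from $CLI_NO_FORCE
    restart_glusterd 2               # kill node 2's glusterd and start it again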
diff --git a/tests/ec.rc b/tests/ec.rc
index 04405ecb829..f18752fc99a 100644
--- a/tests/ec.rc
+++ b/tests/ec.rc
@@ -7,3 +7,12 @@ function ec_up_status()
local ec_id=$3
grep -E "^up =" $m/.meta/graphs/active/${v}-disperse-${ec_id}/private | cut -f2 -d'='
}
+
+function ec_option_value()
+{
+ local v=$1
+ local m=$2
+ local ec_id=$3
+ local opt=$4
+ grep -E "^$opt =" $m/.meta/graphs/active/${v}-disperse-${ec_id}/private | cut -f2 -d'='| awk '{print $1}'
+}
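ec_option_value complements ec_up_status by reading a single key from the disperse xlator's private meta file. Usage sketch; the option name shown is an assumption about what that file exposes:

    ec_option_value $V0 $M0 0 heal-waiters   # value for subvolume ${V0}-disperse-0 on mount $M0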
diff --git a/tests/env.rc.in b/tests/env.rc.in
index c7472a7988d..0478d66aec6 100644
--- a/tests/env.rc.in
+++ b/tests/env.rc.in
@@ -2,7 +2,7 @@ prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
-PATH=@sbindir@:$PATH
+PATH=@bindir@:@sbindir@:$PATH
export PATH
GLUSTERD_PIDFILEDIR=@localstatedir@/run/gluster
diff --git a/tests/features/flock_interrupt.t b/tests/features/flock_interrupt.t
index 964a4bc20ef..b8717e30dfb 100644
--- a/tests/features/flock_interrupt.t
+++ b/tests/features/flock_interrupt.t
@@ -28,6 +28,5 @@ flock $M0/testfile sleep 6 & { sleep 0.3; flock -w 2 $M0/testfile true; echo ok
EXPECT_WITHIN 4 ok cat got_lock;
## Finish up
-sleep 7;
rm -f got_lock;
cleanup;
diff --git a/tests/features/fuse-lru-limit.t b/tests/features/fuse-lru-limit.t
index 9f1211660ce..dd6be2d5397 100644
--- a/tests/features/fuse-lru-limit.t
+++ b/tests/features/fuse-lru-limit.t
@@ -10,6 +10,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
TEST $CLI volume start $V0
TEST glusterfs -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "2" online_brick_count
EXPECT "1" get_mount_active_size_value $V0 $M0
EXPECT "0" get_mount_lru_size_value $V0 $M0
diff --git a/tests/features/interrupt.t b/tests/features/interrupt.t
index bd70ff87545..067eb1b7486 100644
--- a/