Diffstat (limited to 'api/src/glfs-internal.h')
-rw-r--r--  api/src/glfs-internal.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 53 insertions(+), 4 deletions(-)
diff --git a/api/src/glfs-internal.h b/api/src/glfs-internal.h
index d658ce2ae..30ff599f2 100644
--- a/api/src/glfs-internal.h
+++ b/api/src/glfs-internal.h
@@ -33,6 +33,8 @@ struct glfs {
int err;
xlator_t *active_subvol;
+ xlator_t *next_subvol;
+ xlator_t *old_subvol;
char *oldvolfile;
ssize_t oldvollen;
@@ -40,12 +42,17 @@ struct glfs {
inode_t *cwd;
uint32_t dev_id; /* Used to fill st_dev in struct stat */
+
+ struct list_head openfds;
+
+ gf_boolean_t migration_in_progress;
};
struct glfs_fd {
+ struct list_head openfds;
struct glfs *fs;
off_t offset;
- fd_t *fd;
+ fd_t *fd; /* Currently guarded by @fs->mutex. TODO: per-glfd lock */
struct list_head entries;
gf_dirent_t *next;
};
@@ -62,7 +69,11 @@ int glfs_resolve (struct glfs *fs, xlator_t *subvol, const char *path, loc_t *lo
struct iatt *iatt, int reval);
int glfs_lresolve (struct glfs *fs, xlator_t *subvol, const char *path, loc_t *loc,
struct iatt *iatt, int reval);
-void glfs_first_lookup (xlator_t *subvol);
+fd_t *glfs_resolve_fd (struct glfs *fs, xlator_t *subvol, struct glfs_fd *glfd);
+
+fd_t *__glfs_migrate_fd (struct glfs *fs, xlator_t *subvol, struct glfs_fd *glfd);
+
+int glfs_first_lookup (xlator_t *subvol);
static inline void
__glfs_entry_fs (struct glfs *fs)
@@ -78,12 +89,50 @@ __glfs_entry_fd (struct glfs_fd *fd)
}
+/*
+  By default, all lock attempts from user context must
+  use glfs_lock() and glfs_unlock(). This allows for a
+  safe implementation of graph migration, where we can
+  give up the mutex during syncop calls so that
+  bottom-up calls (particularly CHILD_UP notify) can
+  do a mutex_lock() on @glfs without deadlocking the
+  filesystem.
+*/
+static inline int
+glfs_lock (struct glfs *fs)
+{
+ pthread_mutex_lock (&fs->mutex);
+
+ while (!fs->init)
+ pthread_cond_wait (&fs->cond, &fs->mutex);
+
+ while (fs->migration_in_progress)
+ pthread_cond_wait (&fs->cond, &fs->mutex);
+
+ return 0;
+}
+
+
+static inline void
+glfs_unlock (struct glfs *fs)
+{
+ pthread_mutex_unlock (&fs->mutex);
+}
+
+
void glfs_fd_destroy (struct glfs_fd *glfd);
struct glfs_fd *glfs_fd_new (struct glfs *fs);
-
-xlator_t * glfs_fd_subvol (struct glfs_fd *glfd);
+void glfs_fd_bind (struct glfs_fd *glfd);
xlator_t * glfs_active_subvol (struct glfs *fs);
+xlator_t * __glfs_active_subvol (struct glfs *fs);
+void glfs_subvol_done (struct glfs *fs, xlator_t *subvol);
+
+inode_t * glfs_refresh_inode (xlator_t *subvol, inode_t *inode);
+
+inode_t *glfs_cwd_get (struct glfs *fs);
+int glfs_cwd_set (struct glfs *fs, inode_t *inode);
+int __glfs_cwd_set (struct glfs *fs, inode_t *inode);
#endif /* !_GLFS_INTERNAL_H */
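
For context, a hypothetical sketch of glfs_fd_bind() declared above, assuming
it simply links a new fd into @fs->openfds under glfs_lock() so that a graph
switch can later walk the list and migrate each open fd; list_add_tail() is
the libglusterfs list helper. This is illustrative, not the commit's actual
implementation.

/* Sketch: register an open fd with the filesystem so it can be
 * migrated to the new active subvol on a graph switch. */
void
glfs_fd_bind (struct glfs_fd *glfd)
{
	struct glfs *fs = glfd->fs;

	glfs_lock (fs);
	{
		list_add_tail (&glfd->openfds, &fs->openfds);
	}
	glfs_unlock (fs);
}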
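And a minimal sketch of how a public entry point is expected to pair these
helpers, assuming glfs_active_subvol() takes glfs_lock() internally and
returns a referenced graph. The function name glfs_example_stat() is made up
for illustration; iatt_to_stat() and loc_wipe() come from libglusterfs, and
the usual includes (<sys/stat.h>, "glfs-internal.h") are assumed.

/* Sketch only: fs->mutex is held just long enough to pick the active
 * graph; the syncop itself runs unlocked, so a bottom-up CHILD_UP
 * notify can take fs->mutex without deadlocking the filesystem. */
int
glfs_example_stat (struct glfs *fs, const char *path, struct stat *buf)
{
	xlator_t    *subvol = NULL;
	loc_t        loc = {0, };
	struct iatt  iatt = {0, };
	int          ret = -1;

	__glfs_entry_fs (fs);

	subvol = glfs_active_subvol (fs); /* lock, ref active graph, unlock */
	if (!subvol)
		return -1;

	ret = glfs_resolve (fs, subvol, path, &loc, &iatt, 0 /* no reval */);
	if (ret == 0 && buf)
		iatt_to_stat (&iatt, buf);

	loc_wipe (&loc);
	glfs_subvol_done (fs, subvol); /* unref; may retire the old graph */

	return ret;
}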