/* Copyright (c) 2010-2012 Red Hat, Inc. This file is part of GlusterFS. This file is licensed to you under your choice of the GNU Lesser General Public License, version 3 or any later version (LGPLv3 or later), or the GNU General Public License, version 2 (GPLv2), in all cases as published by the Free Software Foundation. */ #include #include #include #include #include #include #include #include #include #include #include "cli.h" #include "cli-cmd.h" #include "cli-mem-types.h" #include "cli1-xdr.h" #include "run.h" #include "syscall.h" #include "common-utils.h" extern struct rpc_clnt *global_rpc; extern struct rpc_clnt *global_quotad_rpc; extern rpc_clnt_prog_t *cli_rpc_prog; extern rpc_clnt_prog_t cli_quotad_clnt; int cli_cmd_volume_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, const char **words, int wordcount); int cli_cmd_volume_info_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; cli_cmd_volume_get_ctx_t ctx = {0,}; cli_local_t *local = NULL; int sent = 0; int parse_error = 0; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; if ((wordcount == 2) || (wordcount == 3 && !strcmp (words[2], "all"))) { ctx.flags = GF_CLI_GET_NEXT_VOLUME; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_NEXT_VOLUME]; } else if (wordcount == 3) { ctx.flags = GF_CLI_GET_VOLUME; ctx.volname = (char *)words[2]; if (strlen (ctx.volname) > GD_VOLUME_NAME_MAX) { cli_out ("Invalid volume name"); goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOLUME]; } else { cli_usage_out (word->pattern); parse_error = 1; return -1; } local = cli_local_get (); if (!local) goto out; local->get_vol.flags = ctx.flags; if (ctx.volname) local->get_vol.volname = gf_strdup (ctx.volname); frame->local = local; if (proc->fn) { ret = proc->fn (frame, THIS, &ctx); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Getting Volume information failed!"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_sync_volume_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; int sent = 0; int parse_error = 0; dict_t *dict = NULL; cli_local_t *local = NULL; gf_answer_t answer = GF_ANSWER_NO; const char *question = "Sync volume may make data " "inaccessible while the sync " "is in progress. 
Do you want " "to continue?"; if ((wordcount < 3) || (wordcount > 4)) { cli_usage_out (word->pattern); parse_error = 1; goto out; } dict = dict_new (); if (!dict) goto out; if ((wordcount == 3) || !strcmp(words[3], "all")) { ret = dict_set_int32 (dict, "flags", (int32_t) GF_CLI_SYNC_ALL); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "failed to set" "flag"); goto out; } } else { ret = dict_set_str (dict, "volname", (char *) words[3]); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "failed to set " "volume"); goto out; } } ret = dict_set_str (dict, "hostname", (char *) words[2]); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "failed to set hostname"); goto out; } if (!(state->mode & GLUSTER_MODE_SCRIPT)) { answer = cli_cmd_get_confirmation (state, question); if (GF_ANSWER_NO == answer) { ret = 0; goto out; } } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SYNC_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; CLI_LOCAL_INIT (local, words, frame, dict); if (proc->fn) { ret = proc->fn (frame, THIS, dict); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume sync failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_create_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; char *brick_list = NULL; int32_t brick_count = 0; int32_t sub_count = 0; int32_t type = GF_CLUSTER_TYPE_NONE; cli_local_t *local = NULL; char *trans_type = NULL; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CREATE_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_volume_create_parse (state, words, wordcount, &options); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } ret = dict_get_str (options, "transport", &trans_type); if (ret) { gf_log("cli", GF_LOG_ERROR, "Unable to get transport type"); goto out; } if (state->mode & GLUSTER_MODE_WIGNORE) { ret = dict_set_int32 (options, "force", _gf_true); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to set force " "option"); goto out; } } CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume create failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_delete_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; char *volname = NULL; gf_answer_t answer = GF_ANSWER_NO; const char *question = NULL; int sent = 0; int parse_error = 0; cli_local_t *local = NULL; dict_t *dict = NULL; question = "Deleting volume will erase all information about the volume. 
" "Do you want to continue?"; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DELETE_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; dict = dict_new (); if (!dict) goto out; if (wordcount != 3) { cli_usage_out (word->pattern); parse_error = 1; goto out; } volname = (char *)words[2]; ret = dict_set_str (dict, "volname", volname); if (ret) { gf_log (THIS->name, GF_LOG_WARNING, "dict set failed"); goto out; } if (!strcmp (volname, GLUSTER_SHARED_STORAGE)) { question = "Deleting the shared storage volume" "(gluster_shared_storage), will affect features " "like snapshot scheduler, geo-replication " "and NFS-Ganesha. Do you still want to " "continue?"; } answer = cli_cmd_get_confirmation (state, question); if (GF_ANSWER_NO == answer) { ret = 0; goto out; } CLI_LOCAL_INIT (local, words, frame, dict); if (proc->fn) { ret = proc->fn (frame, THIS, dict); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume delete failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_start_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; int sent = 0; int parse_error = 0; dict_t *dict = NULL; int flags = 0; cli_local_t *local = NULL; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; if (wordcount < 3 || wordcount > 4) { cli_usage_out (word->pattern); parse_error = 1; goto out; } dict = dict_new (); if (!dict) { goto out; } if (!words[2]) goto out; ret = dict_set_str (dict, "volname", (char *)words[2]); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "dict set failed"); goto out; } if (wordcount == 4) { if (!strcmp("force", words[3])) { flags |= GF_CLI_FLAG_OP_FORCE; } else { ret = -1; cli_usage_out (word->pattern); parse_error = 1; goto out; } } ret = dict_set_int32 (dict, "flags", flags); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "dict set failed"); goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_START_VOLUME]; CLI_LOCAL_INIT (local, words, frame, dict); if (proc->fn) { ret = proc->fn (frame, THIS, dict); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume start failed"); } CLI_STACK_DESTROY (frame); return ret; } gf_answer_t cli_cmd_get_confirmation (struct cli_state *state, const char *question) { char answer[5] = {'\0', }; char flush = '\0'; size_t len; if (state->mode & GLUSTER_MODE_SCRIPT) return GF_ANSWER_YES; printf ("%s (y/n) ", question); if (fgets (answer, 4, stdin) == NULL) { cli_out("gluster cli read error"); goto out; } len = strlen (answer); if (len && answer [len - 1] == '\n'){ answer [--len] = '\0'; } else { do{ flush = getchar (); }while (flush != '\n'); } if (len > 3) goto out; if (!strcasecmp (answer, "y") || !strcasecmp (answer, "yes")) return GF_ANSWER_YES; else if (!strcasecmp (answer, "n") || !strcasecmp (answer, "no")) return GF_ANSWER_NO; out: cli_out ("Invalid input, please enter y/n"); return GF_ANSWER_NO; } int cli_cmd_volume_stop_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; int flags = 0; gf_answer_t answer = GF_ANSWER_NO; int sent = 0; int parse_error = 0; dict_t *dict = NULL; char *volname = NULL; cli_local_t *local = NULL; const char *question = "Stopping volume will make its data inaccessible. 
" "Do you want to continue?"; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; if (wordcount < 3 || wordcount > 4) { cli_usage_out (word->pattern); parse_error = 1; goto out; } volname = (char*) words[2]; dict = dict_new (); ret = dict_set_str (dict, "volname", volname); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "dict set failed"); goto out; } if (!strcmp (volname, GLUSTER_SHARED_STORAGE)) { question = "Stopping the shared storage volume" "(gluster_shared_storage), will affect features " "like snapshot scheduler, geo-replication " "and NFS-Ganesha. Do you still want to " "continue?"; } if (wordcount == 4) { if (!strcmp("force", words[3])) { flags |= GF_CLI_FLAG_OP_FORCE; } else { ret = -1; cli_usage_out (word->pattern); parse_error = 1; goto out; } } ret = dict_set_int32 (dict, "flags", flags); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "dict set failed"); goto out; } answer = cli_cmd_get_confirmation (state, question); if (GF_ANSWER_NO == answer) { ret = 0; goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STOP_VOLUME]; CLI_LOCAL_INIT (local, words, frame, dict); if (proc->fn) { ret = proc->fn (frame, THIS, dict); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume stop on '%s' failed", volname); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_rename_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *dict = NULL; int sent = 0; int parse_error = 0; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; dict = dict_new (); if (!dict) goto out; if (wordcount != 4) { cli_usage_out (word->pattern); parse_error = 1; goto out; } ret = dict_set_str (dict, "old-volname", (char *)words[2]); if (ret) goto out; ret = dict_set_str (dict, "new-volname", (char *)words[3]); if (ret) goto out; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RENAME_VOLUME]; if (proc->fn) { ret = proc->fn (frame, THIS, dict); } out: if (dict) dict_destroy (dict); if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume rename on '%s' failed", (char *)words[2]); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_defrag_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *dict = NULL; int sent = 0; int parse_error = 0; cli_local_t *local = NULL; #ifdef GF_SOLARIS_HOST_OS cli_out ("Command not supported on Solaris"); goto out; #endif frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_volume_defrag_parse (words, wordcount, &dict); if (ret) { cli_usage_out (word->pattern); parse_error = 1; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DEFRAG_VOLUME]; CLI_LOCAL_INIT (local, words, frame, dict); if (proc->fn) { ret = proc->fn (frame, THIS, dict); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume rebalance failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_reset_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int sent = 0; int parse_error = 0; int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; cli_local_t *local = NULL; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RESET_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); 
if (!frame) goto out; ret = cli_cmd_volume_reset_parse (words, wordcount, &options); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume reset failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_profile_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int sent = 0; int parse_error = 0; int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; cli_local_t *local = NULL; ret = cli_cmd_volume_profile_parse (words, wordcount, &options); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_PROFILE_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume profile failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_set_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int sent = 0; int parse_error = 0; int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; cli_local_t *local = NULL; char *op_errstr = NULL; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SET_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_volume_set_parse (state, words, wordcount, &options, &op_errstr); if (ret) { if (op_errstr) { cli_err ("%s", op_errstr); GF_FREE (op_errstr); } else cli_usage_out (word->pattern); parse_error = 1; goto out; } CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume set failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_add_brick_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; gf_answer_t answer = GF_ANSWER_NO; cli_local_t *local = NULL; const char *question = "Changing the 'stripe count' of the volume is " "not a supported feature. In some cases it may result in data " "loss on the volume. Also there may be issues with regular " "filesystem operations on the volume after the change. Do you " "really want to continue with 'stripe' count option ? 
"; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_volume_add_brick_parse (words, wordcount, &options, 0); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } /* TODO: there are challenges in supporting changing of stripe-count, until it is properly supported give warning to user */ if (dict_get (options, "stripe-count")) { answer = cli_cmd_get_confirmation (state, question); if (GF_ANSWER_NO == answer) { ret = 0; goto out; } } if (state->mode & GLUSTER_MODE_WIGNORE) { ret = dict_set_int32 (options, "force", _gf_true); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to set force " "option"); goto out; } } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ADD_BRICK]; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume add-brick failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_tier_validate_replica_type (dict_t *dict, int type) { int brick_count = -1; int replica_count = 1; int ret = -1; ret = dict_get_int32 (dict, "count", &brick_count); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get brick count"); goto out; } ret = dict_get_int32 (dict, "replica-count", &replica_count); if (ret) { gf_log ("cli", GF_LOG_DEBUG, "Failed to get replica count. " "Defaulting to one"); replica_count = 1; } /* * Change the calculation of sub_count once attach-tier support * disperse volume. * sub_count = disperse_count for disperse volume * */ if (brick_count % replica_count) { if (type == GF_CLUSTER_TYPE_REPLICATE) cli_err ("number of bricks is not a multiple of " "replica count"); else if (type == GF_CLUSTER_TYPE_DISPERSE) cli_err ("number of bricks is not a multiple of " "disperse count"); else cli_err ("number of bricks given doesn't match " "required count"); ret = -1; goto out; } ret = 0; out: return ret; } int do_cli_cmd_volume_attach_tier (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; cli_local_t *local = NULL; int type = 0; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_volume_add_brick_parse (words, wordcount, &options, &type); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } /* * Merge this check when attach-tier has it's own cli parse function. 
*/ ret = cli_tier_validate_replica_type (options, type); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } if (state->mode & GLUSTER_MODE_WIGNORE) { ret = dict_set_int32 (options, "force", _gf_true); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to set force " "option"); goto out; } } ret = dict_set_int32 (options, "attach-tier", 1); if (ret) goto out; ret = dict_set_int32 (options, "hot-type", type); if (ret) goto out; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ATTACH_TIER]; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("attach-tier failed"); } CLI_STACK_DESTROY (frame); return ret; } int do_cli_cmd_volume_detach_tier (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; gf_answer_t answer = GF_ANSWER_NO; cli_local_t *local = NULL; int need_question = 0; const char *question = "Removing tier can result in data loss. " "Do you want to Continue?"; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_volume_detach_tier_parse(words, wordcount, &options, &need_question); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } ret = dict_set_int32 (options, "force", 1); if (ret) goto out; ret = dict_set_int32 (options, "count", 0); if (ret) goto out; if (!(state->mode & GLUSTER_MODE_SCRIPT) && need_question) { /* we need to ask question only in case of 'commit or force' */ answer = cli_cmd_get_confirmation (state, question); if (GF_ANSWER_NO == answer) { ret = 0; goto out; } } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DETACH_TIER]; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume detach-tier failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_tier_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; call_frame_t *frame = NULL; dict_t *options = NULL; char *volname = NULL; rpc_clnt_procedure_t *proc = NULL; cli_local_t *local = NULL; int i = 0; if (wordcount < 4) { cli_usage_out (word->pattern); if (wordcount == 3 && !strcmp(words[2], "help")) ret = 0; goto out; } if (!strcmp(words[1], "detach-tier")) { ret = do_cli_cmd_volume_detach_tier (state, word, words, wordcount); goto out; } else if (!strcmp(words[3], "detach")) { for (i = 3; i < wordcount; i++) words[i] = words[i+1]; ret = do_cli_cmd_volume_detach_tier (state, word, words, wordcount-1); goto out; } else if (!strcmp(words[1], "attach-tier")) { ret = do_cli_cmd_volume_attach_tier (state, word, words, wordcount); goto out; } else if (!strcmp(words[3], "attach")) { for (i = 3; i < wordcount; i++) words[i] = words[i+1]; ret = do_cli_cmd_volume_attach_tier (state, word, words, wordcount-1); goto out; } ret = cli_cmd_volume_tier_parse (words, wordcount, &options); if (ret) { cli_usage_out (word->pattern); goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_TIER]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_out ("Tier command failed"); } if (options) dict_unref (options); 
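        /* Note: "options" is populated only when control reaches
         * cli_cmd_volume_tier_parse above; the attach/detach spellings
         * delegate to do_cli_cmd_volume_attach_tier or
         * do_cli_cmd_volume_detach_tier and jump to out: before any dict is
         * created in this function, so the NULL check keeps this unref from
         * running on those paths. */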
return ret; } static int gf_cli_create_auxiliary_mount (char *volname) { int ret = -1; char mountdir[PATH_MAX] = {0,}; char pidfile_path[PATH_MAX] = {0,}; char logfile[PATH_MAX] = {0,}; char qpid [16] = {0,}; char *sockpath = NULL; GLUSTERFS_GET_AUX_MOUNT_PIDFILE (pidfile_path, volname); if (gf_is_service_running (pidfile_path, NULL)) { gf_log ("cli", GF_LOG_DEBUG, "Aux mount of volume %s is running" " already", volname); ret = 0; goto out; } GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (mountdir, volname, "/"); ret = sys_mkdir (mountdir, 0777); if (ret && errno != EEXIST) { gf_log ("cli", GF_LOG_ERROR, "Failed to create auxiliary mount " "directory %s. Reason : %s", mountdir, strerror (errno)); goto out; } snprintf (logfile, PATH_MAX-1, "%s/quota-mount-%s.log", DEFAULT_LOG_FILE_DIRECTORY, volname); snprintf(qpid, 15, "%d", GF_CLIENT_PID_QUOTA_MOUNT); if (global_state->glusterd_sock) { sockpath = global_state->glusterd_sock; } else { sockpath = DEFAULT_GLUSTERD_SOCKFILE; } ret = runcmd (SBIN_DIR"/glusterfs", "--volfile-server", sockpath, "--volfile-server-transport", "unix", "--volfile-id", volname, "-l", logfile, "-p", pidfile_path, "--client-pid", qpid, mountdir, NULL); if (ret) { gf_log ("cli", GF_LOG_WARNING, "failed to mount glusterfs " "client. Please check the log file %s for more details", logfile); ret = -1; goto out; } ret = 0; out: return ret; } static int cli_stage_quota_op (char *volname, int op_code) { int ret = -1; switch (op_code) { case GF_QUOTA_OPTION_TYPE_ENABLE: case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE: case GF_QUOTA_OPTION_TYPE_LIMIT_OBJECTS: case GF_QUOTA_OPTION_TYPE_REMOVE: case GF_QUOTA_OPTION_TYPE_REMOVE_OBJECTS: case GF_QUOTA_OPTION_TYPE_LIST: ret = gf_cli_create_auxiliary_mount (volname); if (ret) { cli_err ("quota: Could not start quota " "auxiliary mount"); goto out; } ret = 0; break; default: ret = 0; break; } out: return ret; } int cli_get_soft_limit (dict_t *options, const char **words, dict_t *xdata) { call_frame_t *frame = NULL; cli_local_t *local = NULL; rpc_clnt_procedure_t *proc = NULL; char *default_sl = NULL; char *default_sl_dup = NULL; int ret = -1; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) { ret = -1; goto out; } //We need a ref on @options to prevent CLI_STACK_DESTROY //from destroying it prematurely. dict_ref (options); CLI_LOCAL_INIT (local, words, frame, options); proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA]; ret = proc->fn (frame, THIS, options); ret = dict_get_str (options, "default-soft-limit", &default_sl); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get default soft limit"); goto out; } default_sl_dup = gf_strdup (default_sl); if (!default_sl_dup) { ret = -1; goto out; } ret = dict_set_dynstr (xdata, "default-soft-limit", default_sl_dup); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to set default soft limit"); GF_FREE (default_sl_dup); goto out; } out: CLI_STACK_DESTROY (frame); return ret; } /* Checks if at least one limit has been set on the volume * * Returns true if at least one limit is set. Returns false otherwise. 
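 *
 * The check works directly off the on-disk quota configuration: it opens
 * <GLUSTERD_DEFAULT_WORKDIR>/vols/<volname>/quota.conf, reads the version
 * header, and then scans for at least one gfid entry whose type matches the
 * requested listing (usage limits for a plain list, object limits for
 * list-objects).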
*/ gf_boolean_t _limits_set_on_volume (char *volname, int type) { gf_boolean_t limits_set = _gf_false; int ret = -1; char quota_conf_file[PATH_MAX] = {0,}; int fd = -1; char buf[16] = {0,}; float version = 0.0f; char gfid_type_stored = 0; char gfid_type = 0; /* TODO: fix hardcoding; Need to perform an RPC call to glusterd * to fetch working directory */ snprintf (quota_conf_file, sizeof quota_conf_file, "%s/vols/%s/quota.conf", GLUSTERD_DEFAULT_WORKDIR, volname); fd = open (quota_conf_file, O_RDONLY); if (fd == -1) goto out; ret = quota_conf_read_version (fd, &version); if (ret) goto out; if (type == GF_QUOTA_OPTION_TYPE_LIST) gfid_type = GF_QUOTA_CONF_TYPE_USAGE; else gfid_type = GF_QUOTA_CONF_TYPE_OBJECTS; /* Try to read atleast one gfid of type 'gfid_type' */ while (1) { ret = quota_conf_read_gfid (fd, buf, &gfid_type_stored, version); if (ret <= 0) break; if (gfid_type_stored == gfid_type) { limits_set = _gf_true; break; } } out: if (fd != -1) sys_close (fd); return limits_set; } /* Checks if the mount is connected to the bricks * * Returns true if connected and false if not */ gf_boolean_t _quota_aux_mount_online (char *volname) { int ret = 0; char mount_path[PATH_MAX + 1] = {0,}; struct stat buf = {0,}; GF_ASSERT (volname); /* Try to create the aux mount before checking if bricks are online */ ret = gf_cli_create_auxiliary_mount (volname); if (ret) { cli_err ("quota: Could not start quota auxiliary mount"); return _gf_false; } GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (mount_path, volname, "/"); ret = sys_stat (mount_path, &buf); if (ret) { if (ENOTCONN == errno) { cli_err ("quota: Cannot connect to bricks. Check if " "bricks are online."); } else { cli_err ("quota: Error on quota auxiliary mount (%s).", strerror (errno)); } return _gf_false; } return _gf_true; } int cli_cmd_quota_handle_list_all (const char **words, dict_t *options) { int all_failed = 1; int count = 0; int ret = -1; rpc_clnt_procedure_t *proc = NULL; cli_local_t *local = NULL; call_frame_t *frame = NULL; dict_t *xdata = NULL; char *gfid_str = NULL; char *volname = NULL; char *volname_dup = NULL; unsigned char buf[16] = {0}; int fd = -1; char quota_conf_file[PATH_MAX] = {0}; gf_boolean_t xml_err_flag = _gf_false; char err_str[NAME_MAX] = {0,}; int32_t type = 0; char gfid_type = 0; float version = 0.0f; xdata = dict_new (); if (!xdata) { ret = -1; goto out; } ret = dict_get_str (options, "volname", &volname); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get volume name"); goto out; } ret = dict_get_int32 (options, "type", &type); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get quota option type"); goto out; } ret = dict_set_int32 (xdata, "type", type); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to set type in xdata"); goto out; } ret = cli_get_soft_limit (options, words, xdata); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to fetch default " "soft-limit"); goto out; } /* Check if at least one limit is set on volume. No need to check for * quota enabled as cli_get_soft_limit() handles that */ if (!_limits_set_on_volume (volname, type)) { snprintf (err_str, sizeof (err_str), "No%s quota configured on" " volume %s", (type == GF_QUOTA_OPTION_TYPE_LIST) ? 
"" : " inode", volname); if (global_state->mode & GLUSTER_MODE_XML) { xml_err_flag = _gf_true; } else { cli_out ("quota: %s", err_str); } ret = 0; goto out; } /* Check if the mount is online before doing any listing */ if (!_quota_aux_mount_online (volname)) { ret = -1; goto out; } frame = create_frame (THIS, THIS->ctx->pool); if (!frame) { ret = -1; goto out; } volname_dup = gf_strdup (volname); if (!volname_dup) { ret = -1; goto out; } ret = dict_set_dynstr (xdata, "volume-uuid", volname_dup); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to set volume-uuid"); GF_FREE (volname_dup); goto out; } //TODO: fix hardcoding; Need to perform an RPC call to glusterd //to fetch working directory snprintf (quota_conf_file, sizeof quota_conf_file, "%s/vols/%s/quota.conf", GLUSTERD_DEFAULT_WORKDIR, volname); fd = open (quota_conf_file, O_RDONLY); if (fd == -1) { //This may because no limits were yet set on the volume gf_log ("cli", GF_LOG_TRACE, "Unable to open " "quota.conf"); ret = 0; goto out; } ret = quota_conf_read_version (fd, &version); if (ret) goto out; CLI_LOCAL_INIT (local, words, frame, xdata); proc = &cli_quotad_clnt.proctable[GF_AGGREGATOR_GETLIMIT]; gfid_str = GF_CALLOC (1, gf_common_mt_char, 64); if (!gfid_str) { ret = -1; goto out; } for (count = 0;; count++) { ret = quota_conf_read_gfid (fd, buf, &gfid_type, version); if (ret == 0) { break; } else if (ret < 0) { gf_log (THIS->name, GF_LOG_CRITICAL, "Quota " "configuration store may be corrupt."); goto out; } if ((type == GF_QUOTA_OPTION_TYPE_LIST && gfid_type == GF_QUOTA_CONF_TYPE_OBJECTS) || (type == GF_QUOTA_OPTION_TYPE_LIST_OBJECTS && gfid_type == GF_QUOTA_CONF_TYPE_USAGE)) continue; uuid_utoa_r (buf, gfid_str); ret = dict_set_str (xdata, "gfid", gfid_str); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to set gfid"); goto out; } ret = proc->fn (frame, THIS, xdata); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get quota " "limits for %s", uuid_utoa ((unsigned char*)buf)); } dict_del (xdata, "gfid"); all_failed = all_failed && ret; } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_vol_quota_limit_list_end (local); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Error in printing " "xml output"); goto out; } } if (count > 0) { ret = all_failed? -1: 0; } else { ret = 0; } out: if (xml_err_flag) { ret = cli_xml_output_str ("volQuota", NULL, -1, 0, err_str); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Error outputting in " "xml format"); } } if (fd != -1) { sys_close (fd); } GF_FREE (gfid_str); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Could not fetch and display quota" " limits"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_bitrot_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; int parse_err = 0; call_frame_t *frame = NULL; dict_t *options = NULL; cli_local_t *local = NULL; rpc_clnt_procedure_t *proc = NULL; int sent = 0; ret = cli_cmd_bitrot_parse (words, wordcount, &options); if (ret < 0) { cli_usage_out (word->pattern); parse_err = 1; goto out; } frame = create_frame (THIS, THIS->ctx->pool); if (!frame) { ret = -1; goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_BITROT]; if (proc == NULL) { ret = -1; goto out; } CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_err == 0)) cli_err ("Bit rot command failed. 
Please check the cli " "logs for more details"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_quota_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = 0; int parse_err = 0; int32_t type = 0; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; gf_answer_t answer = GF_ANSWER_NO; cli_local_t *local = NULL; int sent = 0; char *volname = NULL; const char *question = "Disabling quota will delete all the quota " "configuration. Do you want to continue?"; //parse **words into options dictionary if (strcmp (words[1], "inode-quota") == 0) { ret = cli_cmd_inode_quota_parse (words, wordcount, &options); if (ret < 0) { cli_usage_out (word->pattern); parse_err = 1; goto out; } } else { ret = cli_cmd_quota_parse (words, wordcount, &options); if (ret < 0) { cli_usage_out (word->pattern); parse_err = 1; goto out; } } ret = dict_get_int32 (options, "type", &type); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get opcode"); goto out; } //handle quota-disable and quota-list-all different from others switch (type) { case GF_QUOTA_OPTION_TYPE_DISABLE: answer = cli_cmd_get_confirmation (state, question); if (answer == GF_ANSWER_NO) goto out; break; case GF_QUOTA_OPTION_TYPE_LIST: case GF_QUOTA_OPTION_TYPE_LIST_OBJECTS: if (wordcount != 4) break; ret = cli_cmd_quota_handle_list_all (words, options); goto out; default: break; } ret = dict_get_str (options, "volname", &volname); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get volume name"); goto out; } //create auxiliary mount need for quota commands that operate on path ret = cli_stage_quota_op (volname, type); if (ret) goto out; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) { ret = -1; goto out; } CLI_LOCAL_INIT (local, words, frame, options); proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA]; if (proc->fn) ret = proc->fn (frame, THIS, options); out: if (ret) { cli_cmd_sent_status_get (&sent); if (sent == 0 && parse_err == 0) cli_out ("Quota command failed. Please check the cli " "logs for more details"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_remove_brick_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; gf_answer_t answer = GF_ANSWER_NO; int sent = 0; int parse_error = 0; int need_question = 0; cli_local_t *local = NULL; char *volname = NULL; const char *question = "Removing brick(s) can result in data loss. " "Do you want to Continue?"; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_volume_remove_brick_parse (words, wordcount, &options, &need_question); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } ret = dict_get_str (options, "volname", &volname); if (ret || !volname) { gf_log ("cli", GF_LOG_ERROR, "Failed to fetch volname"); ret = -1; goto out; } if (!strcmp (volname, GLUSTER_SHARED_STORAGE)) { question = "Removing brick from the shared storage volume" "(gluster_shared_storage), will affect features " "like snapshot scheduler, geo-replication " "and NFS-Ganesha. 
Do you still want to " "continue?"; need_question = _gf_true; } if (!(state->mode & GLUSTER_MODE_SCRIPT) && need_question) { /* we need to ask question only in case of 'commit or force' */ answer = cli_cmd_get_confirmation (state, question); if (GF_ANSWER_NO == answer) { ret = 0; goto out; } } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REMOVE_BRICK]; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume remove-brick failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_replace_brick_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; cli_local_t *local = NULL; #ifdef GF_SOLARIS_HOST_OS cli_out ("Command not supported on Solaris"); goto out; #endif proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REPLACE_BRICK]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_volume_replace_brick_parse (words, wordcount, &options); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume replace-brick failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_set_transport_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { cli_cmd_broadcast_response (0); return 0; } int cli_cmd_volume_top_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; cli_local_t *local = NULL; ret = cli_cmd_volume_top_parse (words, wordcount, &options); if (ret) { parse_error = 1; cli_usage_out (word->pattern); goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_TOP_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume top failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_log_rotate_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; cli_local_t *local = NULL; if (!((wordcount == 4) || (wordcount == 5))) { cli_usage_out (word->pattern); parse_error = 1; goto out; } if (!((strcmp ("rotate", words[2]) == 0) || (strcmp ("rotate", words[3]) == 0))) { cli_usage_out (word->pattern); parse_error = 1; goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LOG_ROTATE]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_log_rotate_parse (words, wordcount, &options); if (ret) goto out; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume log rotate failed"); } CLI_STACK_DESTROY (frame); return ret; } #if 
(SYNCDAEMON_COMPILE) static int cli_check_gsync_present () { char buff[PATH_MAX] = {0, }; runner_t runner = {0,}; char *ptr = NULL; int ret = 0; ret = setenv ("_GLUSTERD_CALLED_", "1", 1); if (-1 == ret) { gf_log ("", GF_LOG_WARNING, "setenv syscall failed, hence could" "not assert if geo-replication is installed"); goto out; } runinit (&runner); runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "--version", NULL); runner_redir (&runner, STDOUT_FILENO, RUN_PIPE); ret = runner_start (&runner); if (ret == -1) { gf_log ("", GF_LOG_INFO, "geo-replication not installed"); goto out; } ptr = fgets(buff, sizeof(buff), runner_chio (&runner, STDOUT_FILENO)); if (ptr) { if (!strstr (buff, "gsyncd")) { ret = -1; goto out; } } else { ret = -1; goto out; } ret = runner_end (&runner); if (ret) gf_log ("", GF_LOG_ERROR, "geo-replication not installed"); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret ? -1 : 0; } void cli_cmd_check_gsync_exists_cbk (struct cli_cmd *this) { int ret = 0; ret = cli_check_gsync_present (); if (ret) this->disable = _gf_true; } #endif int cli_cmd_volume_gsync_set_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = 0; int parse_err = 0; dict_t *options = NULL; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; cli_local_t *local = NULL; proc = &cli_rpc_prog->proctable [GLUSTER_CLI_GSYNC_SET]; frame = create_frame (THIS, THIS->ctx->pool); if (frame == NULL) { ret = -1; goto out; } ret = cli_cmd_gsync_set_parse (words, wordcount, &options); if (ret) { cli_usage_out (word->pattern); parse_err = 1; goto out; } CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) ret = proc->fn (frame, THIS, options); out: if (ret && parse_err == 0) cli_out (GEOREP" command failed"); CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_status_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *dict = NULL; uint32_t cmd = 0; cli_local_t *local = NULL; ret = cli_cmd_volume_status_parse (words, wordcount, &dict); if (ret) { cli_usage_out (word->pattern); goto out; } ret = dict_get_uint32 (dict, "cmd", &cmd); if (ret) goto out; if (!(cmd & GF_CLI_STATUS_ALL)) { /* for one volume or brick */ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STATUS_VOLUME]; } else { /* volume status all or all detail */ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STATUS_ALL]; } if (!proc->fn) goto out; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; CLI_LOCAL_INIT (local, words, frame, dict); ret = proc->fn (frame, THIS, dict); out: CLI_STACK_DESTROY (frame); return ret; } int cli_get_detail_status (dict_t *dict, int i, cli_volume_status_t *status) { uint64_t free = 0; uint64_t total = 0; char key[1024] = {0}; int ret = 0; memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.free", i); ret = dict_get_uint64 (dict, key, &free); status->free = gf_uint64_2human_readable (free); if (!status->free) goto out; memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.total", i); ret = dict_get_uint64 (dict, key, &total); status->total = gf_uint64_2human_readable (total); if (!status->total) goto out; #ifdef GF_LINUX_HOST_OS memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.device", i); ret = dict_get_str (dict, key, &(status->device)); if (ret) status->device = NULL; #endif memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), 
"brick%d.block_size", i); ret = dict_get_uint64 (dict, key, &(status->block_size)); if (ret) { ret = 0; status->block_size = 0; } #ifdef GF_LINUX_HOST_OS memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.mnt_options", i); ret = dict_get_str (dict, key, &(status->mount_options)); if (ret) status->mount_options = NULL; memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.fs_name", i); ret = dict_get_str (dict, key, &(status->fs_name)); if (ret) { ret = 0; status->fs_name = NULL; } memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.inode_size", i); ret = dict_get_str (dict, key, &(status->inode_size)); if (ret) status->inode_size = NULL; #endif /* GF_LINUX_HOST_OS */ memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.total_inodes", i); ret = dict_get_uint64 (dict, key, &(status->total_inodes)); if (ret) status->total_inodes = 0; memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.free_inodes", i); ret = dict_get_uint64 (dict, key, &(status->free_inodes)); if (ret) { ret = 0; status->free_inodes = 0; } out: return ret; } void cli_print_detailed_status (cli_volume_status_t *status) { cli_out ("%-20s : %-20s", "Brick", status->brick); if (status->online) { cli_out ("%-20s : %-20d", "TCP Port", status->port); cli_out ("%-20s : %-20d", "RDMA Port", status->rdma_port); } else { cli_out ("%-20s : %-20s", "TCP Port", "N/A"); cli_out ("%-20s : %-20s", "RDMA Port", "N/A"); } cli_out ("%-20s : %-20c", "Online", (status->online) ? 'Y' : 'N'); cli_out ("%-20s : %-20s", "Pid", status->pid_str); #ifdef GF_LINUX_HOST_OS if (status->fs_name) cli_out ("%-20s : %-20s", "File System", status->fs_name); else cli_out ("%-20s : %-20s", "File System", "N/A"); if (status->device) cli_out ("%-20s : %-20s", "Device", status->device); else cli_out ("%-20s : %-20s", "Device", "N/A"); if (status->mount_options) { cli_out ("%-20s : %-20s", "Mount Options", status->mount_options); } else { cli_out ("%-20s : %-20s", "Mount Options", "N/A"); } if (status->inode_size) { cli_out ("%-20s : %-20s", "Inode Size", status->inode_size); } else { cli_out ("%-20s : %-20s", "Inode Size", "N/A"); } #endif if (status->free) cli_out ("%-20s : %-20s", "Disk Space Free", status->free); else cli_out ("%-20s : %-20s", "Disk Space Free", "N/A"); if (status->total) cli_out ("%-20s : %-20s", "Total Disk Space", status->total); else cli_out ("%-20s : %-20s", "Total Disk Space", "N/A"); if (status->total_inodes) { cli_out ("%-20s : %-20"GF_PRI_INODE, "Inode Count", status->total_inodes); } else { cli_out ("%-20s : %-20s", "Inode Count", "N/A"); } if (status->free_inodes) { cli_out ("%-20s : %-20"GF_PRI_INODE, "Free Inodes", status->free_inodes); } else { cli_out ("%-20s : %-20s", "Free Inodes", "N/A"); } } int cli_print_brick_status (cli_volume_status_t *status) { int fieldlen = CLI_VOL_STATUS_BRICK_LEN; int bricklen = 0; char *p = NULL; int num_spaces = 0; p = status->brick; bricklen = strlen (p); while (bricklen > 0) { if (bricklen > fieldlen) { cli_out ("%.*s", fieldlen, p); p += fieldlen; bricklen -= fieldlen; } else { num_spaces = (fieldlen - bricklen) + 1; printf ("%s", p); while (num_spaces-- != 0) printf (" "); if (status->port || status->rdma_port) { if (status->online) cli_out ("%-10d%-11d%-8c%-5s", status->port, status->rdma_port, status->online?'Y':'N', status->pid_str); else cli_out ("%-10s%-11s%-8c%-5s", "N/A", "N/A", status->online?'Y':'N', status->pid_str); } else cli_out ("%-10s%-11s%-8c%-5s", "N/A", "N/A", status->online?'Y':'N', 
status->pid_str); bricklen = 0; } } return 0; } #define NEEDS_GLFS_HEAL(op) ((op == GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE) || \ (op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) || \ (op == GF_SHD_OP_INDEX_SUMMARY) || \ (op == GF_SHD_OP_SPLIT_BRAIN_FILES)) int cli_launch_glfs_heal (int heal_op, dict_t *options) { char buff[PATH_MAX] = {0}; runner_t runner = {0}; char *filename = NULL; char *hostname = NULL; char *path = NULL; char *volname = NULL; char *out = NULL; int ret = 0; runinit (&runner); ret = dict_get_str (options, "volname", &volname); runner_add_args (&runner, SBIN_DIR"/glfsheal", volname, NULL); runner_redir (&runner, STDOUT_FILENO, RUN_PIPE); switch (heal_op) { case GF_SHD_OP_INDEX_SUMMARY: break; case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE: ret = dict_get_str (options, "file", &filename); runner_add_args (&runner, "bigger-file", filename, NULL); break; case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK: ret = dict_get_str (options, "heal-source-hostname", &hostname); ret = dict_get_str (options, "heal-source-brickpath", &path); runner_add_args (&runner, "source-brick", NULL); runner_argprintf (&runner, "%s:%s", hostname, path); if (dict_get_str (options, "file", &filename) == 0) runner_argprintf (&runner, filename); break; case GF_SHD_OP_SPLIT_BRAIN_FILES: runner_add_args (&runner, "split-brain-info", NULL); break; default: ret = -1; } ret = runner_start (&runner); if (ret == -1) goto out; while ((out = fgets (buff, sizeof(buff), runner_chio (&runner, STDOUT_FILENO)))) { printf ("%s", out); } ret = runner_end (&runner); ret = WEXITSTATUS (ret); out: return ret; } int cli_cmd_volume_heal_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; int sent = 0; int parse_error = 0; dict_t *options = NULL; xlator_t *this = NULL; cli_local_t *local = NULL; int heal_op = 0; this = THIS; frame = create_frame (this, this->ctx->pool); if (!frame) goto out; if (wordcount < 3) { cli_usage_out (word->pattern); parse_error = 1; goto out; } ret = cli_cmd_volume_heal_options_parse (words, wordcount, &options); if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } ret = dict_get_int32 (options, "heal-op", &heal_op); if (ret < 0) goto out; if (NEEDS_GLFS_HEAL (heal_op)) { ret = cli_launch_glfs_heal (heal_op, options); if (ret == -1) goto out; } else { proc = &cli_rpc_prog->proctable[GLUSTER_CLI_HEAL_VOLUME]; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume heal failed."); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_statedump_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; cli_local_t *local = NULL; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; if (wordcount < 3) { cli_usage_out (word->pattern); parse_error = 1; goto out; } if (wordcount >= 3) { ret = cli_cmd_volume_statedump_options_parse (words, wordcount, &options); if (ret) { parse_error = 1; gf_log ("cli", GF_LOG_ERROR, "Error parsing " "statedump options"); cli_out ("Error parsing options"); cli_usage_out (word->pattern); } } ret = dict_set_str (options, "volname", (char *)words[2]); if (ret) goto out; proc = 
&cli_rpc_prog->proctable[GLUSTER_CLI_STATEDUMP_VOLUME]; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume statedump failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_list_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; call_frame_t *frame = NULL; rpc_clnt_procedure_t *proc = NULL; int sent = 0; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LIST_VOLUME]; if (proc->fn) { ret = proc->fn (frame, THIS, NULL); } out: if (ret) { cli_cmd_sent_status_get (&sent); if (sent == 0) cli_out ("Volume list failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_clearlocks_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; cli_local_t *local = NULL; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; if (wordcount < 7 || wordcount > 8) { cli_usage_out (word->pattern); parse_error = 1; goto out; } ret = cli_cmd_volume_clrlks_opts_parse (words, wordcount, &options); if (ret) { parse_error = 1; gf_log ("cli", GF_LOG_ERROR, "Error parsing " "clear-locks options"); cli_out ("Error parsing options"); cli_usage_out (word->pattern); } ret = dict_set_str (options, "volname", (char *)words[2]); if (ret) goto out; ret = dict_set_str (options, "path", (char *)words[3]); if (ret) goto out; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CLRLOCKS_VOLUME]; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume clear-locks failed"); } CLI_STACK_DESTROY (frame); return ret; } int cli_cmd_volume_barrier_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_error = 0; cli_local_t *local = NULL; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; if (wordcount != 4) { cli_usage_out (word->pattern); parse_error = 1; goto out; } options = dict_new(); if (!options) { ret = -1; goto out; } ret = dict_set_str(options, "volname", (char *)words[2]); if (ret) goto out; ret = dict_set_str (options, "barrier", (char *)words[3]); if (ret) goto out; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_BARRIER_VOLUME]; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) ret = proc->fn (frame, THIS, options); out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_err ("Volume barrier failed"); } CLI_STACK_DESTROY (frame); if (options) dict_unref (options); return ret; } int cli_cmd_volume_getopt_cbk (struct cli_state *state, struct cli_cmd_word *word, const char **words, int wordcount) { int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; int sent = 0; int parse_err = 0; cli_local_t *local = NULL; if (wordcount != 4) { cli_usage_out (word->pattern); parse_err = 1; goto out; } frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; options = dict_new (); if (!options) goto out; ret = dict_set_str (options, 
"volname", (char *)words[2]); if (ret) goto out; ret = dict_set_str (options, "key", (char *)words[3]); if (ret) goto out; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOL_OPT]; CLI_LOCAL_INIT (local, words, frame, options); if (proc->fn) ret = proc->fn (frame, THIS, options); out: if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_err == 0)) cli_err ("Volume get option failed"); } CLI_STACK_DESTROY (frame); if (options) dict_unref (options); return ret; } struct cli_cmd volume_cmds[] = { { "volume info [all|]", cli_cmd_volume_info_cbk, "list information of all volumes"}, { "volume create [stripe ] " "[replica [arbiter ]] " "[disperse []] [disperse-data ] [redundancy ] " "[transport ] " #ifdef HAVE_BD_XLATOR "?" #endif "... [force]", cli_cmd_volume_create_cbk, "create a new volume of specified type with mentioned bricks"}, { "volume delete ", cli_cmd_volume_delete_cbk, "delete volume specified by "}, { "volume start [force]", cli_cmd_volume_start_cbk, "start volume specified by "}, { "volume stop [force]", cli_cmd_volume_stop_cbk, "stop volume specified by "}, /*{ "volume rename ", cli_cmd_volume_rename_cbk, "rename volume to "},*/ { "volume tier status\n" "volume tier start [force]" "volume tier attach [] ...\n" "volume tier detach \n", cli_cmd_volume_tier_cbk, "Tier translator specific operations."}, { "volume attach-tier [] ...", cli_cmd_volume_tier_cbk, "NOTE: this is old syntax, will be depreciated in next release. " "Please use gluster volume tier attach " "[] ..."}, { "volume detach-tier " " ", cli_cmd_volume_tier_cbk, "NOTE: this is old syntax, will be depreciated in next release. " "Please use gluster volume tier detach " "{start|stop|commit} [force]"}, { "volume add-brick [ ] ... [force]", cli_cmd_volume_add_brick_cbk, "add brick to volume "}, { "volume remove-brick [replica ] ..." " ", cli_cmd_volume_remove_brick_cbk, "remove brick from volume "}, { "volume rebalance {{fix-layout start} | {start [force]|stop|status}}", cli_cmd_volume_defrag_cbk, "rebalance operations"}, { "volume replace-brick " "{commit force}", cli_cmd_volume_replace_brick_cbk, "replace-brick operations"}, /*{ "volume set-transport [] ...", cli_cmd_volume_set_transport_cbk, "set transport type for volume "},*/ { "volume set ", cli_cmd_volume_set_cbk, "set options for volume "}, { "volume help", cli_cmd_volume_help_cbk, "display help for the volume command"}, { "volume log rotate [BRICK]", cli_cmd_log_rotate_cbk, "rotate the log file for corresponding volume/brick"}, { "volume log rotate [BRICK]", cli_cmd_log_rotate_cbk, "rotate the log file for corresponding volume/brick" " NOTE: This is an old syntax, will be deprecated from next release."}, { "volume sync [all|]", cli_cmd_sync_volume_cbk, "sync the volume information from a peer"}, { "volume reset [option] [force]", cli_cmd_volume_reset_cbk, "reset all the reconfigured options"}, #if (SYNCDAEMON_COMPILE) {"volume "GEOREP" [] [] {create [[ssh-port n] [[no-verify]|[push-pem]]] [force]" "|start [force]|stop [force]|pause [force]|resume [force]|config|status [detail]|delete} [options...]", cli_cmd_volume_gsync_set_cbk, "Geo-sync operations", cli_cmd_check_gsync_exists_cbk}, #endif { "volume profile {start|info [peek|incremental [peek]|cumulative|clear]|stop} [nfs]", cli_cmd_volume_profile_cbk, "volume profile operations"}, { "volume quota {enable|disable|list [ ...]| " "list-objects [ ...] 
| remove <path>| remove-objects <path> | " "default-soft-limit <percent>} |\n" "volume quota <VOLNAME> {limit-usage <path> <size> [<percent>]} |\n" "volume quota <VOLNAME> {limit-objects <path> <number> [<percent>]} |\n" "volume quota <VOLNAME> {alert-time|soft-timeout|hard-timeout} {