summaryrefslogtreecommitdiffstats
path: root/sanity
diff options
context:
space:
mode:
Diffstat (limited to 'sanity')
-rwxr-xr-xsanity/dev_sanity/counter.sh17
-rw-r--r--sanity/dev_sanity/creat_struct.sh11
-rwxr-xr-xsanity/dev_sanity/inotify.c94
-rwxr-xr-xsanity/dev_sanity/sanity_check.sh92
-rwxr-xr-xsanity/dev_sanity/sanity_test.sh464
-rw-r--r--sanity/dev_sanity/structure.txt187
6 files changed, 865 insertions, 0 deletions
diff --git a/sanity/dev_sanity/counter.sh b/sanity/dev_sanity/counter.sh
new file mode 100755
index 0000000..37df892
--- /dev/null
+++ b/sanity/dev_sanity/counter.sh
@@ -0,0 +1,17 @@
+#set -x
+# Run counter for the sanity harness: the counter's current value is encoded
+# as the NAME of a single empty 5-digit file under /sanity/test/counter.
+# Each invocation deletes the old file, echoes the old value, and creates a
+# file named old+1.
+#check for file named in 5-digit numbers and increment it - sanity_counter
+cd /sanity/test/counter
+ls [0-9][0-9][0-9][0-9][0-9] | while read fn;
+do
+id=$fn
+new=`expr $id + 1`
+rm $id
+echo $id #sanity_id
+# NOTE(review): on overflow (reaching 100000) the counter wraps to 10000,
+# not to the next value -- presumably to stay 5 digits wide; confirm this
+# reset value is intended.
+if [ $new -eq 100000 ]
+ then
+ touch 10000
+
+ else
+ touch $new
+fi
+done
diff --git a/sanity/dev_sanity/creat_struct.sh b/sanity/dev_sanity/creat_struct.sh
new file mode 100644
index 0000000..9be6a68
--- /dev/null
+++ b/sanity/dev_sanity/creat_struct.sh
@@ -0,0 +1,11 @@
+mkdir -p /sanity/test
+mkdir -p /sanity/test/archive/{afr,dht,stripe}
+mkdir -p /sanity/test/build/{afr,dht,stripe}
+mkdir -p /sanity/test/counter
+mkdir -p /sanity/test/incoming
+mkdir -p /sanity/test/install/{afr,dht,stripe}
+mkdir -p /sanity/test/queue
+mkdir -p /sanity/test/results/{afr,dht,stripe}
+mkdir -p /sanity/test/spec
+mkdir -p /sanity/test/tarball
+mkdir -p /sanity/test/trash
diff --git a/sanity/dev_sanity/inotify.c b/sanity/dev_sanity/inotify.c
new file mode 100755
index 0000000..5e2311f
--- /dev/null
+++ b/sanity/dev_sanity/inotify.c
@@ -0,0 +1,94 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/inotify.h>
+/* This will watch for any new arrivals at TARGET directory and if found,it will invoke the SANITY_TEST script. */
+
+#define TARGET "/sanity/test/incoming"
+#define SANITY_TEST "/opt/qa/tools/dev_sanity/sanity_test.sh "
+
+void get_event (int fd, const char * target);
+
+
+/* ----------------------------------------------------------------- */
+
+/*
+ * Entry point: set up an inotify watch on the incoming directory and
+ * dispatch events forever via get_event().
+ * NOTE(review): argv[1] is never actually used -- both branches below copy
+ * the hard-coded TARGET into `target`, so a command-line path is silently
+ * ignored; confirm whether argument support was intended.
+ */
+int main (int argc, char *argv[])
+{
+ char target[FILENAME_MAX];
+ int result; /* NOTE(review): unused */
+ int fd;
+ int wd; /* watch descriptor */
+
+ if (argc < 2) {
+ strcpy (target,TARGET);
+
+ }
+ else {
+ fprintf (stderr, "Watching %s\n", TARGET);
+ strcpy (target, TARGET);
+ }
+
+ fd = inotify_init();
+ if (fd < 0) {
+ fprintf (stderr, "Error: %s\n", strerror(errno));
+ return 1;
+ }
+
+ /* IN_ALL_EVENTS: get_event() later filters for IN_CLOSE_WRITE only */
+ wd = inotify_add_watch (fd, target, IN_ALL_EVENTS);
+ if (wd < 0) {
+ fprintf (stderr, "Error: %s\n", strerror(errno));
+ return 1;
+ }
+
+ /* loop forever; each iteration blocks in read(2) inside get_event() */
+ while (1) {
+ get_event(fd, target);
+ }
+
+ return 0; /* unreachable */
+}
+
+/* ----------------------------------------------------------------- */
+/* Allow for 1024 simultanious events */
+#define BUFF_SIZE ((sizeof(struct inotify_event)+FILENAME_MAX)*1024)
+
+/*
+ * Block in read(2) on the inotify fd, then walk the buffer of packed
+ * inotify_event records.  On IN_CLOSE_WRITE (a file opened for writing was
+ * closed, i.e. the scp upload finished) fork twice and run SANITY_TEST in
+ * the grandchild, so the watcher itself never blocks on the test run.
+ */
+void get_event (int fd, const char * target)
+{
+ ssize_t len, i = 0;
+ int status; /* NOTE(review): unused (the waitpid calls are commented out) */
+ char action[81+FILENAME_MAX] = {0}; /* NOTE(review): dead -- shadowed by the inner declaration below */
+ char buff[BUFF_SIZE] = {0};
+
+ /* NOTE(review): len < 0 (read error/EINTR) is not checked; the while
+ * condition merely skips the loop in that case. */
+ len = read (fd, buff, BUFF_SIZE);
+
+ while (i < len) {
+ struct inotify_event *pevent = (struct inotify_event *)&buff[i];
+ char action[81+FILENAME_MAX] = {0};
+
+ if (pevent->len)
+ strcpy (action, pevent->name);
+ else
+ strcpy (action, target);
+
+ if (pevent->mask & IN_CLOSE_WRITE){
+ strcat(action, " opened for writing was closed");
+ /*invoke the script to process newly arrived tar file*/
+ /* double-fork: the intermediate child exits at once, so the
+ * grandchild running the script is presumably reparented to init
+ * and never reaped here -- confirm zombie handling is acceptable. */
+ if (fork()==0){//child process.
+ printf("Child running...");
+ if(fork()==0){//grandchild
+ system(SANITY_TEST);
+ /* NOTE(review): the grandchild does not exit() after system();
+ * it falls through and keeps executing the event loop and
+ * main's while(1), duplicating the watcher process. */
+ }else
+ exit(0);
+// waitpid(-1, &status, 0);
+ }
+ }
+// waitpid(-1,&status,0);
+ printf ("%s\n", action);
+ /* advance past this record: header plus variable-length name */
+ i += sizeof(struct inotify_event) + pevent->len;
+
+ }
+
+} /* get_event */
+
diff --git a/sanity/dev_sanity/sanity_check.sh b/sanity/dev_sanity/sanity_check.sh
new file mode 100755
index 0000000..ef783c1
--- /dev/null
+++ b/sanity/dev_sanity/sanity_check.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+# NOTE(review): with set -e, any failing command (e.g. a failed scp inside
+# file_transfer) aborts the script before the success message at the bottom.
+set -xe
+#This script will perform following actions
+# a) create a tar file from developer's current working directory using aws ssh key from $KEY directory.
+# b) transfer the tar file to remote machine (REMOTE_SYS) for testing.
+# c) and archive the tar file.
+
+#
+#directories
+BASENAME=/sanity/test
+TARBALL_DIR=$BASENAME/tarball
+
+#tar file name
+TARFILE=glusterfs.tar
+BACKUP_DIR=$BASENAME/archive
+
+#logfile
+LOG_FILE=$BASENAME/sanity.log
+
+#glusterfs test machine: one host per translator (afr/dht/stripe)
+REMOTE_USER=root
+REMOTE_SYS="192.168.1.85" #dev-sanity #ec2-174-129-181-3.compute-1.amazonaws.com
+REMOTE_DIR=/sanity/test/incoming
+REMOTE_SYS1="10.1.12.191"
+REMOTE_SYS2="10.1.12.192"
+#aws key file path (key-based scp is currently commented out below)
+KEY=~
+
+#transfer the file.
+function file_transfer(){
+echo "coping file $TARBALL_DIR/$TARFILE to remote system $REMOTE_USER@$REMOTE_SYS" >> $LOG_FILE 2>&1
+TARFILE=`ls -tr $TARBALL_DIR/ | head -1`
+echo "Got the file $TARFILE" >> $LOG_FILE 2>&1
+
+#while copying to remote directory with name of translator.
+echo "doing scp $TARBALL_DIR/$TARFILE $REMOTE_USER@$REMOTE_SYS:/$REMOTE_DIR/$translator$usr`hostname` .tar" >> $LOG_FILE 2>&1
+remote_file=$translator"_"$usr"_"`hostname`.tar
+#scp -i $KEY/gluster.pem
+
+if [ $translator == "afr" ];then
+scp $TARBALL_DIR/$TARFILE $REMOTE_USER@$REMOTE_SYS:/$REMOTE_DIR/$remote_file
+fi
+if [ $translator == "dht" ];then
+scp $TARBALL_DIR/$TARFILE $REMOTE_USER@$REMOTE_SYS1:/$REMOTE_DIR/$remote_file
+fi
+if [ $translator == "stripe" ];then
+scp $TARBALL_DIR/$TARFILE $REMOTE_USER@$REMOTE_SYS2:/$REMOTE_DIR/$remote_file
+fi
+
+echo "archive the tar file" >> $LOG_FILE 2>&1
+mkdir -vp $BACKUP_DIR/`date +%m_%d_%y` >> $LOG_FILE 2>&1
+mv -v $TARBALL_DIR/$TARFILE $BACKUP_DIR/`date +%m_%d`/$remote_file.`date +%T`.gz >> $LOG_FILE 2>&1
+
+}
+
+# Print usage to stdout and terminate the script.
+# Note: `exit` (no status) propagates the status of the preceding echo.
+function usage_help(){
+	echo "usage: sanity_check.sh <check-value> <gluster-mail-id>"
+	echo "<check-value> can be one of following three values ,afr or dht or stripe"
+	echo "example : sanity_check.sh afr user@gluster.com"
+	exit;
+}
+
+# Main part
+translator=$1
+#get user name
+usr=`echo $2 | awk '{split($0,array,"@")} END{print array[1]}'`
+echo $usr
+
+
+if [ ! $# -eq 2 ]
+ then
+ usage_help
+ fi
+[ $translator != afr ] && [ $translator != dht ] && [ $translator != stripe ] && echo "Invalid option." && usage_help
+
+#if required directories not exists creat them.
+mkdir -p $TARBALL_DIR
+mkdir -p $BACKUP_DIR
+echo "Creating tar file .."
+git archive --format=tar HEAD > $TARBALL_DIR/$TARFILE
+echo "done"
+#transfer the tar file.
+echo "transferring tar file.."
+file_transfer
+
+if [ $? -eq 0 ]
+ then
+ echo "File transferred successfully,logs will be sent to $2"
+ fi
+
+
+
diff --git a/sanity/dev_sanity/sanity_test.sh b/sanity/dev_sanity/sanity_test.sh
new file mode 100755
index 0000000..38a391d
--- /dev/null
+++ b/sanity/dev_sanity/sanity_test.sh
@@ -0,0 +1,464 @@
+#!/bin/bash
+set -x
+
+#Note : This script will be invoked from inotify.c program as soon as any new files created under /sanity/test/incoming directory.
+#also note , incoming tar file will be in format <translatorName_glusterUsername_developerHostname.tar> For ex:afr_lakshmipathi_entropy.tar
+
+# This script will pick up file from /sanity/test/incoming directory and performs following tasks
+# a) Check whether no process is running,if so then move the tar file from queue to build directory ,extract it and build.
+# If process already running or build directory is not empty or mount point already in use,sleep 60 seconds & and check again.
+# b) then start gluster and mount it on client mount point.
+# c) finally perform QA test on mount point.
+# d) mail the results
+
+
+
+
+
+#directories
+BASENAME=/sanity/test
+INCOMING_DIR=$BASENAME/incoming #where new tar will be scp'ed from remote machines.
+BUILD_DIR=$BASENAME/build #where glusterfs build is done.
+RESULTS_DIR=$BASENAME/results # final QA test results will be available.
+QUEUE_DIR=$BASENAME/queue #tar files will be queued here first,before moving to build dir.
+MOUNT_PT=/export/sanity #sanity test mount point
+#RESULTS_DIR=$MOUNT_PT/results
+
+
+#Standard volume files will be available under VOL_DIR
+VOL_DIR=$BASENAME/spec
+# for afr
+AFR_CLIENT_VOL=$VOL_DIR/afr_client.vol
+AFR_SERVER_VOL=$VOL_DIR/afr_server.vol
+#for stripe
+STRIPE_CLIENT_VOL=$VOL_DIR/stripe_client.vol
+STRIPE_SERVER_VOL=$VOL_DIR/stripe_server.vol
+#for dht
+DHT_CLIENT_VOL=$VOL_DIR/dht_client.vol
+DHT_SERVER_VOL=$VOL_DIR/dht_server.vol
+
+#glusterfs installation path
+INSTALL_DIR=$BASENAME/install
+TRASH_DIR=$BASENAME/trash
+
+#sanity script logfile
+LOG_FILE=$BASENAME/sanity.log
+
+#qa tools path
+QA_DIR=/opt/qa/tools/system_light
+QA_TOOLS=/opt/qa/tools/system_light/run.sh
+
+
+########################################### functions###########################################
+
+
+
+#move the tar file from INCOMING_DIR to QUEUE_DIR.
+# Move the oldest tar file from $INCOMING_DIR to $QUEUE_DIR.
+# Side effects: sets the globals `tarball` (file name) and `translator_name`
+# (prefix before the first '_', e.g. "afr"), used by every later stage.
+function mv_incoming_queue(){
+echo "mv_incoming_queue:==>start" >> $LOG_FILE 2>&1
+
+tarball=`ls -tr $INCOMING_DIR | head -1`
+echo "mv_incoming_queue:Moving tar file from $INCOMING_DIR/$tarball to $QUEUE_DIR" >> $LOG_FILE 2>&1
+# BUGFIX: test the file inside $INCOMING_DIR -- the original tested the
+# bare name against the current directory and always logged "not found".
+if [ -f $INCOMING_DIR/$tarball ]
+then
+ echo " $tarball file exist" >> $LOG_FILE 2>&1
+else
+ echo "file not found " >> $LOG_FILE 2>&1
+fi
+
+#cut tar part from translator - so that it could be used for vol.name,log file and mount point.
+translator_name=`echo $tarball | awk '{split($0,array,"_")} END{print array[1]}'`
+
+#before moving the tar to queue, check whether a file with the same name
+#already exists - if so, rename using the inode number to keep both copies.
+if [ -f $QUEUE_DIR/$tarball ]
+then
+ echo " $tarball file exist.move it queue after renaming it." >> $LOG_FILE 2>&1
+ mv $INCOMING_DIR/$tarball $QUEUE_DIR/`echo $tarball | awk '{split($0,array,".")} END{print array[1]}'`.`ls -i $INCOMING_DIR/$tarball | cut -d' ' -f1`.tgz
+else
+mv $INCOMING_DIR/$tarball $QUEUE_DIR
+fi
+
+echo "mv_incoming_queue:==> exit " >> $LOG_FILE 2>&1
+}
+
+
+
+
+
+#move file from QUEUE_DIR to BUILD_DIR
+# Move the oldest queued tar file from $QUEUE_DIR into the per-translator
+# build directory ($BUILD_DIR/$translator_name).
+# Globals: reads translator_name (set by mv_incoming_queue); re-sets tarball.
+function mv_queue_build(){
+echo "mv_queue_build:==>start" >> $LOG_FILE 2>&1
+echo "mv_queue_build:Moving tar file from $QUEUE_DIR to build " >> $LOG_FILE 2>&1
+
+tarball=`ls -tr $QUEUE_DIR | head -1`
+# BUGFIX: the original tested "$1", but this function is always called with
+# no arguments -- test the queued tarball itself.
+if [ -f $QUEUE_DIR/$tarball ]
+then
+ echo " $tarball file exist" >> $LOG_FILE 2>&1
+else
+ echo "file not found " >> $LOG_FILE 2>&1
+fi
+
+echo "moving $QUEUE_DIR/$tarball to $BUILD_DIR/$translator_name" >> $LOG_FILE 2>&1
+mv -v $QUEUE_DIR/$tarball $BUILD_DIR/$translator_name >> $LOG_FILE 2>&1
+echo "mv_queue_build:==> exit " >> $LOG_FILE 2>&1
+}
+
+
+
+
+
+
+#check whether tar can be moved from Queue to build - to start build process.
+#So a) check mount point is free or not.b)glusterfs/d already running or not.c)Verify build directory ,ensure no other process started building.
+# Wait (by sleep + recursion) until this translator's slot is free:
+#  a) its client volume is not mounted, b) no glusterfs/glusterfsd process
+#  is running with its vol files, c) its build directory is empty.
+# Also (re)creates the per-translator mail body file once the mount is free.
+# NOTE(review): each 60s retry recurses rather than loops, so a long wait
+# grows the call stack, and after an inner call returns the outer frame
+# re-runs its remaining checks -- harmless but redundant; confirm.
+function is_ready(){
+echo "is_ready:==>start" >> $LOG_FILE 2>&1
+
+#is glusterfs client already mounted,for specific translator?
+# BUGFIX: the original "$translator_name_client.vol" expanded the undefined
+# variable `translator_name_client` and printed "checking for .vol".
+echo "checking for ${translator_name}_client.vol"
+cat /etc/mtab | grep $translator_name"_client.vol"
+if [ $? -eq 0 ]
+then
+ echo " `date` : $translator_name Already mounted" >> $LOG_FILE 2>&1
+ echo " `date` : No free slot avail. for $translator_name"
+ sleep 60
+ is_ready
+else
+ echo "not mounted.proceed. " >> $LOG_FILE 2>&1
+ rm -rf $BASENAME/$translator_name"_mail.txt"
+ touch $BASENAME/$translator_name"_mail.txt"
+
+fi
+
+
+#is glusterfs and glusterfsd aready running for the given translator?
+for file in `pgrep glusterfs`
+do
+grep $translator_name"_client.vol" /proc/$file/cmdline
+if [ $? -eq 0 ]
+ then
+ echo "$translator_name client runnning with pid $file"
+ sleep 60
+ is_ready
+ fi
+grep $translator_name"_server.vol" /proc/$file/cmdline
+if [ $? -eq 0 ]
+ then
+ echo "$translator_name server runnning with pid $file"
+ sleep 60
+ is_ready
+ fi
+done
+
+#glusterfs not running and not mounted but it's building.
+if [ "$(ls -A $BUILD_DIR/$translator_name)" ]; then
+ echo "$BUILD_DIR/$translator_name is not Empty - Some other process is building glusterfs" >> $LOG_FILE 2>&1
+ sleep 60
+ is_ready
+else
+ echo "$BUILD_DIR/$translator_name is Empty. Proceed." >> $LOG_FILE 2>&1
+fi
+echo "is_ready:==> exit " >> $LOG_FILE 2>&1
+}
+
+
+
+
+#Build glusterfs from src
+#Build glusterfs from src
+# Extracts $tarball (a `git archive` snapshot, so it unpacks into the cwd)
+# inside $BUILD_DIR/$translator_name and builds with a per-translator
+# install prefix.  Sets the global `buildflag` ('y'/'n') read by MAIN.
+# NOTE(review): only the `make && make install` status is checked; a failed
+# ./autogen.sh or ./configure is not detected here and only surfaces when
+# make subsequently fails.
+function build_glusterfs(){
+echo " build_glusterfs:==>start" >> $LOG_FILE 2>&1
+echo " moving to $BUILD_DIR/$translator_name and extract $tarball" >> $LOG_FILE 2>&1
+cd $BUILD_DIR/$translator_name && tar -xvf $tarball
+#buildflag - status of build
+buildflag='y'
+#run autogen.sh
+./autogen.sh
+echo "perform cmm" >> $LOG_FILE 2>&1
+./configure --prefix $INSTALL_DIR/$translator_name
+make && make install
+if [ $? -eq 0 ]
+then
+ echo "build successful." >> $LOG_FILE 2>&1
+else
+ echo "build not successful. " >> $LOG_FILE 2>&1
+ buildflag='n'
+fi
+echo " build_glusterfs:==> exit " >> $LOG_FILE 2>&1
+}
+
+
+
+
+
+#make sure standard volume files already present on the system.-check for missing volume files.
+#make sure standard volume files already present on the system.-check for missing volume files.
+# Pure check: logs which of the six standard vol files are missing; it does
+# not abort.  NOTE(review): its only call site in MAIN is commented out.
+function check_vol_files(){
+MISSING_FILES='n'
+	[ ! -f $AFR_CLIENT_VOL ]  && echo "$AFR_CLIENT_VOL not found " && MISSING_FILES='y'
+	[ ! -f $AFR_SERVER_VOL ] && echo "$AFR_SERVER_VOL not found" && MISSING_FILES='y'
+	[ ! -f $STRIPE_CLIENT_VOL ] && echo "$STRIPE_CLIENT_VOL not found" && MISSING_FILES='y'
+	[ ! -f $STRIPE_SERVER_VOL ] && echo "$STRIPE_SERVER_VOL not found" && MISSING_FILES='y'
+	[ ! -f $DHT_CLIENT_VOL ] && echo "$DHT_CLIENT_VOL not found" && MISSING_FILES='y'
+	[ ! -f $DHT_SERVER_VOL ] && echo "$DHT_SERVER_VOL not found" && MISSING_FILES='y'
+
+if [ $MISSING_FILES == 'n' ]
+	then
+	echo "volumes files found" >> $LOG_FILE 2>&1
+	else
+	echo "Missing Volume files" >> $LOG_FILE 2>&1
+fi
+
+}
+
+
+
+#start glusterfs server.
+function start_glusterfsd(){
+echo " start_glusterfsd:==>start" >> $LOG_FILE 2>&1
+#Log and volume files named after specified translator like stripe_client.vol or afr_client.vol
+server_vol=$translator_name"_server.vol"
+server_log=$translator_name"_server.log"
+start_server='y'
+
+echo "Starting glusterfs server" >> $LOG_FILE 2>&1
+echo "Running command:$INSTALL_DIR/$translator_name/sbin/glusterfsd -f $VOL_DIR/$server_vol -l $RESULTS_DIR/$server_log -L DEBUG" >> $LOG_FILE 2>&1
+$INSTALL_DIR/$translator_name/sbin/glusterfsd -f $VOL_DIR/$server_vol -l $RESULTS_DIR/$server_log -L DEBUG &
+
+if [ $? -eq 0 ]; then
+echo "server started successfully." >> $BASENAME/$translator_name"_mail.txt"
+else
+echo "Unable to start glusterfsd.See log file $RESULTS_DIR/$server_log for more details" >> $BASENAME/$translator_name"_mail.txt"
+start_server='n'
+fi
+echo " start_glusterfsd:==> exit " >> $LOG_FILE 2>&1
+}
+#creat and start glusterfsd
+function creat_start_glusterfsd(){
+pgrep glusterd
+if [ $? -eq 1 ];then
+$INSTALL_DIR/$translator_name/sbin/glusterd
+fi
+
+#re-create backend newly
+rm -rf /export/sanity/afr_export*
+
+mkdir -p /export/sanity/afr_export
+mkdir -p /export/sanity/afr_export1
+
+#creat volume
+$INSTALL_DIR/$translator_name/sbin/gluster volume create $translator_name replica 2 `hostname`:/export/sanity/afr_export `hostname`:/export/sanity/afr_export1
+#start volume
+$INSTALL_DIR/$translator_name/sbin/gluster volume start $translator_name
+if [ $? -eq 0 ]; then
+echo "Following gluster volume started successfully." >> $BASENAME/$translator_name"_mail.txt"
+$INSTALL_DIR/$translator_name/sbin/gluster volume info >> $BASENAME/$translator_name"_mail.txt"
+echo "----------------------------------------------";
+fi
+}
+
+
+#start glusterfs client.
+#start glusterfs client.
+# Mount the translator's client vol file at $MOUNT_PT/$translator_name.
+# Side effects: sets globals client_vol/client_log (read later by cleanup)
+# and start_client ('y'/'n').
+# NOTE(review): currently unused -- MAIN calls mount_glusterfs instead.
+function start_glusterfs(){
+echo " start_glusterfs:==>start" >> $LOG_FILE 2>&1
+echo "Trying to mount glusterfs on $MOUNT_PT/$translator_name " >> $LOG_FILE 2>&1
+
+#client volume file name and log file name
+client_vol=$translator_name"_client.vol"
+client_log=$translator_name"_client.log"
+start_client='y'
+
+echo "Running command:$INSTALL_DIR/$translator_name/sbin/glusterfs -f $VOL_DIR/$client_vol $MOUNT_PT/$translator_name -l $RESULTS_DIR/$client_log -L DEBUG" >> $LOG_FILE 2>&1
+$INSTALL_DIR/$translator_name/sbin/glusterfs -f $VOL_DIR/$client_vol $MOUNT_PT/$translator_name -l $RESULTS_DIR/$client_log -L DEBUG
+
+if [ $? -eq 0 ]; then
+echo "glusterfs client started" >> $BASENAME/$translator_name"_mail.txt"
+else
+echo "Unable to mount glusterfs.See log file for more details" >> $BASENAME/$translator_name"_mail.txt"
+start_client='n'
+fi
+echo " start_glusterfs:==> exit " >> $LOG_FILE 2>&1
+}
+#mount gluster volume
+function mount_glusterfs(){
+mount -t glusterfs `hostname`:$translator_name $MOUNT_PT/$translator_name
+if [ $? -eq 0 ];then
+echo "gluster volume mounted successfully." >> $BASENAME/$translator_name"_mail.txt"
+df >> $BASENAME/$translator_name"_mail.txt"
+echo "-----------------------------------";
+fi
+}
+
+# Lazy-unmount (-l) the test mount point so busy files don't block teardown.
+function umount_glusterfs(){
+umount -l $MOUNT_PT/$translator_name
+}
+
+# Stop and delete the per-translator volume (--mode=script suppresses the
+# interactive y/n prompts), stop glusterd, and empty the build directory
+# via $TRASH_DIR.
+function stop_glusterd(){
+$INSTALL_DIR/$translator_name/sbin/gluster --mode=script volume stop $translator_name
+$INSTALL_DIR/$translator_name/sbin/gluster --mode=script volume delete $translator_name
+/etc/init.d/glusterd stop
+
+#cleanup build dirs: move contents to trash first, then purge trash
+rm -f $BUILD_DIR/$translator_name/.gitignore
+mv -vf $BUILD_DIR/$translator_name/* $TRASH_DIR # >> $LOG_FILE 2>&1
+rm -rf $TRASH_DIR/*
+}
+
+
+
+#perform QA test.
+function start_tests() {
+echo " start_tests:==>start" >> $LOG_FILE 2>&1
+
+ echo "starting QA test" >> $LOG_FILE 2>&1
+ cd $QA_DIR
+ $QA_TOOLS -w $MOUNT_PT/$translator_name -l $RESULTS_DIR/$translator_name"QA.log"
+ echo "QA test: completed" >> $BASENAME/$translator_name"_mail.txt"
+
+echo " start_tests:==> exit " >> $LOG_FILE 2>&1
+}
+
+
+
+#save logs and sent a mail.
+function cleanup(){
+echo "cleanup ==> start " >> $LOG_FILE 2>&1
+#stop glusterfs and glusterfsd
+for file in `pgrep glusterfs`
+do
+grep $translator_name"_client.vol" /proc/$file/cmdline
+if [ $? -eq 0 ]
+ then
+ kill $file
+ fi
+grep $translator_name"_server.vol" /proc/$file/cmdline
+if [ $? -eq 0 ]
+ then
+ kill $file
+ fi
+done
+
+#save results
+echo "Moving glusterfs log files " >> $LOG_FILE 2>&1
+trans_user_sys=`echo $tarball | awk '{split($0,array,".")} END{print array[1]}'`
+
+#logs=$RESULTS_DIR/$translator_name/$trans_user_sys/`date +%m_%d_%y_%T`
+mkdir -p $RESULTS_DIR/$translator_name/$trans_user_sys
+cd $RESULTS_DIR/$translator_name/$trans_user_sys
+logs=`date +%m_%d_%y_%T`
+mkdir -vp $logs >> $LOG_FILE 2>&1
+
+mv -v $RESULTS_DIR/$server_log $logs/ >> $LOG_FILE 2>&1
+mv -v $RESULTS_DIR/$client_log $logs/ >> $LOG_FILE 2>&1
+
+echo "Moving QA test log file " >> $LOG_FILE 2>&1
+mv -v $RESULTS_DIR/$translator_name"QA.log" $logs >> $LOG_FILE 2>&1
+
+#create tar file of logs
+echo "creating tar file tar cfz $trans_user_sys".tgz" $logs " >> $LOG_FILE 2>&1
+tar cfz $trans_user_sys".tgz" $logs
+
+#attach the log files and sent to developer.
+#get from address
+usr=`echo $trans_user_sys | cut -d'_' -f2`
+sys=`echo $trans_user_sys | cut -d'_' -f3`
+echo "See attached log files for more details " >> $BASENAME/$translator_name"_mail.txt"
+#mutt -s "Developer Sanity Test" -a $trans_user_sys".tgz" "$usr@gluster.com" < $BASENAME/$translator_name"_mail.txt"
+mail -s "dev sanity test results" -a $trans_user_sys".tgz" "$usr@gluster.com" < $BASENAME/$translator_name"_mail.txt"
+rm -f $BUILD_DIR/$translator_name/.gitignore
+mv -vf $BUILD_DIR/$translator_name/* $TRASH_DIR >> $LOG_FILE 2>&1
+
+echo "Cleanup installed binaries" >> $LOG_FILE 2>&1
+mv -v $INSTALL_DIR/$translator_name/* $TRASH_DIR >> $LOG_FILE 2>&1
+mv $BASENAME/$translator_name"_mail.txt" $TRASH_DIR
+rm -r $TRASH_DIR/* >> $LOG_FILE 2>&1
+
+echo "cleanup ==> exit " >> $LOG_FILE 2>&1
+}
+
+# Collect QA logs into a timestamped results dir, tar them, append the
+# failed-test list (results/tests_failed, if the QA run produced one) to the
+# mail body, and mail everything to <user>@gluster.com.
+# NOTE(review): this is largely a copy of cleanup()'s log-collection tail
+# with the server/client log moves commented out -- candidates for sharing
+# a helper.
+function mail_status(){
+#mail -s "dev sanity test results" -a $trans_user_sys".tgz" "$usr@gluster.com" < $BASENAME/$translator_name"_mail.txt"
+#save results
+echo "Moving glusterfs log files " >> $LOG_FILE 2>&1
+# e.g. afr_lakshmipathi_entropy.tar -> afr_lakshmipathi_entropy
+trans_user_sys=`echo $tarball | awk '{split($0,array,".")} END{print array[1]}'`
+
+#logs=$RESULTS_DIR/$translator_name/$trans_user_sys/`date +%m_%d_%y_%T`
+mkdir -p $RESULTS_DIR/$translator_name/$trans_user_sys
+cd $RESULTS_DIR/$translator_name/$trans_user_sys
+logs=`date +%m_%d_%y_%T`
+mkdir -vp $logs >> $LOG_FILE 2>&1
+
+#mv -v $RESULTS_DIR/$server_log $logs/ >> $LOG_FILE 2>&1
+#mv -v $RESULTS_DIR/$client_log $logs/ >> $LOG_FILE 2>&1
+
+echo "Moving QA test log file " >> $LOG_FILE 2>&1
+mv -v $RESULTS_DIR/$translator_name"QA.log" $logs >> $LOG_FILE 2>&1
+
+#create tar file of logs
+echo "creating tar file tar cfz $trans_user_sys".tgz" $logs " >> $LOG_FILE 2>&1
+tar cfz $trans_user_sys".tgz" $logs
+
+#attach the log files and sent to developer.
+#get from address: tarball name is <translator>_<user>_<host>
+usr=`echo $trans_user_sys | cut -d'_' -f2`
+sys=`echo $trans_user_sys | cut -d'_' -f3`
+echo "See attached log files for more details " >> $BASENAME/$translator_name"_mail.txt"
+#mutt -s "Developer Sanity Test" -a $trans_user_sys".tgz" "$usr@gluster.com" < $BASENAME/$translator_name"_mail.txt"
+cat /sanity/test/results/tests_failed >> $BASENAME/$translator_name"_mail.txt"
+rm -rf /sanity/test/results/tests_failed
+mail -s "dev sanity test results" -a $trans_user_sys".tgz" "$usr@gluster.com" < $BASENAME/$translator_name"_mail.txt"
+}
+
+########################################### MAIN part###########################################
+
+echo "###########sanity_test.sh log################################" >> $LOG_FILE 2>&1
+
+date>>$LOG_FILE
+echo "############" >> $LOG_FILE 2>&1
+
+
+#move the tar file
+#echo "checking volume files" >> $LOG_FILE 2>&1
+#check_vol_files
+#echo "done." >> $LOG_FILE 2>&1
+
+
+#move $INCOMING tar file to $QUEUE directory.
+echo "moving tar file to $QUEUE_DIR directory" >> $LOG_FILE 2>&1
+mv_incoming_queue
+echo "done." >> $LOG_FILE 2>&1
+
+#check for status
+is_ready
+
+
+#move from $QUEUE to $BUILD if it is free.
+echo "Moving to build.." >> $LOG_FILE 2>&1
+mv_queue_build
+
+echo "Start building.." >> $LOG_FILE 2>&1
+build_glusterfs
+echo "done." >> $LOG_FILE 2>&1
+if [ $buildflag == 'n' ]
+ then
+ echo "build process : FAILED" >> $BASENAME/$translator_name"_mail.txt"
+ else
+ echo "build process : PASSED" >> $BASENAME/$translator_name"_mail.txt"
+ #start_glusterfsd # starting server
+ creat_start_glusterfsd
+# if [ $start_server == 'y' ]
+# then
+# start_glusterfs # starting client
+# fi
+ #qa testing
+ mount_glusterfs;sleep 10;
+# if [ $start_client == 'y' ]
+# then
+ start_tests # start testing
+# fi
+
+fi
+#clean up build dir
+#cleanup
+umount_glusterfs
+stop_glusterd
+mail_status
+###########################################EOF###########################################
+
diff --git a/sanity/dev_sanity/structure.txt b/sanity/dev_sanity/structure.txt
new file mode 100644
index 0000000..af72f08
--- /dev/null
+++ b/sanity/dev_sanity/structure.txt
@@ -0,0 +1,187 @@
+/sanity/:
+total 4
+drwxrwxrwx. 12 root root 4096 2011-06-24 04:06 test
+
+/sanity/test:
+total 84
+drwxrwxrwx. 5 root root 4096 2011-02-24 01:59 archive
+drwxrwxrwx. 5 root root 4096 2010-07-27 15:00 build
+drwxrwxrwx. 2 root root 4096 2010-08-04 10:50 counter
+drwxrwxrwx. 2 root root 4096 2011-06-24 04:00 incoming
+drwxrwxrwx. 5 root root 4096 2011-03-22 01:51 install
+drwxrwxrwx. 2 root root 4096 2011-06-24 04:00 queue
+drwxrwxrwx. 5 root root 4096 2011-06-24 04:06 results
+-rwxr-xr-x. 1 root root 38885 2011-06-24 04:06 sanity.log
+drwxrwxrwx. 4 root root 4096 2011-06-24 03:33 spec
+drwxrwxrwx. 2 root root 4096 2010-07-27 15:00 tarball
+drwxrwxrwx. 2 root root 4096 2011-06-24 04:06 trash
+
+/sanity/test/archive:
+total 12
+drwxrwxrwx. 2 root root 4096 2011-02-24 01:59 afr
+drwxrwxrwx. 2 root root 4096 2011-02-24 01:59 dht
+drwxrwxrwx. 2 root root 4096 2011-02-24 01:59 stripe
+
+/sanity/test/archive/afr:
+total 0
+
+/sanity/test/archive/dht:
+total 0
+
+/sanity/test/archive/stripe:
+total 0
+
+/sanity/test/build:
+total 12
+drwxrwxrwx. 2 root root 4096 2011-06-24 03:03 afr
+drwxrwxrwx. 2 root root 4096 2011-06-24 04:06 dht
+drwxrwxrwx. 2 root root 4096 2011-06-24 03:38 stripe
+
+/sanity/test/build/afr:
+total 0
+
+/sanity/test/build/dht:
+total 0
+
+/sanity/test/build/stripe:
+total 0
+
+/sanity/test/counter:
+total 4
+-rw-r--r--. 1 root root 0 2010-08-04 10:50 10093
+-rwxr-xr-x. 1 root root 283 2010-07-27 15:00 counter.sh
+
+/sanity/test/incoming:
+total 0
+
+/sanity/test/install:
+total 12
+drwxrwxrwx. 2 root root 4096 2011-06-24 03:03 afr
+drwxrwxrwx. 2 root root 4096 2011-06-24 04:06 dht
+drwxrwxrwx. 2 root root 4096 2011-06-24 03:38 stripe
+
+/sanity/test/install/afr:
+total 0
+
+/sanity/test/install/dht:
+total 0
+
+/sanity/test/install/stripe:
+total 0
+
+/sanity/test/queue:
+total 0
+
+/sanity/test/results:
+total 12
+drwxrwxrwx. 3 root root 4096 2011-06-24 02:46 afr
+drwxrwxrwx. 3 root root 4096 2011-06-24 03:29 dht
+drwxrwxrwx. 3 root root 4096 2011-06-24 03:29 stripe
+
+/sanity/test/results/afr:
+total 4
+drwxr-xr-x. 4 root root 4096 2011-06-24 03:03 afr_lakshmipathi_space
+
+/sanity/test/results/afr/afr_lakshmipathi_space:
+total 188
+drwxr-xr-x. 2 root root 4096 2011-06-24 02:46 06_24_11_02:46:13
+drwxr-xr-x. 2 root root 4096 2011-06-24 03:03 06_24_11_03:03:01
+-rw-r--r--. 1 root root 184093 2011-06-24 03:03 afr_lakshmipathi_space.tgz
+
+/sanity/test/results/afr/afr_lakshmipathi_space/06_24_11_02:46:13:
+total 3740
+-rw-r--r--. 1 root root 2716743 2011-06-24 02:46 afr_client.log
+-rw-r--r--. 1 root root 1346 2011-06-24 02:46 afrQA.log
+-rw-r--r--. 1 root root 1103897 2011-06-24 02:46 afr_server.log
+
+/sanity/test/results/afr/afr_lakshmipathi_space/06_24_11_03:03:01:
+total 3772
+-rw-r--r--. 1 root root 2746697 2011-06-24 03:03 afr_client.log
+-rw-r--r--. 1 root root 1346 2011-06-24 03:03 afrQA.log
+-rw-r--r--. 1 root root 1104540 2011-06-24 03:03 afr_server.log
+
+/sanity/test/results/dht:
+total 4
+drwxr-xr-x. 5 root root 4096 2011-06-24 04:06 dht_lakshmipathi_space
+
+/sanity/test/results/dht/dht_lakshmipathi_space:
+total 52
+drwxr-xr-x. 2 root root 4096 2011-06-24 03:29 06_24_11_03:29:26
+drwxr-xr-x. 2 root root 4096 2011-06-24 03:44 06_24_11_03:44:37
+drwxr-xr-x. 2 root root 4096 2011-06-24 04:06 06_24_11_04:06:05
+-rw-r--r--. 1 root root 38434 2011-06-24 04:06 dht_lakshmipathi_space.tgz
+
+/sanity/test/results/dht/dht_lakshmipathi_space/06_24_11_03:29:26:
+total 88
+-rw-r--r--. 1 root root 70240 2011-06-24 03:29 dht_client.log
+-rw-r--r--. 1 root root 1344 2011-06-24 03:29 dhtQA.log
+-rw-r--r--. 1 root root 6049 2011-06-24 03:29 dht_server.log
+
+/sanity/test/results/dht/dht_lakshmipathi_space/06_24_11_03:44:37:
+total 764
+-rw-r--r--. 1 root root 610514 2011-06-24 03:44 dht_client.log
+-rw-r--r--. 1 root root 1345 2011-06-24 03:44 dhtQA.log
+-rw-r--r--. 1 root root 161896 2011-06-24 03:44 dht_server.log
+
+/sanity/test/results/dht/dht_lakshmipathi_space/06_24_11_04:06:05:
+total 800
+-rw-r--r--. 1 root root 622681 2011-06-24 04:06 dht_client.log
+-rw-r--r--. 1 root root 1345 2011-06-24 04:06 dhtQA.log
+-rw-r--r--. 1 root root 186369 2011-06-24 04:06 dht_server.log
+
+/sanity/test/results/stripe:
+total 4
+drwxr-xr-x. 4 root root 4096 2011-06-24 03:38 stripe_lakshmipathi_space
+
+/sanity/test/results/stripe/stripe_lakshmipathi_space:
+total 184
+drwxr-xr-x. 2 root root 4096 2011-06-24 03:29 06_24_11_03:29:40
+drwxr-xr-x. 2 root root 4096 2011-06-24 03:38 06_24_11_03:38:50
+-rw-r--r--. 1 root root 176595 2011-06-24 03:38 stripe_lakshmipathi_space.tgz
+
+/sanity/test/results/stripe/stripe_lakshmipathi_space/06_24_11_03:29:40:
+total 80
+-rw-r--r--. 1 root root 62341 2011-06-24 03:29 stripe_client.log
+-rw-r--r--. 1 root root 1746 2011-06-24 03:29 stripeQA.log
+-rw-r--r--. 1 root root 10245 2011-06-24 03:29 stripe_server.log
+
+/sanity/test/results/stripe/stripe_lakshmipathi_space/06_24_11_03:38:50:
+total 5676
+-rw-r--r--. 1 root root 2131344 2011-06-24 03:38 stripe_client.log
+-rw-r--r--. 1 root root 1349 2011-06-24 03:38 stripeQA.log
+-rw-r--r--. 1 root root 3671318 2011-06-24 03:38 stripe_server.log
+
+/sanity/test/spec:
+total 32
+-rwxr-xr-x. 1 root root 1480 2011-02-24 01:51 afr_client.vol
+-rwxr-xr-x. 1 root root 1017 2011-03-22 03:40 afr_server.vol
+-rwxr-xr-x. 1 root root 1471 2011-06-24 03:33 dht_client.vol
+-rwxr-xr-x. 1 root root 1012 2011-03-22 03:40 dht_server.vol
+drwxr-xr-x. 2 root root 4096 2010-07-29 09:04 old-vols
+-rwxr-xr-x. 1 root root 2076 2011-06-24 03:33 stripe_client.vol
+-rwxr-xr-x. 1 root root 1748 2011-03-22 03:41 stripe_server.vol
+drwxr-xr-x. 2 root root 4096 2010-08-02 06:51 vol-ip
+
+/sanity/test/spec/old-vols:
+total 24
+-rwxr-xr-x. 1 root root 496 2010-07-29 09:04 afr_client.vol
+-rwxr-xr-x. 1 root root 527 2010-07-29 09:04 afr_server.vol
+-rwxr-xr-x. 1 root root 476 2010-07-29 09:04 dht_client.vol
+-rwxr-xr-x. 1 root root 527 2010-07-29 09:04 dht_server.vol
+-rwxr-xr-x. 1 root root 465 2010-07-29 09:04 stripe_client.vol
+-rwxr-xr-x. 1 root root 530 2010-07-29 09:04 stripe_server.vol
+
+/sanity/test/spec/vol-ip:
+total 24
+-rwxr-xr-x. 1 root root 1482 2010-08-02 06:51 afr_client.vol
+-rwxr-xr-x. 1 root root 1011 2010-08-02 06:51 afr_server.vol
+-rwxr-xr-x. 1 root root 1473 2010-08-02 06:51 dht_client.vol
+-rwxr-xr-x. 1 root root 1006 2010-08-02 06:51 dht_server.vol
+-rwxr-xr-x. 1 root root 2080 2010-08-02 06:51 stripe_client.vol
+-rwxr-xr-x. 1 root root 1736 2010-08-02 06:51 stripe_server.vol
+
+/sanity/test/tarball:
+total 0
+
+/sanity/test/trash:
+total 0