saliency-based-citation/slurm_launch.sh
#!/bin/bash
# ======= SLURM OPTIONS ======= (user input required)
### See inline comments for what each option means
#SBATCH --partition=GPU
### Set the job name
#SBATCH --job-name=salsa_70b
### Specify the number of nodes for your job
#SBATCH --nodes=1
### Set the number of CPUs per task
#SBATCH --cpus-per-task=8
#SBATCH --mem=64gb
#SBATCH --gres=gpu:A40:4
#SBATCH --time=36:01:01
### Pass the full environment to the job
#SBATCH --export=ALL
#SBATCH --output=%j.o
#SBATCH --error=%j.e
# ===== END SLURM OPTIONS =====
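### Submit this script with: sbatch slurm_launch.sh
### Monitor it with: squeue -u $USER
### (partition, GPU type, and memory are cluster-specific and may need adjusting)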
### IMPORTANT: activate the Python environment
source activate citation
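### Note: `source activate` is legacy conda syntax; on newer conda versions,
### `conda activate citation` is the equivalent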
echo "loaded module" | |
### Go to the directory this job was submitted from
cd "$SLURM_SUBMIT_DIR"
### Make a folder for job logs if one doesn't exist
mkdir -p job_logs
### Print the GPUs allocated to this job
echo "Available GPUs:"
nvidia-smi
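### (the output should list the four A40s requested via --gres above)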
### Run the experiments
echo "running code"
bash launch_experiment.sh 3 # Run Llama-3.1-70B experiments
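### (the positional argument selects the experiment set; 3 presumably maps to
### the Llama-3.1-70B configuration inside launch_experiment.sh)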
echo "finished running" | |
### move the log files inside the folder | |
mv $SLURM_JOB_ID.o job_logs/$SLURM_JOB_ID.o | |
mv $SLURM_JOB_ID.e job_logs/$SLURM_JOB_ID.e | |
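
### -------------------------------------------------------------------
### Alternative configuration below, commented out and presumably kept
### for reference: it runs the full run_pipeline.py sweep directly on a
### different partition instead of calling launch_experiment.sh.
### -------------------------------------------------------------------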
# #!/bin/bash
# # ======= SLURM OPTIONS ======= (user input required)
# ### See inline comments for what each option means
# #SBATCH --partition=Bunescu
# ### Set the job name
# #SBATCH --job-name=saliency_big
# ### Specify the number of nodes for your job
# #SBATCH --nodes=1
# ### Set the number of CPUs per task
# #SBATCH --cpus-per-task=8
# #SBATCH --mem=64gb
# #SBATCH --gres=gpu:4
# #SBATCH --time=36:01:01
# ### Pass the full environment to the job
# #SBATCH --export=ALL
# #SBATCH --output=%j.o
# #SBATCH --error=%j.e
# # ===== END SLURM OPTIONS =====
# ### IMPORTANT: activate a Python 3 environment with PyTorch and CUDA enabled
# #module load pytorch/1.5.1-anaconda3-cuda10.2
# source activate citation
# #module load pytorch/1.6.0-cuda10.2
# echo "activated environment"
# ### Go to the directory this job was submitted from
# cd "$SLURM_SUBMIT_DIR"
# ### Make a folder for job logs if one doesn't exist
# mkdir -p job_logs
# ### Run the pipeline
# echo "running code"
# # Available attribution systems: prompt_based, gradient_based, sliding_window
# python run_pipeline.py --yaml_config llama31_big.yaml &&
# python run_pipeline.py --attribution_system prompt_based --yaml_config llama31_big.yaml &&
# python run_pipeline.py --attribution_system gradient_based --yaml_config llama31_big.yaml &&
# python run_pipeline.py --yaml_config llama31_big_best.yaml --output_path attribution_results_best &&
# python run_pipeline.py --yaml_config llama31_big_smoothing.yaml --output_path attribution_results_smoothing &&
# python run_pipeline.py --yaml_config llama31_big_thresholding.yaml --output_path attribution_results_thresholding
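# ### Note: the && chain stops at the first failing command, so later
# ### configs run only if all the earlier ones succeed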
# echo "finished running" | |
# ### move the log files inside the folder | |
# mv $SLURM_JOB_ID.o job_logs/$SLURM_JOB_ID.o | |
# mv $SLURM_JOB_ID.e job_logs/$SLURM_JOB_ID.e | |