#!/bin/bash
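# Submit with three positional arguments (inferred from how $1-$3 are used below):
#   sbatch sl-train-explain-multiling.bash RUN_NAME SEED LEARNING_RATE
# Example (hypothetical values): sbatch sl-train-explain-multiling.bash oscar-run1 42 0.0001
# Note: the logs/ directory must exist before submission, since Slurm opens
# the -o/-e files there and will not create the directory itself.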
#SBATCH --job-name=training
#SBATCH --account=project_2005092
#SBATCH --time=15:10:00  # previously 20:15:00
#SBATCH --partition=gpu
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --mem-per-cpu=16G
#SBATCH --gres=gpu:v100:1
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err
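# Refresh the convenience symlinks so logs/current.* always point at this job's logs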
rm -f logs/current.err
rm -f logs/current.out
ln -s "$SLURM_JOBID.err" logs/current.err
ln -s "$SLURM_JOBID.out" logs/current.out
module purge
#module load pytorch/1.8
#source /scratch/project_2002026/amanda/venv/bin/activate
source /scratch/project_2002026/samuel/VENVS/expl/bin/activate
# /scratch/project_2002026/samuel/class-explainer/oscar_data \
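# Keep the Hugging Face Transformers model cache in a local directory (v_cachedir)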
export TRANSFORMERS_CACHE=v_cachedir
echo "learning rate is"
echo $3
# best lr seems to be 0.0001
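# Fine-tune xlm-roberta-base on the resplit OSCAR data; explanations and the
# trained model are saved under the run name given in $1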
srun python run_resplits_multiling.py \
--data /scratch/project_2005092/veronika/class-explainer/final_oscar_data \
--model_name xlm-roberta-base \
--lr "$3" \
--epochs 12 \
--batch_size 30 \
--split 0.9999 \
--patience 1 \
--save_explanations "explanations_final/$1" \
--save_model "models/$1" \
--seed "$2"
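# Clean up the intermediate checkpoint directory for this run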
rm -vrf "models/$1-ckpt"
#gzip explanations/$1.tsv
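# Print the finished job's resource-usage / efficiency summary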
seff $SLURM_JOBID