Upload 12 files
- discriminative/eu/fine-tuning/predict/model-transfer.sh +44 -0
- discriminative/eu/fine-tuning/predict/translate-test.sh +0 -0
- discriminative/eu/fine-tuning/predict/translate-train.sh +47 -0
- discriminative/eu/fine-tuning/requirements.txt +8 -0
- discriminative/eu/fine-tuning/run_xnli_eus.py +688 -0
- discriminative/eu/fine-tuning/train/logs/model_transder-2gpu.err +141 -0
- discriminative/eu/fine-tuning/train/logs/model_transder.err +0 -0
- discriminative/eu/fine-tuning/train/logs/model_transfer-2gpu.log +0 -0
- discriminative/eu/fine-tuning/train/logs/model_transfer.log +0 -0
- discriminative/eu/fine-tuning/train/model-transfer-2gpu.sh +54 -0
- discriminative/eu/fine-tuning/train/model-transfer.sh +62 -0
- discriminative/eu/fine-tuning/train/translate-train.sh +75 -0
discriminative/eu/fine-tuning/predict/model-transfer.sh
ADDED
@@ -0,0 +1,44 @@
+#!/bin/bash
+#SBATCH --qos=regular
+#SBATCH --job-name=var-nli-pred
+#SBATCH --cpus-per-task=1
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --mem=64GB
+#SBATCH --gres=gpu:4
+#SBATCH --constraint=a100-sxm4
+#SBATCH --time=1-00:00:00
+#SBATCH --output=var-nli-pre.log
+#SBATCH --error=var-nli-pre.err
+#SBATCH --mail-type=REQUEUE
+#SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+
+source /scratch/jbengoetxea/phd/.phd_venv_new/bin/activate
+
+for seed in 23 27 33
+# 27 33
+do
+for model in jhu-clsp/mmBERT-base
+# jhu-clsp/mmBERT-base FacebookAI/xlm-roberta-large answerdotai/ModernBERT-large
+do
+for dataset in nafar_nat
+# eu native var
+do
+python /scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/fine-tuning/run_xnli_eus.py \
+    --model_name_or_path "/scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/models/model-transfer/$model/$seed" \
+    --language eu \
+    --train_language en \
+    --test_data $dataset \
+    --do_predict \
+    --per_device_train_batch_size 32 \
+    --num_train_epochs 10.0 \
+    --max_seq_length 128 \
+    --output_dir /scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/results/model-transfer/$model/no-rep/$dataset/$seed \
+    --save_steps 50000 \
+    --metric_for_best_model accuracy \
+    --seed $seed \
+    --eval_steps 5000 \
+    --save_total_limit 2
+done
+done
+done
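For context, a minimal way to launch this prediction sweep — a sketch assuming the repository is checked out under the paths used in the script:

# Submit the job; sbatch reads the #SBATCH directives at the top of the script.
sbatch discriminative/eu/fine-tuning/predict/model-transfer.sh
# Check queue status for the submitted job (hypothetical usage).
squeue -u "$USER"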
discriminative/eu/fine-tuning/predict/translate-test.sh
ADDED
Binary file (1.28 kB)
discriminative/eu/fine-tuning/predict/translate-train.sh
ADDED
@@ -0,0 +1,47 @@
+#!/bin/bash
+#SBATCH --partition=hitz-exclusive
+#SBATCH --account=hitz-exclusive
+#SBATCH --job-name=var-nli-pred
+#SBATCH --cpus-per-task=1
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --mem=64GB
+#SBATCH --gres=gpu:2
+#SBATCH --constraint=a100-sxm4
+#SBATCH --time=02:00:00
+#SBATCH --output=var-nli-pre2.log
+#SBATCH --error=var-nli-pre2.err
+#SBATCH --mail-type=REQUEUE
+#SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+
+
+source /scratch/jbengoetxea/phd/.phd_venv_new/bin/activate
+
+for seed in 27 23 33
+do
+for model in ixa-ehu/roberta-eus-euscrawl-large-cased FacebookAI/xlm-roberta-large
+#
+# microsoft/mdeberta-v3-base FacebookAI/xlm-roberta-large ixa-ehu/roberta-eus-euscrawl-large-cased ixa-ehu/berteus-base-cased
+do
+for dataset in xnli_expanded
+# eu native var
+do
+python /scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/fine-tuning/run_xnli_eus.py \
+    --model_name_or_path "/scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/models/translate-train/$model/$seed" \
+    --language eu \
+    --train_language eu \
+    --test_data $dataset \
+    --do_predict \
+    --per_device_train_batch_size 32 \
+    --num_train_epochs 10.0 \
+    --max_seq_length 128 \
+    --output_dir /scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/results/translate-train/$model/$dataset/$seed \
+    --save_steps 50000 \
+    --metric_for_best_model accuracy \
+    --seed $seed \
+    --eval_steps 5000 \
+    --save_total_limit 2
+
+done
+done
+done
discriminative/eu/fine-tuning/requirements.txt
ADDED
@@ -0,0 +1,8 @@
+accelerate >= 0.12.0
+datasets >= 1.8.0
+sentencepiece != 0.1.92
+scipy
+scikit-learn
+protobuf
+torch >= 1.3
+evaluate
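A minimal install sketch to satisfy these pins. Note that transformers itself is not listed here; since run_xnli_eus.py calls check_min_version("4.34.0.dev0"), a development install from source is assumed (hypothetical commands):

python3 -m venv .phd_venv_new
source .phd_venv_new/bin/activate
# Dependencies pinned above.
pip install -r discriminative/eu/fine-tuning/requirements.txt
# Assumed: a dev build of transformers to pass the script's version check.
pip install git+https://github.com/huggingface/transformers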
discriminative/eu/fine-tuning/run_xnli_eus.py
ADDED
@@ -0,0 +1,688 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Finetuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM), evaluation on XNLI-eu (Basque).
+Adapted from `transformers/examples/pytorch/text-classification/run_xnli.py`"""
+
+import logging
+import os
+import random
+import sys
+import warnings
+from dataclasses import dataclass, field
+from typing import Optional
+
+import datasets
+import evaluate
+import numpy as np
+from datasets import load_dataset
+
+import transformers
+from transformers import (
+    AutoConfig,
+    AutoModelForSequenceClassification,
+    AutoTokenizer,
+    DataCollatorWithPadding,
+    EvalPrediction,
+    HfArgumentParser,
+    Trainer,
+    TrainingArguments,
+    default_data_collator,
+    set_seed,
+)
+from transformers.trainer_utils import get_last_checkpoint
+from transformers.utils import check_min_version
+from huggingface_hub.utils import send_telemetry
+from transformers.utils.versions import require_version
+import argparse
+
+import torch
+
+# import wandb
+# wandb.login(key="8495a960a8aceb5bd2f765006a1bd883733f7366")
+# wandb.init(project="xnli-eu-zero-shot-finetuning")
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--ev_dataset", type=str, default="eu", help="Choose a development dataset from the two partitions available on the HF of XNLIeu (eu, eu_mt)")
+parser.add_argument("--pred_dataset", type=str, default="eu", help="Choose a test dataset from the three partitions available on the HF of XNLIeu (eu, eu_mt, eu_native)")
+args, remaining_args = parser.parse_known_args()
+
+ev_dataset = args.ev_dataset
+pred_dataset = args.pred_dataset
+
+
+# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
+check_min_version("4.34.0.dev0")
+
+require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
+
+logger = logging.getLogger(__name__)
+
+@dataclass
+class DataTrainingArguments:
+    """
+    Arguments pertaining to what data we are going to input our model for training and eval.
+
+    Using `HfArgumentParser` we can turn this class
+    into argparse arguments to be able to specify them on
+    the command line.
+    """
+
+    max_seq_length: Optional[int] = field(
+        default=128,
+        metadata={
+            "help": (
+                "The maximum total input sequence length after tokenization. Sequences longer "
+                "than this will be truncated, sequences shorter will be padded."
+            )
+        },
+    )
+    overwrite_cache: bool = field(
+        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
+    )
+    pad_to_max_length: bool = field(
+        default=True,
+        metadata={
+            "help": (
+                "Whether to pad all samples to `max_seq_length`. "
+                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
+            )
+        },
+    )
+    max_train_samples: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": (
+                "For debugging purposes or quicker training, truncate the number of training examples to this "
+                "value if set."
+            )
+        },
+    )
+    max_eval_samples: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": (
+                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
+                "value if set."
+            )
+        },
+    )
+    max_predict_samples: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": (
+                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
+                "value if set."
+            )
+        },
+    )
+
+
+@dataclass
+class ModelArguments:
+    """
+    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+    """
+
+    model_name_or_path: str = field(
+        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+    )
+    language: str = field(
+        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
+    )
+    train_language: Optional[str] = field(
+        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
+    )
+    test_data: str = field(
+        default=None, metadata={"help": "test data type to be used in evaluation"}
+    )
+    config_name: Optional[str] = field(
+        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
+    )
+    tokenizer_name: Optional[str] = field(
+        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
+    )
+    cache_dir: Optional[str] = field(
+        default=None,
+        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
+    )
+    do_lower_case: Optional[bool] = field(
+        default=False,
+        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
+    )
+    use_fast_tokenizer: bool = field(
+        default=True,
+        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
+    )
+    model_revision: str = field(
+        default="main",
+        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
+    )
+    token: str = field(
+        default=None,
+        metadata={
+            "help": (
+                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
+                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
+            )
+        },
+    )
+    use_auth_token: bool = field(
+        default=None,
+        metadata={
+            "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
+        },
+    )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
+    ignore_mismatched_sizes: bool = field(
+        default=False,
+        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
+    )
+
+
+def main():
+    # See all possible arguments in src/transformers/training_args.py
+    # or by passing the --help flag to this script.
+    # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+    model_args, data_args, training_args = parser.parse_args_into_dataclasses(remaining_args)
+
+    #### Fix
+    # model_args.cache_dir = "/gaueko1/users/jbengoetxea004/borra_nazazu"
+    # os.environ['HF_HOME'] = '/gaueko1/users/jbengoetxea004/borra_nazazu'
+
+    if model_args.use_auth_token is not None:
+        warnings.warn("The `use_auth_token` argument is deprecated and will be removed in v4.34.", FutureWarning)
+        if model_args.token is not None:
+            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
+        model_args.token = model_args.use_auth_token
+
+    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
+    # information sent is the one passed as arguments along with your Python/PyTorch versions.
+    # send_telemetry("run_xnli", model_args)
+
+
+    # Setup logging
+    logging.basicConfig(
+        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+        datefmt="%m/%d/%Y %H:%M:%S",
+        handlers=[logging.StreamHandler(sys.stdout)],
+    )
+
+    if training_args.should_log:
+        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
+        transformers.utils.logging.set_verbosity_info()
+
+    log_level = training_args.get_process_log_level()
+    logger.setLevel(log_level)
+    datasets.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.enable_default_handler()
+    transformers.utils.logging.enable_explicit_format()
+
+    # Log on each process the small summary:
+    logger.warning(
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
+    )
+    logger.info(f"Training/evaluation parameters {training_args}")
+
+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
+    # Set seed before initializing model.
+    set_seed(training_args.seed)
+
+    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
+    # download the dataset.
+    # Downloading and loading xnli dataset from the hub.
+    if training_args.do_train:
+        if model_args.train_language is None:
+            train_dataset = load_dataset(
+                "xnli",
+                model_args.language,
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+        elif model_args.train_language == "eu":
+            train_dataset = load_dataset(
+                "HiTZ/xnli-eu",
+                model_args.train_language,
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.train_language == "eu_all":
+            train_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli_expanded_train_correct.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        else:
+            train_dataset = load_dataset(
+                "xnli",
+                model_args.train_language,
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+
+    if training_args.do_eval:
+        if model_args.train_language == "eu_all":
+            eval_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli_expanded_eval_correct.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        else:
+            eval_dataset = load_dataset(
+                "HiTZ/xnli-eu",
+                ev_dataset,
+                split="validation",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+
+    if training_args.do_predict:
+        if model_args.test_data == "eu":
+            predict_dataset = load_dataset(
+                "HiTZ/xnli-eu",
+                pred_dataset,
+                split="test",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "native":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/eu/xnli-eu-native.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "var":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/eu/xnli-eu-var.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "trans_test":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/tartalo01/users/jbengoetxea004/phd/xnli-paraphrasing/data/en/eu-native-in-english-with-labels.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "trans_test_var":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/eu/xnli-native-var-eu-NO-REPETITION.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+
+
+        elif model_args.test_data == "biz":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-test-gipuzkera-done.tsv",
+                delimiter="\t",
+                split="train",
+                encoding="latin-1",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "gip":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-test-gipuzkera-done.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "nafar":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-test-nafar-lapurtera-done.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "biz_nat":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-native-bizkaieraz-done.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "gip_nat":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-native-gipuzkera-done.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "nafar_nat":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli-eu-native-nafar-lapurtera-done.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+        elif model_args.test_data == "xnli_expanded":
+            predict_dataset = load_dataset(
+                'csv',
+                data_files="/scratch/jbengoetxea/phd/XNLIvar/data/test_expanded/xnli_expanded_test_correct.tsv",
+                delimiter="\t",
+                split="train",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+
+    label_list = [0, 1, 2]
+
+    num_labels = len(label_list)
+
+    # Load pretrained model and tokenizer
+    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
+    # download model & vocab.
+    config = AutoConfig.from_pretrained(
+        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
+        num_labels=num_labels,
+        id2label={str(i): label for i, label in enumerate(label_list)},
+        label2id={label: i for i, label in enumerate(label_list)},
+        finetuning_task="xnli",
+        cache_dir=model_args.cache_dir,
+        revision=model_args.model_revision,
+        token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
+    )
+    tokenizer = AutoTokenizer.from_pretrained(
+        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
+        do_lower_case=model_args.do_lower_case,
+        cache_dir=model_args.cache_dir,
+        use_fast=False,
+        revision=model_args.model_revision,
+        token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
+    )
+    model = AutoModelForSequenceClassification.from_pretrained(
+        model_args.model_name_or_path,
+        from_tf=bool(".ckpt" in model_args.model_name_or_path),
+        config=config,
+        cache_dir=model_args.cache_dir,
+        revision=model_args.model_revision,
+        token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
+        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
+    )
+
+    # Preprocessing the datasets
+    # Padding strategy
+    if data_args.pad_to_max_length:
+        padding = "max_length"
+    else:
+        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
+        padding = False
+
+    def preprocess_function(examples):
+        # Tokenize the texts
+        return tokenizer(
+            examples["premise"],
+            examples["hypothesis"],
+            padding=padding,
+            max_length=data_args.max_seq_length,
+            truncation=True,
+        )
+
+    if training_args.do_train:
+        if data_args.max_train_samples is not None:
+            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
+            train_dataset = train_dataset.select(range(max_train_samples))
+        with training_args.main_process_first(desc="train dataset map pre-processing"):
+            train_dataset = train_dataset.map(
+                preprocess_function,
+                batched=True,
+                load_from_cache_file=not data_args.overwrite_cache,
+                desc="Running tokenizer on train dataset",
+            )
+        # Log a few random samples from the training set:
+        for index in random.sample(range(len(train_dataset)), 3):
+            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
+
+    if training_args.do_eval:
+        if data_args.max_eval_samples is not None:
+            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
+            eval_dataset = eval_dataset.select(range(max_eval_samples))
+        with training_args.main_process_first(desc="validation dataset map pre-processing"):
+            eval_dataset = eval_dataset.map(
+                preprocess_function,
+                batched=True,
+                load_from_cache_file=not data_args.overwrite_cache,
+                desc="Running tokenizer on validation dataset",
+            )
+
+    if training_args.do_predict:
+        if data_args.max_predict_samples is not None:
+            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
+            predict_dataset = predict_dataset.select(range(max_predict_samples))
+        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
+            predict_dataset = predict_dataset.map(
+                preprocess_function,
+                batched=True,
+                load_from_cache_file=not data_args.overwrite_cache,
+                desc="Running tokenizer on prediction dataset",
+            )
+    # print(predict_dataset["premise"])
+    # Get the metric function
+    metric = evaluate.load("xnli")
+
+    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
+    # predictions and label_ids field) and has to return a dictionary string to float.
+    def compute_metrics(p: EvalPrediction):
+        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
+        preds = np.argmax(preds, axis=1)
+        return metric.compute(predictions=preds, references=p.label_ids)
+
+    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
+    if data_args.pad_to_max_length:
+        data_collator = default_data_collator
+    elif training_args.fp16:
+        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
+    else:
+        data_collator = None
+
+    # Initialize our Trainer
+    trainer = Trainer(
+        model=model,
+        args=training_args,
+        train_dataset=train_dataset if training_args.do_train else None,
+        eval_dataset=eval_dataset if training_args.do_eval else None,
+        compute_metrics=compute_metrics,
+        tokenizer=tokenizer,
+        data_collator=data_collator,
+    )
+
+    # Training
+    if training_args.do_train:
+        checkpoint = None
+        if training_args.resume_from_checkpoint is not None:
+            checkpoint = training_args.resume_from_checkpoint
+        elif last_checkpoint is not None:
+            checkpoint = last_checkpoint
+        train_result = trainer.train(resume_from_checkpoint=checkpoint)
+        metrics = train_result.metrics
+        max_train_samples = (
+            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
+        )
+        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
+
+        trainer.save_model()  # Saves the tokenizer too for easy upload
+
+        tokenizer.save_pretrained(training_args.output_dir)
+
+        trainer.log_metrics("train", metrics)
+        trainer.save_metrics("train", metrics)
+        trainer.save_state()
+
+    # Evaluation
+    if training_args.do_eval:
+        logger.info("*** Evaluate ***")
+        metrics = trainer.evaluate(eval_dataset=eval_dataset)
+
+        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
+        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
+
+        trainer.log_metrics("eval", metrics)
+        trainer.save_metrics("eval", metrics)
+
+    # Prediction
+    if training_args.do_predict:
+        logger.info("*** Predict ***")
+        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
+
+        max_predict_samples = (
+            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
+        )
+        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
+
+        trainer.log_metrics("predict", metrics)
+        trainer.save_metrics("predict", metrics)
+
+        predictions = np.argmax(predictions, axis=1)
+        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
+        if trainer.is_world_process_zero():
+            with open(output_predict_file, "w") as writer:
+                writer.write("index\tprediction\n")
+                for index, item in enumerate(predictions):
+                    item = label_list[item]
+                    writer.write(f"{index}\t{item}\n")
+
+
+        # Save predictions with premise and hypotheses
+        no_variation = ["native", "es", "trans_test"]
+        variation = ["var", "trans_test_var"]
+
+        if model_args.test_data in no_variation:
+            output_predict_file = os.path.join(training_args.output_dir, "predictions_with_text.txt")
+
+            if trainer.is_world_process_zero():
+                with open(output_predict_file, "w") as writer:
+                    writer.write("index\tpremise\thypothesis\tlabel\tprediction\n")  # Write header
+                    for index, (premise, hypothesis, label, prediction_idx) in enumerate(zip(
+                        predict_dataset["premise"], predict_dataset["hypothesis"], predict_dataset["label"], predictions
+                    )):
+                        predicted_label = label_list[prediction_idx]
+                        writer.write(f"{index}\t{premise}\t{hypothesis}\t{label}\t{predicted_label}\n")
+
+        elif model_args.test_data in variation:
+            output_predict_file = os.path.join(training_args.output_dir, "predictions_with_text.txt")
+
+            if trainer.is_world_process_zero():
+                with open(output_predict_file, "w") as writer:
+                    writer.write("index\tann_id\tchange_type\tprobintzia\tprem_id\tpremise\thypothesis\tlabel\tprediction\n")  # Write header
+                    for index, (ann_id, change_type, probintzia, prem_id, premise, hypothesis, label, prediction_idx) in enumerate(zip(
+                        predict_dataset["ann_id"], predict_dataset["change_type"], predict_dataset["probintzia"], predict_dataset["prem_id"], predict_dataset["premise"], predict_dataset["hypothesis"], predict_dataset["label"], predictions
+                    )):
+                        predicted_label = label_list[prediction_idx]
+                        writer.write(f"{index}\t{ann_id}\t{change_type}\t{probintzia}\t{prem_id}\t{premise}\t{hypothesis}\t{label}\t{predicted_label}\n")
+
+        elif model_args.test_data == "xnli_expanded":
+            output_predict_file = os.path.join(training_args.output_dir, "predictions_with_text.txt")
+
+            if trainer.is_world_process_zero():
+                with open(output_predict_file, "w") as writer:
+                    writer.write("index\tdata_source\tdialect\tprem_id\tpremise\thypothesis_id\thypothesis\tlabel\tprediction\n")  # Write header
+                    for index, (data_source, dialect, prem_id, premise, hypothesis_id, hypothesis, label, prediction_idx) in enumerate(zip(
+                        predict_dataset["data_source"], predict_dataset["dialect"], predict_dataset["prem_id"], predict_dataset["premise"], predict_dataset["hypothesis_id"], predict_dataset["hypothesis"], predict_dataset["label"], predictions
+                    )):
+                        predicted_label = label_list[prediction_idx]
+                        writer.write(f"{index}\t{data_source}\t{dialect}\t{prem_id}\t{premise}\t{hypothesis_id}\t{hypothesis}\t{label}\t{predicted_label}\n")
+
+
+    try:
+        torch.distributed.destroy_process_group()
+    except:
+        pass
+
+
+if __name__ == "__main__":
+    main()
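The script parses arguments in two stages: its own argparse consumes --ev_dataset/--pred_dataset via parse_known_args at import time, and the leftovers are handed to HfArgumentParser inside main(). A hypothetical single-run invocation illustrating the split (model and output path are placeholders):

# --pred_dataset goes to the script's own argparse; everything else is
# forwarded to HfArgumentParser (ModelArguments, DataTrainingArguments,
# TrainingArguments). With --test_data eu, the "eu_native" partition of
# HiTZ/xnli-eu is loaded as the test set.
python run_xnli_eus.py \
    --pred_dataset eu_native \
    --model_name_or_path FacebookAI/xlm-roberta-base \
    --language eu \
    --train_language en \
    --test_data eu \
    --do_predict \
    --output_dir ./out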
discriminative/eu/fine-tuning/train/logs/model_transder-2gpu.err
ADDED
@@ -0,0 +1,141 @@
+The following values were not passed to `accelerate launch` and had defaults used instead:
+	`--num_processes` was set to a value of `2`
+		More than one GPU was found, enabling multi-GPU training.
+		If this was unintended please pass in `--num_processes=1`.
+	`--num_machines` was set to a value of `1`
+To avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.
+/scratch/jbengoetxea/phd/.phd_venv/bin/python3: can't open file '/scratch/jbengoetxea/phd/XNLIvar/scripts/fine-tuning/run_xnli_eus.py': [Errno 2] No such file or directory
+/scratch/jbengoetxea/phd/.phd_venv/bin/python3: can't open file '/scratch/jbengoetxea/phd/XNLIvar/scripts/fine-tuning/run_xnli_eus.py': [Errno 2] No such file or directory
+E1204 11:46:18.152000 1475833 torch/distributed/elastic/multiprocessing/api.py:869] failed (exitcode: 2) local_rank: 0 (pid: 1476026) of binary: /scratch/jbengoetxea/phd/.phd_venv/bin/python3
+Traceback (most recent call last):
+  File "/scratch/jbengoetxea/phd/.phd_venv/bin/accelerate", line 8, in <module>
+    sys.exit(main())
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py", line 50, in main
+    args.func(args)
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/accelerate/commands/launch.py", line 1204, in launch_command
+    multi_gpu_launcher(args)
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/accelerate/commands/launch.py", line 825, in multi_gpu_launcher
+    distrib_run.run(args)
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/torch/distributed/run.py", line 910, in run
+    elastic_launch(
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 138, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 269, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+fine-tuning/run_xnli_eus.py FAILED
+------------------------------------------------------------
+Failures:
+  [1]:
+  time : 2025-12-04_11:46:18
+  host : hyperion-252.sw.ehu.es
+  rank : 1 (local_rank: 1)
+  exitcode : 2 (pid: 1476027)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+------------------------------------------------------------
+Root Cause (first observed failure):
+  [0]:
+  time : 2025-12-04_11:46:18
+  host : hyperion-252.sw.ehu.es
+  rank : 0 (local_rank: 0)
+  exitcode : 2 (pid: 1476026)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+srun: error: hyperion-252: task 0: Exited with exit code 1
+The following values were not passed to `accelerate launch` and had defaults used instead:
+	`--num_processes` was set to a value of `2`
+		More than one GPU was found, enabling multi-GPU training.
+		If this was unintended please pass in `--num_processes=1`.
+	`--num_machines` was set to a value of `1`
+To avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.
+/scratch/jbengoetxea/phd/.phd_venv/bin/python3: can't open file '/scratch/jbengoetxea/phd/XNLIvar/scripts/fine-tuning/run_xnli_eus.py': [Errno 2] No such file or directory
+/scratch/jbengoetxea/phd/.phd_venv/bin/python3: can't open file '/scratch/jbengoetxea/phd/XNLIvar/scripts/fine-tuning/run_xnli_eus.py': [Errno 2] No such file or directory
+E1204 11:46:27.775000 1476160 torch/distributed/elastic/multiprocessing/api.py:869] failed (exitcode: 2) local_rank: 0 (pid: 1476196) of binary: /scratch/jbengoetxea/phd/.phd_venv/bin/python3
+Traceback (most recent call last):
+  File "/scratch/jbengoetxea/phd/.phd_venv/bin/accelerate", line 8, in <module>
+    sys.exit(main())
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py", line 50, in main
+    args.func(args)
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/accelerate/commands/launch.py", line 1204, in launch_command
+    multi_gpu_launcher(args)
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/accelerate/commands/launch.py", line 825, in multi_gpu_launcher
+    distrib_run.run(args)
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/torch/distributed/run.py", line 910, in run
+    elastic_launch(
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 138, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 269, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+fine-tuning/run_xnli_eus.py FAILED
+------------------------------------------------------------
+Failures:
+  [1]:
+  time : 2025-12-04_11:46:27
+  host : hyperion-252.sw.ehu.es
+  rank : 1 (local_rank: 1)
+  exitcode : 2 (pid: 1476197)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+------------------------------------------------------------
+Root Cause (first observed failure):
+  [0]:
+  time : 2025-12-04_11:46:27
+  host : hyperion-252.sw.ehu.es
+  rank : 0 (local_rank: 0)
+  exitcode : 2 (pid: 1476196)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+srun: error: hyperion-252: task 0: Exited with exit code 1
+The following values were not passed to `accelerate launch` and had defaults used instead:
+	`--num_processes` was set to a value of `2`
+		More than one GPU was found, enabling multi-GPU training.
+		If this was unintended please pass in `--num_processes=1`.
+	`--num_machines` was set to a value of `1`
+To avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.
+/scratch/jbengoetxea/phd/.phd_venv/bin/python3: can't open file '/scratch/jbengoetxea/phd/XNLIvar/scripts/fine-tuning/run_xnli_eus.py': [Errno 2] No such file or directory
+/scratch/jbengoetxea/phd/.phd_venv/bin/python3: can't open file '/scratch/jbengoetxea/phd/XNLIvar/scripts/fine-tuning/run_xnli_eus.py': [Errno 2] No such file or directory
+E1204 11:46:37.018000 1476330 torch/distributed/elastic/multiprocessing/api.py:869] failed (exitcode: 2) local_rank: 0 (pid: 1476362) of binary: /scratch/jbengoetxea/phd/.phd_venv/bin/python3
+Traceback (most recent call last):
+  File "/scratch/jbengoetxea/phd/.phd_venv/bin/accelerate", line 8, in <module>
+    sys.exit(main())
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py", line 50, in main
+    args.func(args)
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/accelerate/commands/launch.py", line 1204, in launch_command
+    multi_gpu_launcher(args)
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/accelerate/commands/launch.py", line 825, in multi_gpu_launcher
+    distrib_run.run(args)
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/torch/distributed/run.py", line 910, in run
+    elastic_launch(
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 138, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+  File "/scratch/jbengoetxea/phd/.phd_venv/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 269, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+fine-tuning/run_xnli_eus.py FAILED
+------------------------------------------------------------
+Failures:
+  [1]:
+  time : 2025-12-04_11:46:37
+  host : hyperion-252.sw.ehu.es
+  rank : 1 (local_rank: 1)
+  exitcode : 2 (pid: 1476363)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+------------------------------------------------------------
+Root Cause (first observed failure):
+  [0]:
+  time : 2025-12-04_11:46:37
+  host : hyperion-252.sw.ehu.es
+  rank : 0 (local_rank: 0)
+  exitcode : 2 (pid: 1476362)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+srun: error: hyperion-252: task 0: Exited with exit code 1
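The Errno 2 failures above point at /scratch/jbengoetxea/phd/XNLIvar/scripts/fine-tuning/run_xnli_eus.py, while the file in this commit lives under scripts/discriminative/eu/fine-tuning/: the training scripts pass the relative path fine-tuning/run_xnli_eus.py, which resolves against the submission working directory. A sketch of a launch that avoids this, assuming the layout in this commit:

# Submit from the directory that contains fine-tuning/, so the relative
# path passed to accelerate launch resolves correctly (or use an absolute
# path, as the predict scripts do).
cd /scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu
sbatch fine-tuning/train/model-transfer-2gpu.sh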
discriminative/eu/fine-tuning/train/logs/model_transder.err
ADDED
The diff for this file is too large to render.
discriminative/eu/fine-tuning/train/logs/model_transfer-2gpu.log
ADDED
File without changes
discriminative/eu/fine-tuning/train/logs/model_transfer.log
ADDED
The diff for this file is too large to render.
discriminative/eu/fine-tuning/train/model-transfer-2gpu.sh
ADDED
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+#SBATCH --qos=regular
+#SBATCH --job-name=nli-model-transfer
+#SBATCH --cpus-per-task=4
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --mem=64GB
+#SBATCH --gres=gpu:2
+#SBATCH --constraint=a100
+#SBATCH --output=/scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/fine-tuning/train/logs/model_transfer-2gpu.log
+#SBATCH --error=/scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/fine-tuning/train/logs/model_transder-2gpu.err
+#SBATCH --time=01-00:00:00 #ee-hh:mm:ss
+#SBATCH --mail-type=REQUEUE
+#SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+
+
+source /scratch/jbengoetxea/phd/.phd_venv/bin/activate
+
+export TORCHDYNAMO_DISABLE=1
+
+for seed in 23 27 33
+do
+for model in answerdotai/ModernBERT-large
+# microsoft/mdeberta-v3-base FacebookAI/xlm-roberta-large FacebookAI/xlm-roberta-base
+do
+MASTER_PORT=9327
+MAIN_PROCESS_IP=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+srun accelerate launch \
+    --mixed_precision bf16 \
+    --dynamo_backend "no" \
+    fine-tuning/run_xnli_eus.py \
+    --model_name_or_path $model \
+    --language eu \
+    --train_language en \
+    --do_train \
+    --do_eval \
+    --per_device_train_batch_size 16 \
+    --learning_rate 10e-6 \
+    --num_train_epochs 10.0 \
+    --max_seq_length 128 \
+    --bf16 \
+    --output_dir /scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/models/model-transfer/$model/$seed \
+    --save_steps 50000 \
+    --eval_strategy steps \
+    --save_strategy steps \
+    --load_best_model_at_end true \
+    --metric_for_best_model accuracy \
+    --seed $seed \
+    --eval_steps 5000 \
+    --logging_steps 25 \
+    --torch_compile false \
+    --save_total_limit 2
+done
+done
discriminative/eu/fine-tuning/train/model-transfer.sh
ADDED
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+#SBATCH --partition=hitz-exclusive
+#SBATCH --account=hitz-exclusive
+#SBATCH --job-name=nli-model-transfer
+#SBATCH --cpus-per-task=4
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --mem=64GB
+#SBATCH --gres=gpu:4
+#SBATCH --constraint=a100-sxm4
+#SBATCH --output=/scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/fine-tuning/train/logs/model_transfer.log
+#SBATCH --error=/scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/fine-tuning/train/logs/model_transder.err
+#SBATCH --time=01-00:00:00 #ee-hh:mm:ss
+#SBATCH --mail-type=REQUEUE
+#SBATCH --mail-user=jaione.bengoetxea@ehu.eus
+
+
+source /scratch/jbengoetxea/phd/.phd_venv_new/bin/activate
+
+export TORCHDYNAMO_DISABLE=1
+
+for seed in 23
+# 23 27 33
+do
+for model in jhu-clsp/mmBERT-base
+# microsoft/mdeberta-v3-base FacebookAI/xlm-roberta-large FacebookAI/xlm-roberta-base
+do
+MASTER_PORT=9327
+MAIN_PROCESS_IP=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+srun accelerate launch \
+    --num_processes 4 \
+    --num_machines 1 \
+    --mixed_precision bf16 \
+    --dynamo_backend "no" \
+    --rdzv_backend static \
+    --main_process_ip $MAIN_PROCESS_IP \
+    --main_process_port $MASTER_PORT \
+    --machine_rank $SLURM_NODEID \
+    fine-tuning/run_xnli_eus.py \
+    --model_name_or_path $model \
+    --language eu \
+    --train_language en \
+    --do_train \
+    --do_eval \
+    --per_device_train_batch_size 8 \
+    --learning_rate 10e-6 \
+    --num_train_epochs 10.0 \
+    --max_seq_length 128 \
+    --output_dir /scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/models/model-transfer/$model/$seed \
+    --save_steps 50000 \
+    --eval_strategy steps \
+    --save_strategy steps \
+    --bf16 \
+    --load_best_model_at_end true \
+    --metric_for_best_model accuracy \
+    --seed $seed \
+    --logging_steps 25 \
+    --eval_steps 5000 \
+    --torch_compile false \
+    --save_total_limit 2
+done
+done
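Worked arithmetic for the effective batch size of this launch, assuming the Trainer default of no gradient accumulation:

# per_device_train_batch_size x num_processes x gradient_accumulation_steps
#            8               x       4       x             1              = 32
# The 2-GPU variant above keeps the same effective batch: 16 x 2 x 1 = 32.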
discriminative/eu/fine-tuning/train/translate-train.sh
ADDED
@@ -0,0 +1,75 @@
+#!/bin/bash
+#SBATCH --partition=hitz-exclusive
+#SBATCH --account=hitz-exclusive
+#SBATCH --job-name=var-nli-tra
+#SBATCH --cpus-per-task=1
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --time=10:00:00
+#SBATCH --mem=32GB
+#SBATCH --gres=gpu:2
+#SBATCH --output=translate-train.log
+#SBATCH --error=translate-train.err
+
+source /scratch/jbengoetxea/phd/.phd_venv_new/bin/activate
+
+export TORCHDYNAMO_DISABLE=1
+
+for seed in 23 27 33
+do
+for model in jhu-clsp/mmBERT-base
+do
+MASTER_PORT=9327
+MAIN_PROCESS_IP=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+srun accelerate launch \
+    --num_machines 1 \
+    --mixed_precision bf16 \
+    --dynamo_backend "no" \
+    --rdzv_backend static \
+    --main_process_ip $MAIN_PROCESS_IP \
+    --main_process_port $MASTER_PORT \
+    --machine_rank $SLURM_NODEID \
+    fine-tuning/run_xnli_eus.py \
+    --model_name_or_path $model \
+    --language eu \
+    --train_language eu \
+    --do_train \
+    --do_eval \
+    --per_device_train_batch_size 32 \
+    --learning_rate 5e-5 \
+    --num_train_epochs 10.0 \
+    --max_seq_length 128 \
+    --output_dir /scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/models/translate-train/$model/$seed \
+    --save_steps 50000 \
+    --load_best_model_at_end 1 \
+    --metric_for_best_model accuracy \
+    --seed $seed \
+    --eval_strategy steps \
+    --bf16 \
+    --eval_steps 5000 \
+    --save_total_limit 2
+done
+
+# for model in ixa-ehu/roberta-eus-euscrawl-large-cased FacebookAI/xlm-roberta-large
+# # microsoft/mdeberta-v3-base FacebookAI/xlm-roberta-large ixa-ehu/roberta-eus-euscrawl-large-cased
+# do
+# python fine-tuning/run_xnli_eus.py \
+#     --model_name_or_path $model \
+#     --language eu \
+#     --train_language eu_all \
+#     --do_train \
+#     --do_eval \
+#     --per_device_train_batch_size 32 \
+#     --learning_rate 10e-6 \
+#     --num_train_epochs 10.0 \
+#     --max_seq_length 128 \
+#     --output_dir /scratch/jbengoetxea/phd/XNLIvar/scripts/discriminative/eu/models/translate-train/eu_all_corrected/$model/$seed \
+#     --save_steps 50000 \
+#     --metric_for_best_model accuracy \
+#     --seed $seed \
+#     --eval_strategy steps \
+#     --eval_steps 5000 \
+#     --save_total_limit 2
+# done
+
+done