import torch
import transformers
import random
# Define the model configuration and tokenizer
config = transformers.AutoConfig.from_pretrained("bert-base-uncased")
tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-uncased")
# Load the pretrained model
model = transformers.AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", config=config)
# Set the hyperparameters for fine-tuning
num_epochs = 3
batch_size = 32
learning_rate = 2e-5
# Create the model optimizer
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
# Define the data collator: tokenize a list of examples and stack them into model-ready tensors
def collate_fn(batch):
    texts = [item["text"] for item in batch]
    encodings = tokenizer(texts, padding="max_length", truncation=True, return_tensors="pt")
    labels = torch.tensor([item["label"] for item in batch])
    return {"input_ids": encodings["input_ids"], "attention_mask": encodings["attention_mask"], "labels": labels}
# Split the training data into training and validation sets
def split_data(data, validation_size=0.2):
    validation_indices = set(random.sample(range(len(data)), int(len(data) * validation_size)))
    train_data = []
    val_data = []
    for i, item in enumerate(data):
        if i in validation_indices:
            val_data.append(item)
        else:
            train_data.append(item)
    return train_data, val_data
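# NOTE: the original script never loads `train_data` before splitting it.
# As an illustrative assumption, a tiny in-memory dataset of
# {"text": str, "label": int} dicts is built here so the script runs end to
# end; replace it with your real dataset.
train_data = [
    {"text": "A thoroughly enjoyable film with a strong cast.", "label": 1},
    {"text": "The plot was dull and the pacing dragged.", "label": 0},
] * 16  # repeat so both splits receive a few examples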
# Split the training data
train_data, val_data = split_data(train_data, validation_size=0.2)
# Create the training and validation data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
# Finetune the model
for epoch in range(num_epochs):
    # Train the model for one epoch
    model.train()
    train_loss = 0.0
    for batch in train_loader:
        optimizer.zero_grad()
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    # Evaluate the model on the validation dataset
    model.eval()
    val_loss = 0.0
    with torch.no_grad():
        for batch in val_loader:
            outputs = model(**batch)
            val_loss += outputs.loss.item()
    print("Epoch {}: Train Loss: {:.4f} Val Loss: {:.4f}".format(
        epoch + 1, train_loss / len(train_loader), val_loss / len(val_loader)))
# Save the fine-tuned model
model.save_pretrained("finetuned_model")
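# Saving the tokenizer alongside the model makes the output directory
# self-contained; this and the reload sketch below are illustrative
# additions, not part of the original script.
tokenizer.save_pretrained("finetuned_model")
# Sketch: reload the fine-tuned model and tokenizer for later inference
reloaded_model = transformers.AutoModelForSequenceClassification.from_pretrained("finetuned_model")
reloaded_tokenizer = transformers.AutoTokenizer.from_pretrained("finetuned_model")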