cuong1206 committed on
Commit c581eb9 · verified · 1 Parent(s): 73b7597

Create BERT.ibpynb

Files changed (1)
  1. BERT.ibpynb +157 -0
BERT.ibpynb ADDED
@@ -0,0 +1,157 @@
#!pip install -U transformers
from transformers import TrainingArguments

# NOTE: this preliminary configuration is redefined by the full TrainingArguments in step 7 below.
training_args = TrainingArguments(
    output_dir="./results",
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=3,
    weight_decay=0.01,
    logging_dir="./logs",
    logging_steps=10
)

# Reinstall to get an up-to-date transformers/datasets/accelerate stack, then print the version.
!pip uninstall -y transformers
!pip install -U transformers datasets accelerate
!pip show transformers | grep Version

# Disable Weights & Biases logging so Trainer does not prompt for a wandb login.
import os
os.environ["WANDB_DISABLED"] = "true"

# ===============================================
# 1️⃣ INSTALL LIBRARIES
# ===============================================
!pip install -q transformers datasets torch

# ===============================================
# 2️⃣ CREATE A SMALL MOCK DATASET (CSV)
# ===============================================
import pandas as pd

data = {
    "text": [
        "I love this movie, it was fantastic!",
        "This product is terrible and useless.",
        "What a great experience, I will come again!",
        "I hate this item, waste of money.",
        "Absolutely amazing service and food.",
        "Worst app I have ever used.",
        "The phone works perfectly and fast.",
        "It broke after two days, horrible!",
        "Very happy with my purchase.",
        "Not worth the price at all."
    ],
    "label": [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]  # 1 = positive, 0 = negative
}

df = pd.DataFrame(data)
df.to_csv("sentiment_data.csv", index=False)
print("✅ Sample data created:\n")
print(df.head())

# ===============================================
# 3️⃣ LOAD & SPLIT THE DATA
# ===============================================
from datasets import load_dataset

dataset = load_dataset("csv", data_files="sentiment_data.csv")
dataset = dataset["train"].train_test_split(test_size=0.3, seed=42)

train_dataset = dataset["train"]
test_dataset = dataset["test"]

print("\n🔹 Train samples:", len(train_dataset))
print("🔹 Test samples:", len(test_dataset))

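# With only 10 rows and test_size=0.3, the split above yields 7 training and 3 test
# examples, so the printed counts should read 7 and 3.
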
# ===============================================
# 4️⃣ TOKENIZATION (CONVERT TEXT INTO BERT INPUTS)
# ===============================================
from transformers import AutoTokenizer

model_name = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)

def preprocess_function(examples):
    return tokenizer(
        examples["text"],
        padding="max_length",
        truncation=True,
        max_length=64,
    )

train_tokenized = train_dataset.map(preprocess_function, batched=True)
test_tokenized = test_dataset.map(preprocess_function, batched=True)

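# Quick sanity check of what the tokenizer adds to each example (a minimal sketch,
# assuming the cells above have run; the variable name `example` is only illustrative).
example = train_tokenized[0]
print(list(example.keys()))            # text, label, input_ids, token_type_ids, attention_mask
print(example["input_ids"][:10])       # wordpiece ids, starting with the [CLS] token (id 101)
print(sum(example["attention_mask"]))  # number of real (non-padding) tokens
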
# ===============================================
# 5️⃣ PREPARE THE BERT MODEL FOR CLASSIFICATION
# ===============================================
import torch
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer

# The pretrained encoder is loaded with a fresh, randomly initialized 2-class
# classification head, which is why transformers warns that some weights are
# newly initialized and need fine-tuning.
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# ===============================================
# 6️⃣ DEFINE THE EVALUATION METRICS
# ===============================================
from sklearn.metrics import accuracy_score, f1_score

def compute_metrics(eval_pred):
    # Trainer passes predictions and labels as NumPy arrays.
    logits, labels = eval_pred
    preds = logits.argmax(axis=-1)
    acc = accuracy_score(labels, preds)
    f1 = f1_score(labels, preds)
    return {"accuracy": acc, "f1": f1}

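# Illustrative self-test of compute_metrics with dummy logits (demonstration values
# only; the real inputs come from Trainer during evaluation).
import numpy as np
dummy_logits = np.array([[0.1, 0.9], [2.0, -1.0], [0.3, 0.7]])
dummy_labels = np.array([1, 0, 1])
print(compute_metrics((dummy_logits, dummy_labels)))  # expected: accuracy 1.0, f1 1.0
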
# ===============================================
# 7️⃣ TRAINING CONFIGURATION
# ===============================================
training_args = TrainingArguments(
    output_dir="./results",
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=3,
    weight_decay=0.01,
    eval_strategy="epoch",   # named evaluation_strategy in older transformers releases
    save_strategy="epoch",
    logging_dir="./logs",
    logging_steps=10
)

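# Note on these settings: on a single device, the 7-example training split with a
# batch size of 4 gives 2 optimizer steps per epoch (6 steps over 3 epochs), so the
# step-based loss logging (logging_steps=10) never triggers; only the per-epoch
# evaluation results are reported.
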
# ===============================================
# 8️⃣ TRAIN THE MODEL
# ===============================================
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_tokenized,
    eval_dataset=test_tokenized,
    tokenizer=tokenizer,   # newer transformers releases also accept processing_class=
    compute_metrics=compute_metrics
)

trainer.train()

# ===============================================
# 9️⃣ EVALUATION
# ===============================================
eval_results = trainer.evaluate()
print("\n📊 Evaluation results:", eval_results)

# ===============================================
# 🔟 TRY SOME PREDICTIONS
# ===============================================
text_samples = [
    "I really love this product!",
    "This is the worst movie ever."
]

# Run inference on the same device the model was trained on, without gradients.
model.eval()
inputs = tokenizer(text_samples, padding=True, truncation=True, max_length=64, return_tensors="pt")
inputs = inputs.to(model.device)
with torch.no_grad():
    outputs = model(**inputs)
preds = torch.argmax(outputs.logits, dim=1)

for text, label in zip(text_samples, preds):
    print(f"\n🗣️ {text}")
    print("➡️ Prediction:", "Positive (1)" if label == 1 else "Negative (0)")