Cogent-ai committed
Commit aa9a845 · verified · 1 Parent(s): 707ee6a

Upload ra_integration_design.py

Files changed (1)
ra_integration_design.py +478 -0
ra_integration_design.py ADDED
@@ -0,0 +1,478 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from safetensors.torch import save_file as safetensors_save_file
from torchvision import models
import safetensors
import os

# --- 1. Original model architecture (copied from reconstruct_original_model.py) ---
class OriginalMoETransformerBlock(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, num_experts):
        super().__init__()
        self.num_experts = num_experts
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim

        self.experts_w1 = nn.ModuleList([nn.Linear(input_dim, hidden_dim, bias=False) for _ in range(num_experts)])
        self.experts_w2 = nn.ModuleList([nn.Linear(hidden_dim, output_dim, bias=False) for _ in range(num_experts)])
        self.gate = nn.Linear(input_dim, num_experts, bias=False)

    def forward(self, x):
        gate_logits = self.gate(x)
        weights = torch.softmax(gate_logits, dim=-1)

        expert_outputs = torch.empty(x.shape[0], self.num_experts, self.output_dim, device=x.device)
        for i in range(self.num_experts):
            expert_outputs[:, i, :] = self.experts_w2[i](self.experts_w1[i](x))

        output = torch.sum(expert_outputs * weights.unsqueeze(-1), dim=1)

        return output

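# Illustrative sketch (added here as an assumption, not part of the uploaded file):
# a standalone shape check for the MoE block, to call manually. The dimensions are
# arbitrary; the point is that the gate-weighted mixture over experts preserves output_dim.
def _smoke_test_moe_block():
    block = OriginalMoETransformerBlock(input_dim=64, hidden_dim=128, output_dim=64, num_experts=4)
    x = torch.randn(8, 64)   # batch of 8 feature vectors
    out = block(x)           # softmax gate mixes the 4 expert outputs
    assert out.shape == (8, 64)
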
class OriginalModelReconstructed(nn.Module):
    def __init__(self, vocab_size, embedding_dim, moe_hidden_dim, num_experts):
        super().__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.transformer_block_0 = OriginalMoETransformerBlock(embedding_dim, moe_hidden_dim, embedding_dim, num_experts)
        self.norm = nn.LayerNorm(embedding_dim)
        self.output_layer = nn.Linear(embedding_dim, vocab_size)

        # Action prediction head for Comet-like capability.
        # Emits a structured action (e.g. [action_type, x_coord, y_coord, element_id_token]),
        # simplified here to a small MLP that outputs a fixed-length vector.
        self.action_prediction_head = nn.Sequential(
            nn.Linear(embedding_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 4)  # assume 4 outputs: action_type, x, y, element_id_token
        )

    def forward(self, x):
        x = self.embeddings(x).squeeze(1)
        x = self.transformer_block_0(x)
        x = self.norm(x)
        x = self.output_layer(x)
        return x

# --- 2. Memory Symbiosis Engine module (Person X Memory Symbiosis Engine) ---
class MemorySymbiosisEngine(nn.Module):
    def __init__(self, embedding_dim, memory_slots=10, memory_dim=256):
        super().__init__()
        self.memory_slots = memory_slots
        self.memory_dim = memory_dim

        self.memory_keys = nn.Parameter(torch.randn(memory_slots, memory_dim))
        self.memory_values = nn.Parameter(torch.randn(memory_slots, embedding_dim))

        self.query_projection = nn.Linear(embedding_dim, memory_dim)
        self.memory_read_fusion = nn.Linear(embedding_dim + embedding_dim, embedding_dim)

    def forward(self, current_features, user_profile_embedding=None):
        query = self.query_projection(current_features)
        attention_scores = torch.matmul(query, self.memory_keys.T)
        attention_weights = torch.softmax(attention_scores, dim=-1)
        read_memory = torch.matmul(attention_weights, self.memory_values)

        fused_with_memory = torch.cat((current_features, read_memory), dim=-1)
        output_features = self.memory_read_fusion(fused_with_memory)

        return output_features

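# Illustrative sketch (an assumption, not in the uploaded file): exercising the memory
# engine on its own. Keys and values are random at init, so the attention read is only
# meaningful after training; this just verifies the query/read/fuse plumbing.
def _smoke_test_memory_engine():
    engine = MemorySymbiosisEngine(embedding_dim=128, memory_slots=10, memory_dim=256)
    features = torch.randn(2, 128)
    out = engine(features)   # attention over 10 slots, fused back to embedding_dim
    assert out.shape == (2, 128)
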
# --- 3. Reality Anchor (RA) technology module ---
class RealityAnchor(nn.Module):
    def __init__(self, embedding_dim, fact_memory_size=1000, fact_dim=256, threshold=0.7):
        super().__init__()
        self.fact_memory_size = fact_memory_size
        self.fact_dim = fact_dim
        self.threshold = threshold

        # Built-in fact memory bank, simulated here as a learnable tensor.
        # In practice this would be a pretrained, compressed knowledge representation.
        self.fact_memory_bank = nn.Parameter(torch.randn(fact_memory_size, fact_dim))

        # Query projection for the fact-attention mechanism
        self.fact_query_projection = nn.Linear(embedding_dim, fact_dim)

        # Adjustment-signal generation module (a lightweight neural network)
        self.adjustment_module = nn.Sequential(
            nn.Linear(embedding_dim + fact_dim, embedding_dim // 2),
            nn.ReLU(),
            nn.Linear(embedding_dim // 2, embedding_dim)  # same dimension as embedding_dim, used as the adjustment
        )

        # Fact-consistency scorer (simplified here to a single linear layer)
        self.consistency_scorer = nn.Linear(fact_dim, 1)

    def forward(self, current_token_embedding, generated_context_embedding):
        # current_token_embedding: embedding of the token being generated (batch_size, embedding_dim)
        # generated_context_embedding: embedding of the generation context so far (batch_size, embedding_dim)

        # 1. Dynamic fact retrieval and fusion (simplified here to context-based retrieval).
        # Assume generated_context_embedding carries enough information to query facts.
        fact_query = self.fact_query_projection(generated_context_embedding)  # (batch_size, fact_dim)

        # Approximate nearest-neighbor search (simplified to dot-product similarity)
        similarity_scores = torch.matmul(fact_query, self.fact_memory_bank.T)  # (batch_size, fact_memory_size)

        # Take the most relevant fact (top-1 here)
        top_fact_scores, top_fact_indices = torch.topk(similarity_scores, 1, dim=-1)
        retrieved_fact = self.fact_memory_bank[top_fact_indices.squeeze(-1)]  # (batch_size, fact_dim)

        # 2. Fact Consistency Scoring: measures how consistent the current token embedding
        # is with the retrieved fact. Simplified here to scoring the retrieved fact directly;
        # a real system would likely need a richer interaction between the two.
        fact_consistency_score = torch.sigmoid(self.consistency_scorer(retrieved_fact))  # (batch_size, 1)

        # 3. Real-Time Generation Adjustment
        adjustment_signal = torch.zeros_like(current_token_embedding)  # initialize the adjustment signal

        # Trigger an adjustment when the fact-consistency score falls below the threshold.
        # Note: this thresholding is simplified; real applications may need finer control.
        needs_adjustment = (fact_consistency_score < self.threshold).squeeze(-1)  # (batch_size,)

        if needs_adjustment.any():
            # Combine the current token embedding with the retrieved fact to produce the adjustment signal
            combined_for_adjustment = torch.cat((current_token_embedding[needs_adjustment], retrieved_fact[needs_adjustment]), dim=-1)
            adjustment_signal[needs_adjustment] = self.adjustment_module(combined_for_adjustment)

        # Apply the adjustment signal to the current token embedding
        adjusted_token_embedding = current_token_embedding + adjustment_signal

        return adjusted_token_embedding, fact_consistency_score

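# Illustrative sketch (an assumption, not in the uploaded file): running the Reality
# Anchor standalone. Rows whose consistency score is at or above the threshold pass
# through with a zero adjustment signal; the rest are nudged toward the retrieved fact.
def _smoke_test_reality_anchor():
    ra = RealityAnchor(embedding_dim=128, fact_memory_size=100, fact_dim=32, threshold=0.7)
    token_emb = torch.randn(4, 128)
    context_emb = torch.randn(4, 128)
    adjusted, score = ra(token_emb, context_emb)
    assert adjusted.shape == (4, 128) and score.shape == (4, 1)
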
# --- 4. Multidimensional Generation Orchestrator (MGO) technology module ---
class MultidimensionalGenerationOrchestrator(nn.Module):
    def __init__(self, embedding_dim, vocab_size, max_output_length=50, num_intent_types=5):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.vocab_size = vocab_size
        self.max_output_length = max_output_length
        self.num_intent_types = num_intent_types

        # 1. Generation Intent Parsing and Decomposition,
        # simplified here to a linear layer that predicts the intent type.
        self.intent_parser = nn.Linear(embedding_dim, num_intent_types)
        self.intent_embedding = nn.Embedding(num_intent_types, embedding_dim)  # intent embedding

        # 2. Multidimensional Generation Planning,
        # simplified here to adjusting generation parameters based on the intent.
        self.planning_layer = nn.Linear(embedding_dim, 4)  # output controls: richness, coherence, length, style

        # 3. Adaptive Generation Guidance
        self.guidance_controller = nn.Linear(embedding_dim * 2 + 4, embedding_dim)  # fuses context, intent, planning params
        self.length_controller = nn.Linear(embedding_dim, 1)  # predicts the generation-termination probability

    def forward(self, context_embedding, current_output_length=0, target_intent_idx=None):
        # context_embedding: context embedding fusing all features (batch_size, embedding_dim)

        # 1. Generation intent parsing (simplified: if unspecified, predict from context)
        if target_intent_idx is None:
            intent_logits = self.intent_parser(context_embedding)
            target_intent_idx = torch.argmax(intent_logits, dim=-1)  # (batch_size,)

        intent_embedding = self.intent_embedding(target_intent_idx)  # (batch_size, embedding_dim)

        # 2. Multidimensional generation planning
        planning_params = torch.sigmoid(self.planning_layer(intent_embedding))  # (batch_size, 4)
        # planning_params: [content_richness, semantic_coherence, length_target_ratio, style_adaptability]

        # 3. Adaptive generation guidance:
        # combine context, intent embedding, and planning parameters
        combined_guidance_input = torch.cat([
            context_embedding,
            intent_embedding,
            planning_params
        ], dim=-1)
        guidance_signal = self.guidance_controller(combined_guidance_input)  # (batch_size, embedding_dim)

        # Length control: this signal can be used to adjust the probability
        # of terminating generation.
        length_control_signal = torch.sigmoid(self.length_controller(context_embedding))  # (batch_size, 1)

        # Return the guidance signal and planning parameters for the outer model
        # to use when steering the generation process.
        return guidance_signal, planning_params, length_control_signal

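# Illustrative sketch (an assumption, not in the uploaded file): calling the MGO both
# with a predicted intent and with a caller-forced intent index.
def _smoke_test_mgo():
    mgo = MultidimensionalGenerationOrchestrator(embedding_dim=128, vocab_size=10000)
    ctx = torch.randn(2, 128)
    guidance, plan, length_sig = mgo(ctx)  # intent predicted from context
    assert guidance.shape == (2, 128) and plan.shape == (2, 4) and length_sig.shape == (2, 1)
    # Force specific intents (indices must be < num_intent_types, 5 by default)
    guidance, plan, length_sig = mgo(ctx, target_intent_idx=torch.tensor([0, 3]))
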
# --- 5. Agent Matrix Intelligent Agent Ecosystem Framework interface ---
class AgentMatrixInterface(nn.Module):
    def __init__(self, model_core):
        super().__init__()
        self.model_core = model_core
        self.task_mapping = {
            "predict_action": self._predict_action,  # Comet-like action prediction
            "analyze_image_text": self._analyze_image_text,
            "retrieve_memory": self._retrieve_memory,
            "generate_response": self._generate_response,
            "generate_anchored_response": self._generate_anchored_response  # RA command
        }

    def _analyze_image_text(self, text_input, image_input):
        return self.model_core(text_input, image_input, return_fused_features=True)

    def _retrieve_memory(self, query_text_input, query_image_input=None):
        text_features = self.model_core.embeddings(query_text_input)
        if text_features.dim() == 3:
            text_features = text_features.squeeze(1)

        if query_image_input is not None:
            image_features = self.model_core.vision_encoder(query_image_input)
            image_features = image_features.view(image_features.size(0), -1)
            image_features = self.model_core.vision_projection(image_features)
            current_features = self.model_core.initial_fusion_layer(torch.cat((text_features, image_features), dim=1))
        else:
            current_features = text_features

        return self.model_core.memory_engine(current_features)

    def _generate_response(self, text_input, image_input):
        # Call the original forward pass with RA disabled
        return self.model_core(text_input, image_input, use_ra=False)

    def _predict_action(self, text_input, image_input):
        # Visual perception and action prediction:
        # call the forward pass with action prediction enabled
        return self.model_core(text_input, image_input, return_action_prediction=True)

    def _generate_anchored_response(self, text_input, image_input):
        # Call the forward pass with RA enabled
        return self.model_core(text_input, image_input, use_ra=True)

    def forward(self, command, **kwargs):
        if command not in self.task_mapping:
            raise ValueError(f"Unknown command: {command}")
        # Dispatch to the mapped handler. Generation commands return tuples:
        #   "generate_anchored_response" -> (logits, consistency_score, planning_params, length_control_signal)
        #   "generate_response"          -> (logits, planning_params, length_control_signal)
        return self.task_mapping[command](**kwargs)

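# Illustrative sketch (an assumption, not in the uploaded file): task_mapping is a plain
# dispatch table, so new agent commands can be registered without touching forward().
# The command name and handler below are hypothetical.
def register_describe_scene_command(interface):
    def _describe_scene(text_input, image_input):
        # Reuse the fused multimodal features as a stand-in scene representation
        return interface.model_core(text_input, image_input, return_fused_features=True)
    interface.task_mapping["describe_scene"] = _describe_scene
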
# --- 6. Fully integrated model architecture (with RA) ---
class FullyIntegratedModelWithRA(nn.Module):
    def __init__(self, original_model_path, vocab_size, embedding_dim, moe_hidden_dim, num_experts, visual_feature_dim=256,
                 memory_slots=10, memory_dim=256, fact_memory_size=1000, fact_dim=256, ra_threshold=0.7):
        super().__init__()

        state_dict = {}
        if original_model_path and os.path.exists(original_model_path):
            with safetensors.safe_open(original_model_path, framework="pt", device="cpu") as f:
                for key in f.keys():
                    state_dict[key] = f.get_tensor(key)

        original_vocab_size = 5000   # assumed vocabulary size of the original model
        original_embedding_dim = 64  # assumed embedding_dim of the original model

        # If embedding_dim or moe_hidden_dim differ from the original model's dimensions,
        # the pretrained weights no longer fit; skip loading them and let
        # OriginalMoETransformerBlock and LayerNorm reinitialize with the new dimensions.
        if embedding_dim != original_embedding_dim or moe_hidden_dim != 1024:  # assume the original moe_hidden_dim was 1024
            print("embedding_dim or moe_hidden_dim changed. OriginalMoETransformerBlock and LayerNorm will be reinitialized.")
            state_dict = {}  # discard the incompatible pretrained weights
        self.original_model_core = OriginalModelReconstructed(vocab_size, embedding_dim, moe_hidden_dim, num_experts)

        # Load the original weights into the core model (if present)
        if state_dict:
            print("Loading original model weights...")
            self.original_model_core.norm.weight.data = state_dict["gamma"]
            self.original_model_core.norm.bias.data = state_dict["beta"]
            for i in range(num_experts):
                self.original_model_core.transformer_block_0.experts_w1[i].weight.data = state_dict["transformer_block.0.moe.experts.w1.weight"][i].T
                self.original_model_core.transformer_block_0.experts_w2[i].weight.data = state_dict["transformer_block.0.moe.experts.w2.weight"][i].T
            self.original_model_core.transformer_block_0.gate.weight.data = state_dict["transformer_block.0.moe.gate.weight"].T
            print("Original model weights loaded.")

        for param in self.original_model_core.transformer_block_0.parameters():
            param.requires_grad = False
        for param in self.original_model_core.norm.parameters():
            param.requires_grad = False

        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.output_layer = nn.Linear(embedding_dim, vocab_size)

        # Action prediction head for Comet-like capability.
        # Emits a structured action (e.g. [action_type, x_coord, y_coord, element_id_token]),
        # simplified here to a small MLP that outputs a fixed-length vector.
        self.action_prediction_head = nn.Sequential(
            nn.Linear(embedding_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 4)  # assume 4 outputs: action_type, x, y, element_id_token
        )

        self.vision_encoder = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
        self.vision_encoder = nn.Sequential(*list(self.vision_encoder.children())[:-1])
        self.vision_projection = nn.Linear(512, visual_feature_dim)

        for param in self.vision_encoder.parameters():
            param.requires_grad = False

        self.initial_fusion_layer = nn.Linear(visual_feature_dim + embedding_dim, embedding_dim)

        self.memory_engine = MemorySymbiosisEngine(embedding_dim, memory_slots, memory_dim)

        # Reality Anchor module
        self.reality_anchor = RealityAnchor(embedding_dim, fact_memory_size, fact_dim, ra_threshold)

        # Multidimensional Generation Orchestrator (MGO) module
        self.mgo = MultidimensionalGenerationOrchestrator(embedding_dim, vocab_size)

    def forward(self, text_input, image_input, user_profile_embedding=None, use_ra=False, return_fused_features=False, return_action_prediction=False):
        text_features = self.embeddings(text_input)
        if text_features.dim() == 3:
            text_features = text_features.squeeze(1)

        image_features = self.vision_encoder(image_input)
        image_features = image_features.view(image_features.size(0), -1)
        image_features = self.vision_projection(image_features)

        fused_initial_features = self.initial_fusion_layer(torch.cat((text_features, image_features), dim=1))

        if return_fused_features:
            return fused_initial_features

        memory_enhanced_features = self.memory_engine(fused_initial_features, user_profile_embedding)

        # Apply the Reality Anchor (RA) technique
        if use_ra:
            # Here memory_enhanced_features serves both as the generation-context embedding
            # and as the current_token_embedding passed to RA for adjustment.
            # In practice RA would operate at a finer granularity, on each generated token.
            adjusted_features, consistency_score = self.reality_anchor(memory_enhanced_features, memory_enhanced_features)  # simplified: context and current token are the same
            x = self.original_model_core.transformer_block_0(adjusted_features)
        else:
            x = self.original_model_core.transformer_block_0(memory_enhanced_features)

        x = self.original_model_core.norm(x)
        final_features = x  # keep x as final_features for the MGO stage

        # Apply the Multidimensional Generation Orchestrator (MGO) technique
        guidance_signal, planning_params, length_control_signal = self.mgo(final_features, current_output_length=0, target_intent_idx=None)
        final_features = final_features + guidance_signal  # superimpose the guidance signal

        output = self.output_layer(final_features)

        if return_action_prediction:
            action_prediction = self.action_prediction_head(final_features)
            return action_prediction

        if use_ra:
            return output, consistency_score, planning_params, length_control_signal
        else:
            return output, planning_params, length_control_signal

if __name__ == "__main__":
    original_model_path = "/home/ubuntu/upload/moe_model.safetensors"

    vocab_size = 10000   # vocabulary size unchanged
    embedding_dim = 128  # larger embedding dimension to grow the parameter count
    moe_hidden_dim = 384  # larger MoE hidden dimension to grow the parameter count
    num_experts = 16
    visual_feature_dim = 256
    memory_slots = 10
    memory_dim = 256
    fact_memory_size = 1000
    fact_dim = 256
    ra_threshold = 0.7

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Initialize the integrated model (with RA)
    integrated_model_ra = FullyIntegratedModelWithRA(
        original_model_path=original_model_path,
        vocab_size=vocab_size,
        embedding_dim=embedding_dim,
        moe_hidden_dim=moe_hidden_dim,
        num_experts=num_experts,
        visual_feature_dim=visual_feature_dim,
        memory_slots=memory_slots,
        memory_dim=memory_dim,
        fact_memory_size=fact_memory_size,
        fact_dim=fact_dim,
        ra_threshold=ra_threshold
    ).to(device)
    integrated_model_ra.eval()  # set to evaluation mode

    print("Fully Integrated Model with RA initialized successfully.")

    # Count total parameters
    total_trainable_params = sum(p.numel() for p in integrated_model_ra.parameters() if p.requires_grad)
    print(f"Total trainable parameters: {total_trainable_params / 1_000_000:.2f}M")
    total_all_params = sum(p.numel() for p in integrated_model_ra.parameters())
    print(f"Total all parameters (including frozen): {total_all_params / 1_000_000:.2f}M")

    # --- Simulate Agent Matrix framework interaction (including RA commands) ---
    agent_interface_ra = AgentMatrixInterface(integrated_model_ra)
    print("Agent Matrix Interface with RA initialized.")

    # Dummy inputs
    dummy_text_input = torch.tensor([[100]], dtype=torch.long).to(device)  # batch size 1, 1 token
    dummy_image_input = torch.randn(1, 3, 224, 224).to(device)  # batch size 1, 3 channels, 224x224

    print("\n--- Simulating Agent Matrix Commands (with RA) ---")

    # Simulate the 'analyze_image_text' command
    try:
        print("Executing command: analyze_image_text")
        fused_features = agent_interface_ra(command="analyze_image_text", text_input=dummy_text_input, image_input=dummy_image_input)
        print(f"Analyzed features shape: {fused_features.shape}")
    except Exception as e:
        print(f"Error executing analyze_image_text: {e}")

    # Simulate the 'generate_response' command (RA disabled)
    try:
        print("Executing command: generate_response (without RA)")
        output_logits, planning_params, length_control_signal = agent_interface_ra(command="generate_response", text_input=dummy_text_input, image_input=dummy_image_input)
        print(f"Generated response logits shape: {output_logits.shape}")
        print(f"MGO Planning Params: {planning_params.tolist()}")
        print(f"MGO Length Control Signal: {length_control_signal.item():.4f}")
    except Exception as e:
        print(f"Error executing generate_response (without RA): {e}")

    # Simulate the 'generate_anchored_response' command (RA enabled)
    try:
        print("Executing command: generate_anchored_response (with RA)")
        output_logits_ra, consistency_score, planning_params_ra, length_control_signal_ra = agent_interface_ra(command="generate_anchored_response", text_input=dummy_text_input, image_input=dummy_image_input)
        print(f"Generated anchored response logits shape: {output_logits_ra.shape}")
        print(f"Fact consistency score: {consistency_score.item():.4f}")
        print(f"MGO Planning Params (RA): {planning_params_ra.tolist()}")
        print(f"MGO Length Control Signal (RA): {length_control_signal_ra.item():.4f}")
    except Exception as e:
        print(f"Error executing generate_anchored_response (with RA): {e}")

    # Simulate the 'retrieve_memory' command
    try:
        print("Executing command: retrieve_memory")
        retrieved_memory = agent_interface_ra(command="retrieve_memory", query_text_input=dummy_text_input, query_image_input=dummy_image_input)
        print(f"Retrieved memory shape: {retrieved_memory.shape}")
    except Exception as e:
        print(f"Error executing retrieve_memory: {e}")

    # Save the integrated model (with RA), dropping the frozen vision encoder weights
    state_dict_to_save_ra = integrated_model_ra.state_dict()
    keys_to_remove_ra = [key for key in state_dict_to_save_ra.keys() if 'vision_encoder' in key]
    for key in keys_to_remove_ra:
        del state_dict_to_save_ra[key]

    for key in state_dict_to_save_ra:
        if isinstance(state_dict_to_save_ra[key], torch.Tensor):
            state_dict_to_save_ra[key] = state_dict_to_save_ra[key].contiguous()

    safetensors_save_file(state_dict_to_save_ra, "fully_integrated_model_with_ra_mgo.safetensors")
    print("Fully integrated model with RA and MGO saved to fully_integrated_model_with_ra_mgo.safetensors")
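    # Illustrative sketch (an assumption, not in the uploaded script): reloading the
    # saved checkpoint. The frozen vision_encoder weights were stripped before saving,
    # so strict=False is required and the encoder keeps its torchvision weights.
    from safetensors.torch import load_file as safetensors_load_file
    reloaded_state = safetensors_load_file("fully_integrated_model_with_ra_mgo.safetensors")
    missing, unexpected = integrated_model_ra.load_state_dict(reloaded_state, strict=False)
    print(f"Reloaded checkpoint; {len(missing)} keys (vision encoder) kept their current values.")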