""" |
|
|
An naive implementation of split placment example |
|
|
""" |

from copy import deepcopy
from pprint import pprint

import numpy as np
import torch
import uuid

from verl import DataProto
from verl.trainer.ppo.ray_trainer import compute_advantage, apply_kl_penalty, reduce_metrics, compute_data_metrics, _timer, compute_timing_metrics, AdvantageEstimator


def fit(self):
    """
    The training loop of PPO.
    The driver process only needs to call the compute functions of the worker groups through RPC
    to construct the PPO dataflow.
    The lightweight advantage computation is done on the driver process.
    """
    from verl.utils.tracking import Tracking
    from omegaconf import OmegaConf

    logger = Tracking(project_name=self.config.trainer.project_name,
                      experiment_name=self.config.trainer.experiment_name,
                      default_backend=self.config.trainer.logger,
                      config=OmegaConf.to_container(self.config, resolve=True))

    self.global_steps = 0
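
    # load checkpoint before doing anything else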
    self._load_checkpoint()
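
    # perform validation before training; validation requires a val_reward_fn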
    if self.val_reward_fn is not None and self.config.trainer.get('val_before_train', True):
        val_metrics = self._validate()
        pprint(f'Initial validation metrics: {val_metrics}')
        logger.log(data=val_metrics, step=self.global_steps)
        if self.config.trainer.get('val_only', False):
            return
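
    # training steps are 1-indexed: step 0 is used for the initial validation above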
    self.global_steps += 1
    last_val_metrics = None

    for epoch in range(self.config.trainer.total_epochs):
        for batch_dict in self.train_dataloader:
            metrics = {}
            timing_raw = {}

            batch: DataProto = DataProto.from_single_dict(batch_dict)
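
            # pop the keys that the rollout workers need for generation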
            gen_batch = batch.pop(batch_keys=['input_ids', 'attention_mask', 'position_ids'])
            is_last_step = self.global_steps >= self.total_training_steps

            with _timer('step', timing_raw):
                # generate a batch of rollouts
                with _timer('gen', timing_raw):
                    gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch)
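
                # REMAX baseline: generate greedy (do_sample=False) responses for the same
                # prompts and store their summed reward as 'reward_baselines'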
                if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
                    with _timer('gen_max', timing_raw):
                        gen_baseline_batch = deepcopy(gen_batch)
                        gen_baseline_batch.meta_info['do_sample'] = False
                        gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)

                        batch = batch.union(gen_baseline_output)
                        reward_baseline_tensor = self.reward_fn(batch)
                        reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

                        batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))

                        batch.batch['reward_baselines'] = reward_baseline_tensor

                        del gen_baseline_batch, gen_baseline_output
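
                # tag each prompt with a unique id so that the repeated responses below can be
                # grouped back to their originating prompt (used by group-based estimators)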
                batch.non_tensor_batch['uid'] = np.array([str(uuid.uuid4()) for _ in range(len(batch.batch))],
                                                          dtype=object)
                # repeat the batch to align with the n responses sampled per prompt during rollout
                batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
                batch = batch.union(gen_batch_output)
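
                # balance the number of valid tokens on each dp rank.
                # NOTE: this reorders the data inside the batch, which matters for group-based
                # advantage estimators such as GRPO and RLOO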
                self._balance_batch(batch, metrics=metrics)

                # record the number of valid tokens in each sequence
                batch.meta_info['global_token_num'] = torch.sum(batch.batch['attention_mask'], dim=-1).tolist()
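
                # recompute the log-probs of the rollout responses under the current actor
                # (the old_log_prob used by the PPO update)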
                with _timer('old_log_prob', timing_raw):
                    old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
                    batch = batch.union(old_log_prob)

                if self.use_reference_policy:
                    # compute the log-probs under the reference policy
                    with _timer('ref', timing_raw):
                        ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
                        batch = batch.union(ref_log_prob)

                # compute values with the critic
                if self.use_critic:
                    with _timer('values', timing_raw):
                        values = self.critic_wg.compute_values(batch)
                        batch = batch.union(values)
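
                # score the batch, optionally apply a KL penalty, and estimate advantages;
                # the advantage computation itself is lightweight and runs on the driver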
                with _timer('adv', timing_raw):
                    # compute token-level scores: first the reward-model score (if a reward
                    # model is used), then reward_fn combines model and rule-based results
                    if self.use_rm:
                        reward_tensor = self.rm_wg.compute_rm_score(batch)
                        batch = batch.union(reward_tensor)

                    reward_tensor = self.reward_fn(batch)
                    batch.batch['token_level_scores'] = reward_tensor

                    # fold a KL penalty against the reference policy into the rewards if configured
                    if self.config.algorithm.use_kl_in_reward:
                        batch, kl_metrics = apply_kl_penalty(batch,
                                                             kl_ctrl=self.kl_ctrl_in_reward,
                                                             kl_penalty=self.config.algorithm.kl_penalty)
                        metrics.update(kl_metrics)
                    else:
                        batch.batch['token_level_rewards'] = batch.batch['token_level_scores']

                    # compute advantages; executed on the driver process
                    batch = compute_advantage(batch,
                                              adv_estimator=self.config.algorithm.adv_estimator,
                                              gamma=self.config.algorithm.gamma,
                                              lam=self.config.algorithm.lam,
                                              num_repeat=self.config.actor_rollout_ref.rollout.n)
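
                # dispatch the critic and actor updates without waiting for their results;
                # with split placement the two worker groups occupy separate resource pools,
                # so both updates can run concurrently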
                if self.use_critic:
                    with _timer('update_critic_call', timing_raw):
                        critic_output = self.critic_wg.update_critic(batch)
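
                # critic warmup: skip actor updates for the first critic_warmup steps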
                if self.config.trainer.critic_warmup <= self.global_steps:
                    with _timer('update_actor_call', timing_raw):
                        actor_output = self.actor_rollout_wg.update_actor(batch)
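
                # NOTE: update_actor and update_critic are assumed to be non-blocking
                # (blocking=False in the worker classes), so the calls above return futures
                # and .get() below waits for both updates to finish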
                with _timer('update_actor_critic', timing_raw):
                    critic_output = critic_output.get()
                    critic_output_metrics = reduce_metrics(critic_output.meta_info['metrics'])
                    metrics.update(critic_output_metrics)

                    actor_output = actor_output.get()
                    actor_output_metrics = reduce_metrics(actor_output.meta_info['metrics'])
                    metrics.update(actor_output_metrics)
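
                # validate on the configured test frequency (and always on the last step)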
                if self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and \
                        (is_last_step or self.global_steps % self.config.trainer.test_freq == 0):
                    with _timer('testing', timing_raw):
                        val_metrics: dict = self._validate()
                        if is_last_step:
                            last_val_metrics = val_metrics
                    metrics.update(val_metrics)

                if self.config.trainer.save_freq > 0 and (is_last_step or
                                                          self.global_steps % self.config.trainer.save_freq == 0):
                    with _timer('save_checkpoint', timing_raw):
                        self._save_checkpoint()
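
            # collect data and timing metrics for this training step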
            metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
            metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))

            logger.log(data=metrics, step=self.global_steps)

            if self.global_steps >= self.total_training_steps:
                pprint(f'Final validation metrics: {last_val_metrics}')
                return

            self.global_steps += 1