Commit c558c84 (verified) by happynew111 · 1 Parent(s): 8c7f9ab

Upload Qwen 2.5 3B Instruct model checkpoint

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +2 -0
  2. .github/dependabot.yml +9 -0
  3. .github/workflows/checkpoints.yml +64 -0
  4. .github/workflows/dataset.yml +56 -0
  5. .github/workflows/e2e_ascend.yml +47 -0
  6. .github/workflows/e2e_eval_aime24.yml +48 -0
  7. .github/workflows/e2e_grpo.yml +86 -0
  8. .github/workflows/e2e_gsm8k.yml +95 -0
  9. .github/workflows/e2e_gsm8k_dapo.yml +50 -0
  10. .github/workflows/e2e_gsm8k_megatron.yml +58 -0
  11. .github/workflows/e2e_gsm8k_prime.yml +50 -0
  12. .github/workflows/e2e_lora.yml +54 -0
  13. .github/workflows/e2e_sft.yml +66 -0
  14. .github/workflows/e2e_sglang_gsm8k.yml +53 -0
  15. .github/workflows/e2e_vlm_geo3k.yml +48 -0
  16. .github/workflows/model.yml +76 -0
  17. .github/workflows/pylint.yml +40 -0
  18. .github/workflows/ray_test.yml +55 -0
  19. .github/workflows/sandbox.yml +47 -0
  20. .github/workflows/sanity.yml +54 -0
  21. .github/workflows/scorecard.yml +64 -0
  22. .github/workflows/secrets_scan.yml +21 -0
  23. .github/workflows/vllm.yml +64 -0
  24. .github/workflows/yapf_format.yml +56 -0
  25. .gitignore +128 -0
  26. .readthedocs.yaml +19 -0
  27. .style.yapf +5 -0
  28. LICENSE +202 -0
  29. Notice.txt +1 -0
  30. README.md +228 -0
  31. docker/Dockerfile.megatron +9 -0
  32. docker/Dockerfile.ngc.vllm +47 -0
  33. docker/Dockerfile.ngc.vllm0.8 +66 -0
  34. docker/Dockerfile.ngc.vllm0.8.sagemaker +46 -0
  35. docker/Dockerfile.rocm +45 -0
  36. docker/Dockerfile.sglang +55 -0
  37. docker/Dockerfile.vemlp.vllm.te +41 -0
  38. docs/Makefile +20 -0
  39. docs/README.md +19 -0
  40. docs/README_vllm0.7.md +71 -0
  41. docs/README_vllm0.8.md +54 -0
  42. docs/_static/logo.png +3 -0
  43. docs/advance/checkpoint.rst +122 -0
  44. docs/advance/dpo_extension.rst +271 -0
  45. docs/advance/fsdp_extension.rst +95 -0
  46. docs/advance/megatron_extension.rst +26 -0
  47. docs/advance/placement.rst +11 -0
  48. docs/amd_tutorial/amd_build_dockerfile_page.rst +512 -0
  49. docs/conf.py +83 -0
  50. docs/data.rst +59 -0
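
Only the first 50 changed files are listed above. With a local clone of the repository (not shown on this page), the complete change set for this commit can be inspected with plain git; a minimal sketch, assuming such a clone exists and using the commit hash from the header:

    # summarize every file touched by the commit, not just the first 50
    git show --stat c558c84
    # print the full diff for one of the workflow files added below
    git show c558c84 -- .github/workflows/checkpoints.yml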
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ test_data/gsm8k_test filter=lfs diff=lfs merge=lfs -text
+ verl/data/deepscaler_uniform_train_3004.json filter=lfs diff=lfs merge=lfs -text
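
The two added rules route the uploaded GSM8K test split and the DeepScaleR training JSON through Git LFS. A minimal sketch of how equivalent rules are usually produced with the git-lfs CLI (an illustration, assuming git-lfs is installed; the paths come from the hunk above):

    # each `git lfs track` call appends a filter=lfs diff=lfs merge=lfs rule to .gitattributes
    git lfs install
    git lfs track "test_data/gsm8k_test"
    git lfs track "verl/data/deepscaler_uniform_train_3004.json"
    git add .gitattributes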
.github/dependabot.yml ADDED
@@ -0,0 +1,9 @@
+ ## Enabled the dependabot to check the dependencies of the project
+ ## Dependabot will open pull requests to update dependencies automatically
+
+ version: 2
+ updates:
+   - package-ecosystem: pip
+     directory: "/"
+     schedule:
+       interval: weekly
.github/workflows/checkpoints.yml ADDED
@@ -0,0 +1,64 @@
+ name: checkpoints
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+     paths:
+       - "**/*.py"
+       - "verl/trainer/config/*.yaml"
+       - .github/workflows/checkpoints.yml
+       - "tests/checkpoint/*.sh"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_gsm8k_megatron:
+     runs-on: [self-hosted, l20-0]
+     timeout-minutes: 40 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: whatcanyousee/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te2.0-megatron0.11.0-v0.0.6
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test]
+       - name: Prepare gsm8k dataset
+         run: |
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running Checkpoint Integration Test (Qwen Megatron)
+         run: |
+           ray stop --force
+           export PYTHONPATH=$PYTHONPATH:/opt/nvidia/Megatron-LM
+           bash tests/checkpoint/run_qwen_megatron_ckpt.sh
+       - name: Running Checkpoint Integration Test (Deepseek Megatron)
+         run: |
+           ray stop --force
+           export PYTHONPATH=$PYTHONPATH:/opt/nvidia/Megatron-LM
+           bash tests/checkpoint/run_deepseek_megatron_ckpt.sh
+       - name: Test Megatron checkpoints merging function (Qwen Actor and Critic)
+         run: |
+           python scripts/model_merger.py --backend megatron --tie-word-embedding --hf_model_path Qwen/Qwen2.5-0.5B --local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor --test --test_hf_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor/huggingface
+           python scripts/model_merger.py --backend megatron --is-value-model --hf_model_path Qwen/Qwen2.5-0.5B --local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/critic --test --test_hf_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/critic/huggingface
+       - name: Test Megatron checkpoints merging function (Deepseek Actor and Critic)
+         run: |
+           python scripts/model_merger.py --backend megatron --hf_model_path deepseek-ai/deepseek-coder-1.3b-instruct --local_dir checkpoints/verl_megatron_gsm8k_examples/deepseek_megatron_checkpoint_saveload/global_step_1/actor --test --test_hf_dir checkpoints/verl_megatron_gsm8k_examples/deepseek_megatron_checkpoint_saveload/global_step_1/actor/huggingface
+           python scripts/model_merger.py --backend megatron --is-value-model --hf_model_path deepseek-ai/deepseek-coder-1.3b-instruct --local_dir checkpoints/verl_megatron_gsm8k_examples/deepseek_megatron_checkpoint_saveload/global_step_1/critic --test --test_hf_dir checkpoints/verl_megatron_gsm8k_examples/deepseek_megatron_checkpoint_saveload/global_step_1/critic/huggingface
.github/workflows/dataset.yml ADDED
@@ -0,0 +1,56 @@
+ name: dataset
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+     paths:
+       - "verl/utils/dataset/*.py"
+       - .github/workflows/dataset.yml
+       - "!verl/workers/fsdp_workers.py"
+       - "!verl/workers/megatron_workers.py"
+       - "!recipe/**"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   ray:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 10 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip install hf_transfer
+           pip install -e .[test]
+           pip install --upgrade "ray>=2.40.0"
+           pip install cupy-cuda12x
+       - name: Running dataset tests
+         run: |
+           [ ! -d "$HOME/verl-data" ] && git clone --depth 1 https://github.com/eric-haibin-lin/verl-data ~/verl-data
+           pytest -s -x tests/verl/utils/dataset/test_rl_dataset.py
+           pytest -s -x tests/verl/utils/dataset/test_sft_dataset.py
+           # pytest -s -x tests/verl/utils/dataset/test_rm_dataset.py
+       - name: Running ray test using cupy (move it to L20 when dockerfile ready)
+         run: |
+           cd tests/ray
+           pytest -s -x test_rvdz.py
.github/workflows/e2e_ascend.yml ADDED
@@ -0,0 +1,47 @@
+ name: e2e_ascend
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+     paths:
+       - "**/*.py"
+       - .github/workflows/e2e_ascend.yml
+
+ permissions:
+   contents: read
+
+ jobs:
+   test:
+     name: verl Ascend test (self-host)
+     runs-on: [self-hosted, npu-0]
+     timeout-minutes: 5 # Increase this timeout value as needed
+     env:
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: quay.io/ascend/cann:8.0.0-910b-ubuntu22.04-py3.10
+       volumes:
+         - /usr/local/dcmi:/usr/local/dcmi
+         - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
+         - /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/
+         # Use self-host cache speed up pip and model download
+         # - /home/action/actions-runner/_work/cache:/github/home/.cache/
+       options: >-
+         --device /dev/davinci0
+         --device /dev/davinci_manager
+         --device /dev/devmm_svm
+         --device /dev/hisi_hdc
+         --privileged
+         --network "host"
+     steps:
+       - name: Check npu and CANN info
+         run: |
+           cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
+           npu-smi info
+       - name: Checkout volcengine/verl repo
+         uses: actions/checkout@v4
+       - name: Run test
+         run: |
+           lscpu
.github/workflows/e2e_eval_aime24.yml ADDED
@@ -0,0 +1,48 @@
+ name: e2e_eval_aime24
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+     paths:
+       - "**/*.py"
+       - "verl/trainer/config/*.yaml"
+       - .github/workflows/e2e_eval_aime24.yml
+       - "tests/e2e/run_r1_distill_qwen_aime24_eval.sh"
+       - "!verl/trainer/main_ppo.py"
+       - "!verl/trainer/fsdp_sft_trainer.py"
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_eval_aime24:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 40 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: hiyouga/verl:ngc-th2.6.0-cu120-vllm0.8.2
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test,gpu,math]
+       - name: Prepare aime24 dataset
+         run: |
+           ray stop --force
+           python3 recipe/r1/data_process.py --task aime2024
+       - name: Running generation and evaluation in aime2024
+         run: |
+           ray stop --force
+           bash tests/e2e/run_r1_distill_qwen_aime24_eval.sh
.github/workflows/e2e_grpo.yml ADDED
@@ -0,0 +1,86 @@
+ name: e2e_grpo
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+     paths:
+       - "**/*.py"
+       - "verl/trainer/config/*.yaml"
+       - .github/workflows/e2e_grpo.yml
+       - "tests/e2e/*.sh"
+       - "!verl/trainer/fsdp_sft_trainer.py"
+       - "!recipe/**"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_gsm8k_megatron-l20-0:
+     runs-on: [self-hosted, l20-0]
+     timeout-minutes: 40 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: whatcanyousee/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te2.0-megatron0.11.0-v0.0.6
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test]
+       - name: Prepare gsm8k dataset
+         run: |
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running GRPO gsm8k e2e training tests with FSDP on 8 L20 GPUs (Qwen)
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_grpo.sh
+       - name: Running GRPO gsm8k e2e training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen)
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_grpo_megatron.sh
+   e2e_gsm8k_megatron-l20-1:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 40 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: whatcanyousee/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te2.0-megatron0.11.0-v0.0.6
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test]
+       - name: Prepare gsm8k dataset
+         run: |
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running GRPO gsm8k e2e training tests with FSDP on 8 L20 GPUs (Deepseek)
+         run: |
+           ray stop --force
+           bash tests/e2e/run_deepseek_grpo.sh
+       - name: Running GRPO gsm8k e2e training tests with 3D parallelism on 8 L20 GPUs with Megatron (Deepseek)
+         run: |
+           ray stop --force
+           bash tests/e2e/run_deepseek_grpo_megatron.sh
.github/workflows/e2e_gsm8k.yml ADDED
@@ -0,0 +1,95 @@
+ name: e2e_gsm8k
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "**/*.py"
+       - "verl/trainer/config/*.yaml"
+       - .github/workflows/e2e_gsm8k.yml
+       - "tests/e2e/*gsm8k*.sh"
+       - "!recipe/**"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_gsm8k:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 40 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: hiyouga/verl:ngc-th2.6.0-cu120-vllm0.8.2
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test,gpu]
+       - name: Prepare gsm8k dataset
+         run: |
+           ray stop --force
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running gsm8k e2e training tests on 8 L20 GPUs with rmpad using function rm and save ckpt
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_function_rm.sh
+       - name: Running gsm8k e2e without rmpad using function rm and load ckpt from previous step
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_function_rm_no_rmpad.sh
+           rm -rf ~/ckpt/*
+       - name: Running gsm8k e2e training tests on 8 L20 GPUs with rmpad using function rm (GRPO)
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_function_rm_grpo.sh
+       - name: Running gsm8k e2e training tests on 8 L20 GPUs with rmpad using function rm (ReMax)
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_function_rm_remax.sh
+       - name: Running gsm8k e2e with rmpad using model rm
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_model_rm.sh
+       - name: Running gsm8k e2e without rmpad using model rm
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_model_rm_no_rmpad.sh
+       - name: Running gsm8k e2e with rmpad using model rm and ulysses sp=2
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_model_rm_ulysses.sh
+       - name: Running gsm8k e2e with rmpad using model rm and dynamic batch size
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_model_rm_seq_balance.sh
+       - name: Running gsm8k e2e with rmpad using model rm with Liger Kernel enabled
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_model_rm_liger_kernel.sh
+       - name: Running gsm8k e2e training tests on 8 L20 GPUs with rmpad using customized reward function
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_custom_function_rm.sh
+       - name: Running gsm8k e2e training tests on 8 L20 GPUs with rmpad using function rm with in-reward kl and kl loss
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_function_rm_both_kl.sh
+
.github/workflows/e2e_gsm8k_dapo.yml ADDED
@@ -0,0 +1,50 @@
+ name: e2e_gsm8k_dapo
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "**/*.py"
+       - "verl/trainer/config/*.yaml"
+       - .github/workflows/e2e_gsm8k_dapo.yml
+       - "tests/e2e/*dapo.sh"
+       - "!verl/trainer/main_ppo.py"
+       - "!verl/trainer/fsdp_sft_trainer.py"
+       - "!verl/workers/megatron_workers.py"
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_gsm8k_dapo:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 40 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test,gpu]
+       - name: Prepare gsm8k dataset
+         run: |
+           ray stop --force
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running gsm8k e2e with dapo alg
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_dapo.sh
.github/workflows/e2e_gsm8k_megatron.yml ADDED
@@ -0,0 +1,58 @@
+ name: e2e_gsm8k_megatron
+ # latest version: Megatron-LM core_r0.11.0 https://github.com/NVIDIA/Megatron-LM/tree/core_r0.11.0
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.3.x
+     paths:
+       - "**/*.py"
+       - "verl/trainer/config/*.yaml"
+       - .github/workflows/e2e_gsm8k_megatron.yml
+       - "tests/e2e/*megatron*.sh"
+       - "!verl/workers/fsdp_workers.py"
+       - "!recipe/**"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_gsm8k_megatron:
+     runs-on: [self-hosted, l20-0]
+     timeout-minutes: 40 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: whatcanyousee/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te2.0-megatron0.11.0-v0.0.6
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test]
+       - name: Prepare gsm8k dataset
+         run: |
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running gsm8k e2e training tests with 3D parallelism on 8 L20 GPUs with Megatron (Deepseek)
+         run: |
+           ray stop --force
+           bash tests/e2e/run_deepseek_megatron_parallelism.sh
+       - name: Running gsm8k e2e training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen)
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_megatron_parallelism.sh
.github/workflows/e2e_gsm8k_prime.yml ADDED
@@ -0,0 +1,50 @@
+ name: e2e_gsm8k_prime
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "**/*.py"
+       - "verl/trainer/config/*.yaml"
+       - .github/workflows/e2e_gsm8k_prime.yml
+       - "tests/e2e/*prime.sh"
+       - "!verl/trainer/main_ppo.py"
+       - "!verl/trainer/fsdp_sft_trainer.py"
+       - "!verl/workers/megatron_workers.py"
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_gsm8k_prime:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 40 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: hiyouga/verl:ngc-th2.6.0-cu120-vllm0.8.2
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test,gpu]
+       - name: Prepare gsm8k dataset
+         run: |
+           ray stop --force
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running gsm8k e2e with prime alg
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_prime.sh
.github/workflows/e2e_lora.yml ADDED
@@ -0,0 +1,54 @@
+ name: e2e_lora
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "**/*.py"
+       - .github/workflows/e2e_lora.yml
+       - "tests/sft/*"
+       - "!verl/trainer/main_ppo.py"
+       - "!recipe/**"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_lora:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 5 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer peft
+           pip3 install -e .[test]
+       - name: Prepare gsm8k dataset
+         run: |
+           ray stop --force
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running gsm8k e2e training tests with LoRA
+         run: |
+           ray stop --force
+           bash tests/sft/run_sft_qwen05_peft.sh 8 $HOME/ckpts/
+           rm -rf $HOME/ckpts/*
.github/workflows/e2e_sft.yml ADDED
@@ -0,0 +1,66 @@
+ name: e2e_sft
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "**/*.py"
+       - .github/workflows/e2e_sft.yml
+       - "tests/e2e/*.sh"
+       - "!verl/trainer/main_ppo.py"
+       - "!recipe/**"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_sft:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 5 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test,gpu]
+       - name: Prepare gsm8k dataset
+         run: |
+           ray stop --force
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running gsm8k e2e training tests on 8 L20 GPUs with rmpad using function rm
+         run: |
+           ray stop --force
+           bash tests/sft/run_sft.sh
+       - name: Running gsm8k e2e training tests on 8 L20 GPUs with sequence parallism
+         run: |
+           ray stop --force
+           bash examples/sft/gsm8k/run_qwen_05_sp2.sh 8 $HOME/ckpts/
+       - name: Check loss difference between sequence parallel vs. default implementation
+         run: |
+           ray stop --force
+           bash tests/sft/run_sft_sp_loss_match.sh
+       - name: Running gsm8k e2e training tests on 8 L20 GPUs with sequence parallism and liger
+         run: |
+           ray stop --force
+           bash tests/sft/run_sft_qwen05_sp2_liger.sh 8 $HOME/ckpts/
+           rm -rf $HOME/ckpts/
.github/workflows/e2e_sglang_gsm8k.yml ADDED
@@ -0,0 +1,53 @@
+ name: e2e_sglang_gsm8k
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+     paths:
+       - "**/*.py"
+       - "verl/trainer/config/*.yaml"
+       - .github/workflows/e2e_sglang_gsm8k.yml
+       - "tests/e2e/*.sh"
+       - "!recipe/**"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_sglang_gsm8k:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 40 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: ocss884/verl-sglang:ngc-th2.5.1-cu126-sglang0.4.4.post4
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test,gpu,sglang] --no-deps
+       - name: Prepare gsm8k dataset
+         run: |
+           ray stop --force
+           python3 examples/data_preprocess/gsm8k.py
+       - name: Running gsm8k e2e training tests on 8 L20 GPUs with rmpad using function rm and save ckpt
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen_gsm8k_function_rm.sh sglang
+
.github/workflows/e2e_vlm_geo3k.yml ADDED
@@ -0,0 +1,48 @@
+ name: e2e_vlm_geo3k
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.3.x
+     paths:
+       - "**/*.py"
+       - .github/workflows/e2e_vlm_geo3k.yml
+       - "tests/e2e/*vl*.sh"
+       - "!recipe/**"
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   e2e_vlm_geo3k:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 10 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: hiyouga/verl:ngc-th2.6.0-cu120-vllm0.8.2
+       options: --gpus all --shm-size=40g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test,geo,vllm]
+           python -c "import transformers; print(transformers.__version__)"
+       - name: Prepare geo3k dataset
+         run: |
+           ray stop --force
+           python3 examples/data_preprocess/geo3k.py
+       - name: Running geo3k vlm e2e training tests on 8 L20 GPUs with rmpad using function rm
+         run: |
+           ray stop --force
+           bash tests/e2e/run_qwen2vl_geo3k_function_rm.sh
.github/workflows/model.yml ADDED
@@ -0,0 +1,76 @@
+ name: model_rmpad
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.3.x
+     paths:
+       - "**/*.py"
+       - "tests/model/*"
+       - .github/workflows/model.yml
+       - "!recipe/**"
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   model_rmpad:
+     runs-on: [self-hosted, l20-1]
+     timeout-minutes: 20 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository and upgrade to latest transformers/flash_attn
+         run: |
+           pip3 install -e .[test]
+           pip3 install --upgrade transformers
+       - name: Running rmpad model tests on 8 L20 GPUs + flash_attn 2.5.8
+         run: |
+           pytest -s tests/model/test_transformer.py
+       - name: Running rmpad model tests on 8 L20 GPUs + latest flash_attn
+         run: |
+           pip3 install --upgrade flash_attn --no-build-isolation
+           pytest -s tests/model/test_transformer.py
+       - name: Running FSDP rmpad model tests on 8 L20 GPUs + latest flash_attn
+         run: |
+           pip3 install hf_transfer
+           torchrun --nproc_per_node=8 tests/checkpoint/test_fsdp_ckpt.py
+       - name: Running transformers ulysses tests on 8 L20 GPUs + latest transformers
+         run: |
+           torchrun --nproc_per_node=8 -m pytest tests/model/test_transformers_ulysses.py
+       - name: Running transformers ulysses tests on 8 L20 GPUs + transformers 4.49.0
+         run: |
+           pip3 install transformers==4.49.0
+           torchrun --nproc_per_node=8 -m pytest tests/model/test_transformers_ulysses.py
+       - name: Running transformers ulysses tests on 8 L20 GPUs + transformers 4.48.0
+         run: |
+           pip3 install transformers==4.48.0
+           torchrun --nproc_per_node=8 -m pytest tests/model/test_transformers_ulysses.py
+       - name: Running transformers ulysses tests on 8 L20 GPUs + transformers 4.47.0
+         run: |
+           pip3 install transformers==4.47.0
+           torchrun --nproc_per_node=8 -m pytest tests/model/test_transformers_ulysses.py
+       - name: Running transformers ulysses tests on 8 L20 GPUs + transformers 4.46.0
+         run: |
+           pip3 install transformers==4.46.0
+           torchrun --nproc_per_node=8 -m pytest tests/model/test_transformers_ulysses.py
+       - name: Running transformers ulysses tests on 8 L20 GPUs + transformers 4.45.0
+         run: |
+           pip3 install transformers==4.45.0
+           torchrun --nproc_per_node=8 -m pytest tests/model/test_transformers_ulysses.py
+       - name: Run distributed test
+         run: |
+           bash tests/distributed/run_all.sh
.github/workflows/pylint.yml ADDED
@@ -0,0 +1,40 @@
+ name: Pylint Check
+
+ on:
+   push:
+     paths:
+       - '**.py'
+       - 'requirements.txt'
+       - 'pyproject.toml'
+   pull_request:
+     paths:
+       - '**.py'
+       - 'requirements.txt'
+       - 'pyproject.toml'
+
+ jobs:
+   lint:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout code
+         uses: actions/checkout@v3
+
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: '3.12'
+
+       - name: Install pylint (version from requirements.txt)
+         run: |
+           PYLINT_VERSION=$(grep '^pylint' requirements.txt)
+           if [ -z "$PYLINT_VERSION" ]; then
+             echo "No pylint version found in requirements.txt"
+             exit 1
+           fi
+           # only install pylint to avoid dependency problems on CPU
+           pip install "$PYLINT_VERSION"
+
+       - name: Run pylint
+         run: |
+           pylint --recursive=y --rcfile=pyproject.toml ./
.github/workflows/ray_test.yml ADDED
@@ -0,0 +1,55 @@
+ name: ray
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   push:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "verl/single_controller/*.py"
+       - .github/workflows/ray_test.yml
+   pull_request:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "verl/single_controller/*.py"
+       - .github/workflows/ray_test.yml
+       - "!recipe/**"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   ray:
+     runs-on: [self-hosted, l20-0]
+     timeout-minutes: 5 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip install hf_transfer
+           pip install -e .[test]
+           pip install --upgrade "ray>=2.40.0"
+       - name: Running ray tests that need 8 GPUs
+         run: |
+           cd tests/ray
+           pytest -s -x --ignore=test_check_worker_alive.py --ignore=test_rvdz.py .
.github/workflows/sandbox.yml ADDED
@@ -0,0 +1,47 @@
+ name: sandbox
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.3.x
+     paths:
+       - "**/*.py"
+       - .github/workflows/sandbox.yml
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   sandbox:
+     runs-on: [self-hosted, l20-0]
+     timeout-minutes: 3 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test,prime]
+           pip3 install vllm==0.5.4
+       - name: Running sandbox tests on 8 L20 GPUs
+         run: |
+           cd tests/sandbox
+           pytest -s -x .
.github/workflows/sanity.yml ADDED
@@ -0,0 +1,54 @@
+ name: sanity
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   push:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "**/*.py"
+       - .github/workflows/sanity.yml
+   pull_request:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "**/*.py"
+       - .github/workflows/sanity.yml
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   sanity:
+     runs-on: ubuntu-latest
+     timeout-minutes: 5 # Increase this timeout value as needed
+     strategy:
+       matrix:
+         python-version: ["3.10"]
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install the current repository
+         run: |
+           pip install -e .[test]
+       - name: Run sanity test
+         run: |
+           pytest -s -x tests/sanity
+       - name: Run utility test
+         run: |
+           pytest -s -x tests/utility
+       - name: Run license test
+         run: |
+           python3 tests/sanity/check_license.py --directory .
.github/workflows/scorecard.yml ADDED
@@ -0,0 +1,64 @@
+ # This workflow uses actions that are not certified by GitHub. They are provided
+ # by a third-party and are governed by separate terms of service, privacy
+ # policy, and support documentation.
+
+ name: Scorecard supply-chain security
+ on:
+   # For Branch-Protection check. Only the default branch is supported. See
+   # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
+   branch_protection_rule:
+   # To guarantee Maintained check is occasionally updated. See
+   # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
+   schedule:
+     - cron: '27 7 * * 1'
+   push:
+     branches: [ "main" ]
+
+ # Declare default permissions as read only.
+ permissions: read-all
+
+ jobs:
+   analysis:
+     name: Scorecard analysis
+     runs-on: ubuntu-latest
+     permissions:
+       # Needed to upload the results to code-scanning dashboard.
+       security-events: write
+       # Needed to publish results and get a badge (see publish_results below).
+       id-token: write
+       # Uncomment the permissions below if installing in a private repository.
+       # contents: read
+       # actions: read
+
+     steps:
+       - name: "Checkout code"
+         uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+         with:
+           persist-credentials: false
+
+       - name: "Run analysis"
+         uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
+         with:
+           results_file: results.sarif
+           results_format: sarif
+           # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
+           # - you want to enable the Branch-Protection check on a *public* repository, or
+           # - you are installing Scorecard on a *private* repository
+           # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
+           # repo_token: ${{ secrets.SCORECARD_TOKEN }}
+
+           # Public repositories:
+           #   - Publish results to OpenSSF REST API for easy access by consumers
+           #   - Allows the repository to include the Scorecard badge.
+           #   - See https://github.com/ossf/scorecard-action#publishing-results.
+           # For private repositories:
+           #   - `publish_results` will always be set to `false`, regardless
+           #     of the value entered here.
+           publish_results: true
+
+       # Upload the results to GitHub's code scanning dashboard (optional).
+       # Commenting out will disable upload of results to your repo's Code Scanning dashboard
+       - name: "Upload to code-scanning"
+         uses: github/codeql-action/upload-sarif@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 #v3.28.9
+         with:
+           sarif_file: results.sarif
.github/workflows/secrets_scan.yml ADDED
@@ -0,0 +1,21 @@
+ on:
+   push:
+     branches:
+       - main
+   pull_request:
+
+ permissions:
+   contents: read
+
+ jobs:
+   test:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Checkout code
+         uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+         with:
+           fetch-depth: 0
+       - name: Secret Scanning
+         uses: trufflesecurity/trufflehog@7dc056a193116ba8d82154bf0549381c8fb8545c # v3.88.14
+         with:
+           extra_args: --results=verified,unknown
.github/workflows/vllm.yml ADDED
@@ -0,0 +1,64 @@
+ name: vllm
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   pull_request:
+     branches:
+       - main
+       - v0.3.x
+     paths:
+       - "**/*.py"
+       - "verl/trainer/config/*.yaml"
+       - .github/workflows/vllm.yml
+       - "!recipe/**"
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   vllm:
+     runs-on: [self-hosted, l20-0]
+     timeout-minutes: 20 # Increase this timeout value as needed
+     env:
+       HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
+       HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
+       NO_PROXY: "localhost,127.0.0.1"
+       HF_HUB_ENABLE_HF_TRANSFER: 1
+     container:
+       image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
+       options: --gpus all --shm-size=10g
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+         with:
+           fetch-depth: 0
+       - name: Install the current repository
+         run: |
+           pip3 install hf_transfer
+           pip3 install -e .[test]
+           pip3 install vllm==0.5.4
+       - name: Running vllm tests on 8 L20 GPUs
+         run: |
+           cd tests/rollout
+           torchrun --standalone --nnodes=1 --nproc_per_node=8 $(which pytest) -s test_vllm_hf_loader.py
+       - name: Test the latest vLLM
+         run: |
+           pip3 install --upgrade vllm==0.7.3
+           cd tests/rollout
+           torchrun --standalone --nnodes=1 --nproc_per_node=4 $(which pytest) -s test_vllm_spmd.py
+       - name: Run Qwen 0.5B generation test
+         run: |
+           cd tests/generation
+           bash ./run_gen_qwen05.sh 4 $HOME/data/gen/qwen_05_gen_test.parquet 2
+           rm -rf $HOME/data/gen/qwen_05_gen_test.parquet
+       - name: Run Qwen 0.5B generation test when world_size == 1
+         run: |
+           cd tests/generation
+           bash ./run_gen_qwen05.sh 1 $HOME/data/gen/qwen_05_gen_test.parquet 1
+           rm -rf $HOME/data/gen/qwen_05_gen_test.parquet
.github/workflows/yapf_format.yml ADDED
@@ -0,0 +1,56 @@
+ name: yapf
+
+ on:
+   # Trigger the workflow on push or pull request,
+   # but only for the main branch
+   push:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "**/*.py"
+       - .github/workflows/yapf_format.yml
+   pull_request:
+     branches:
+       - main
+       - v0.2.x
+     paths:
+       - "**/*.py"
+       - .github/workflows/yapf_format.yml
+
+ # Cancel jobs on the same ref if a new one is triggered
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.ref }}
+   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+ # Declare permissions just read content.
+ permissions:
+   contents: read
+
+ jobs:
+   yapf:
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: ["3.12"]
+     steps:
+       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+       # - name: checkout
+       #   run: |
+       #     commits=${{ github.event.pull_request.commits }}
+       #     if [[ -n "$commits" ]]; then
+       #       # Prepare enough depth for diffs with main
+       #       git fetch --depth="$(( commits + 1 ))"
+       #     fi
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install --upgrade yapf
+           pip install toml==0.10.2
+       - name: Running yapf
+         run: |
+           yapf -r -vv -d --style=./.style.yapf verl tests examples recipe
.gitignore ADDED
@@ -0,0 +1,128 @@
+
+ **/*.pt
+ **/checkpoints
+ **/wget-log
+ **/_build/
+ **/*.ckpt
+ **/outputs
+ **/*.tar.gz
+ **/playground
+ **/wandb
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ dataset/*
+ tensorflow/my_graph/*
+ .idea/
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ env/
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ tmp/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *,cover
+ .hypothesis/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # IPython Notebook
+ .ipynb_checkpoints
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # dotenv
+ .env
+
+ # virtualenv
+ venv/
+ .venv/
+ ENV/
+
+ # Spyder project settings
+ .spyderproject
+
+ # Rope project settings
+ .ropeproject
+
+ # vscode
+ .vscode
+
+ # Mac
+ .DS_Store
+
+ # output logs
+ tests/e2e/toy_examples/deepspeed/synchronous/output.txt
+
+ # vim
+ *.swp
+
+ # ckpt
+ *.lock
+
+ # data
+ *.parquet
+
+
+ # local logs
+ logs
+ log
+ outputs
.readthedocs.yaml ADDED
@@ -0,0 +1,19 @@
+ # Read the Docs configuration file
+ # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+ version: 2
+
+ build:
+   os: ubuntu-22.04
+   tools:
+     python: "3.11"
+     rust: "1.70"
+
+ sphinx:
+   configuration: docs/conf.py
+
+ python:
+   install:
+     - requirements: docs/requirements-docs.txt
+     - method: pip
+       path: .
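
The Read the Docs config above builds the Sphinx docs from docs/conf.py after installing docs/requirements-docs.txt and the package itself. A rough local equivalent, assuming the docs/Makefile added in this commit exposes the standard `html` target (an assumption, since that Makefile's contents are not shown in this truncated view):

    pip install -r docs/requirements-docs.txt
    pip install -e .
    # assumed Makefile target; `sphinx-build -b html docs docs/_build/html` is the usual fallback
    make -C docs html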
.style.yapf ADDED
@@ -0,0 +1,5 @@
+ [style]
+ based_on_style = google
+ column_limit = 120
+ indent_width = 4
+ split_arguments_when_comma_terminated: true
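
This style file is what the yapf workflow above points at with `--style=./.style.yapf`. A sketch of the matching local check, mirroring the CI invocation (the in-place `-i` variant is an assumption about typical local use, not part of this commit):

    pip install yapf toml
    # diff-only, as the yapf workflow runs it
    yapf -r -vv -d --style=./.style.yapf verl tests examples recipe
    # or rewrite files in place locally
    yapf -r -i --style=./.style.yapf verl tests examples recipe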
LICENSE ADDED
@@ -0,0 +1,202 @@
+
+                                  Apache License
+                            Version 2.0, January 2004
+                         http://www.apache.org/licenses/
+
+    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+    1. Definitions.
+
+       "License" shall mean the terms and conditions for use, reproduction,
+       and distribution as defined by Sections 1 through 9 of this document.
+
+       "Licensor" shall mean the copyright owner or entity authorized by
+       the copyright owner that is granting the License.
+
+       "Legal Entity" shall mean the union of the acting entity and all
+       other entities that control, are controlled by, or are under common
+       control with that entity. For the purposes of this definition,
+       "control" means (i) the power, direct or indirect, to cause the
+       direction or management of such entity, whether by contract or
+       otherwise, or (ii) ownership of fifty percent (50%) or more of the
+       outstanding shares, or (iii) beneficial ownership of such entity.
+
+       "You" (or "Your") shall mean an individual or Legal Entity
+       exercising permissions granted by this License.
+
+       "Source" form shall mean the preferred form for making modifications,
+       including but not limited to software source code, documentation
+       source, and configuration files.
+
+       "Object" form shall mean any form resulting from mechanical
+       transformation or translation of a Source form, including but
+       not limited to compiled object code, generated documentation,
+       and conversions to other media types.
+
+       "Work" shall mean the work of authorship, whether in Source or
+       Object form, made available under the License, as indicated by a
+       copyright notice that is included in or attached to the work
+       (an example is provided in the Appendix below).
+
+       "Derivative Works" shall mean any work, whether in Source or Object
+       form, that is based on (or derived from) the Work and for which the
+       editorial revisions, annotations, elaborations, or other modifications
+       represent, as a whole, an original work of authorship. For the purposes
+       of this License, Derivative Works shall not include works that remain
+       separable from, or merely link (or bind by name) to the interfaces of,
+       the Work and Derivative Works thereof.
+
+       "Contribution" shall mean any work of authorship, including
+       the original version of the Work and any modifications or additions
+       to that Work or Derivative Works thereof, that is intentionally
+       submitted to Licensor for inclusion in the Work by the copyright owner
+       or by an individual or Legal Entity authorized to submit on behalf of
+       the copyright owner. For the purposes of this definition, "submitted"
+       means any form of electronic, verbal, or written communication sent
+       to the Licensor or its representatives, including but not limited to
+       communication on electronic mailing lists, source code control systems,
+       and issue tracking systems that are managed by, or on behalf of, the
+       Licensor for the purpose of discussing and improving the Work, but
+       excluding communication that is conspicuously marked or otherwise
+       designated in writing by the copyright owner as "Not a Contribution."
+
+       "Contributor" shall mean Licensor and any individual or Legal Entity
+       on behalf of whom a Contribution has been received by Licensor and
+       subsequently incorporated within the Work.
+
+    2. Grant of Copyright License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       copyright license to reproduce, prepare Derivative Works of,
+       publicly display, publicly perform, sublicense, and distribute the
+       Work and such Derivative Works in Source or Object form.
+
+    3. Grant of Patent License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       (except as stated in this section) patent license to make, have made,
+       use, offer to sell, sell, import, and otherwise transfer the Work,
+       where such license applies only to those patent claims licensable
+       by such Contributor that are necessarily infringed by their
+       Contribution(s) alone or by combination of their Contribution(s)
+       with the Work to which such Contribution(s) was submitted. If You
+       institute patent litigation against any entity (including a
+       cross-claim or counterclaim in a lawsuit) alleging that the Work
+       or a Contribution incorporated within the Work constitutes direct
+       or contributory patent infringement, then any patent licenses
+       granted to You under this License for that Work shall terminate
+       as of the date such litigation is filed.
+
+    4. Redistribution. You may reproduce and distribute copies of the
+       Work or Derivative Works thereof in any medium, with or without
+       modifications, and in Source or Object form, provided that You
+       meet the following conditions:
+
+       (a) You must give any other recipients of the Work or
+           Derivative Works a copy of this License; and
+
+       (b) You must cause any modified files to carry prominent notices
+           stating that You changed the files; and
+
+       (c) You must retain, in the Source form of any Derivative Works
+           that You distribute, all copyright, patent, trademark, and
+           attribution notices from the Source form of the Work,
+           excluding those notices that do not pertain to any part of
+           the Derivative Works; and
+
+       (d) If the Work includes a "NOTICE" text file as part of its
+           distribution, then any Derivative Works that You distribute must
+           include a readable copy of the attribution notices contained
+           within such NOTICE file, excluding those notices that do not
+           pertain to any part of the Derivative Works, in at least one
+           of the following places: within a NOTICE text file distributed
+           as part of the Derivative Works; within the Source form or
+           documentation, if provided along with the Derivative Works; or,
+           within a display generated by the Derivative Works, if and
+           wherever such third-party notices normally appear. The contents
+           of the NOTICE file are for informational purposes only and
+           do not modify the License. You may add Your own attribution
+           notices within Derivative Works that You distribute, alongside
+           or as an addendum to the NOTICE text from the Work, provided
+           that such additional attribution notices cannot be construed
+           as modifying the License.
+
+       You may add Your own copyright statement to Your modifications and
+       may provide additional or different license terms and conditions
+       for use, reproduction, or distribution of Your modifications, or
+       for any such Derivative Works as a whole, provided Your use,
+       reproduction, and distribution of the Work otherwise complies with
+       the conditions stated in this License.
+
+    5. Submission of Contributions. Unless You explicitly state otherwise,
+       any Contribution intentionally submitted for inclusion in the Work
+       by You to the Licensor shall be under the terms and conditions of
+       this License, without any additional terms or conditions.
+       Notwithstanding the above, nothing herein shall supersede or modify
+       the terms of any separate license agreement you may have executed
+       with Licensor regarding such Contributions.
+
+    6. Trademarks. This License does not grant permission to use the trade
+       names, trademarks, service marks, or product names of the Licensor,
+       except as required for reasonable and customary use in describing the
+       origin of the Work and reproducing the content of the NOTICE file.
+
+    7. Disclaimer of Warranty. Unless required by applicable law or
+       agreed to in writing, Licensor provides the Work (and each
+       Contributor provides its Contributions) on an "AS IS" BASIS,
+       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+       implied, including, without limitation, any warranties or conditions
+       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+       PARTICULAR PURPOSE. You are solely responsible for determining the
+       appropriateness of using or redistributing the Work and assume any
+       risks associated with Your exercise of permissions under this License.
+
+    8. Limitation of Liability. In no event and under no legal theory,
+       whether in tort (including negligence), contract, or otherwise,
+       unless required by applicable law (such as deliberate and grossly
+       negligent acts) or agreed to in writing, shall any Contributor be
+       liable to You for damages, including any direct, indirect, special,
+       incidental, or consequential damages of any character arising as a
+       result of this License or out of the use or inability to use the
+       Work (including but not limited to damages for loss of goodwill,
+       work stoppage, computer failure or malfunction, or any and all
+       other commercial damages or losses), even if such Contributor
+       has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
Notice.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ Copyright 2023-2024 Bytedance Ltd. and/or its affiliates
README.md ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <h1 style="text-align: center;">AdaRFT: Adaptive Curriculum Reinforcement Finetuning</h1>
2
+
3
+ 📢 **New extension to `verl`!** We propose an adaptive curriculum learning method for efficient and scalable reinforcement finetuning (RFT) of LLMs — now implemented as an extension to this repo.
4
+
5
+ > **Efficient Reinforcement Finetuning via Adaptive Curriculum Learning**
6
+ > Taiwei Shi†, Yiyang Wu†, Linxin Song†, Tianyi Zhou▽, Jieyu Zhao†
7
+ > †University of Southern California, ▽University of Maryland
8
+ > [[Paper]](https://arxiv.org/abs/2504.05520)
9
+
10
+ ### 🧠 Highlights
11
+ - Dynamically adapts training difficulty using a lightweight curriculum scheduler
12
+ - Compatible with standard RFT algorithms like PPO, GRPO, REINFORCE++
13
+ - Improves both **sample efficiency** and **final accuracy** on math reasoning benchmarks
14
+ - Up to **2× faster convergence** vs PPO baseline
15
+ - Seamlessly integrated into `verl` without modifying reward functions or model architectures
16
+
17
+ ### 📦 Preprocessed Data
18
+ - **Difficulty annotations**: [DeepScaleR](https://huggingface.co/datasets/lime-nlp/DeepScaleR_Difficulty)
19
+ - **Training data**: [verl/data](https://github.com/uscnlp-lime/verl/tree/main/verl/data)
20
+
21
+ ### 🚀 Usage
22
+ To use AdaRFT, you can simply run our example [script](https://github.com/uscnlp-lime/verl/blob/main/examples/adarft/run_qwen2.5-1.5b_seq_balance.sh).
23
+
24
+ You can also enable it in [ppo_trainer.yaml](https://github.com/uscnlp-lime/verl/blob/main/verl/trainer/config/ppo_trainer.yaml#L18-L24) or via the command line by setting the following flags (each flag is explained below the block):
25
+
26
+ ```bash
27
+ python3 -m verl.trainer.main_ppo \
28
+     ... \
29
+     data.adarft.enable=True \
30
+     data.adarft.beta=0.5 \
31
+     data.adarft.alpha=2 \
32
+     data.adarft.eta=50 \
33
+     data.adarft.d_min=0 \
34
+     data.adarft.d_max=100 \
35
+     ...
36
+ ```
+
+ Here `beta` is the target reward (success rate) the scheduler aims to maintain, `alpha` controls how sensitively the difficulty is updated based on the reward difference, `eta` is the step size that scales the reward signal into difficulty space, and `d_min`/`d_max` are the lower and upper difficulty bounds.
37
+
38
+ Make sure your dataset includes difficulty scores (e.g., from [here](https://github.com/uscnlp-lime/verl/tree/main/verl/data)) for AdaRFT to function properly.
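+
+ For intuition, the scheduler can be read as a simple target-reward update: after each batch it compares the observed success rate against the target `beta` and shifts the sampling difficulty accordingly. The sketch below only illustrates that idea with hypothetical names and an assumed update form; the exact rule is defined in the paper and in the `data.adarft` config.
+
+ ```python
+ # Illustrative sketch only (not the verl implementation): a target-reward
+ # curriculum update in the spirit of the flags above.
+ def update_target_difficulty(d_t, batch_reward, beta=0.5, alpha=2, eta=50,
+                              d_min=0, d_max=100):
+     """Raise the target difficulty when the model beats the target success
+     rate `beta`, and lower it when the model falls short (assumed form)."""
+     d_next = d_t + eta * alpha * (batch_reward - beta)
+     return max(d_min, min(d_max, d_next))  # clip to the difficulty bounds
+
+ # Example: 75% success against a 50% target moves the curriculum toward
+ # harder problems (difficulty 40 -> 65).
+ print(update_target_difficulty(d_t=40, batch_reward=0.75))
+ ```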
39
+
40
+ ### 📚 Citation
41
+ ✉️ Feel free to reach out to **Taiwei Shi ([email protected])** or **Jieyu Zhao ([email protected])** with questions or collaborations!
42
+
43
+ ```bibtex
44
+ @misc{shi2025efficient,
45
+ title={Efficient Reinforcement Finetuning via Adaptive Curriculum Learning},
46
+ author={Taiwei Shi and Yiyang Wu and Linxin Song and Tianyi Zhou and Jieyu Zhao},
47
+ year={2025},
48
+ eprint={2504.05520},
49
+ archivePrefix={arXiv},
50
+ primaryClass={cs.LG}
51
+ }
52
+ ```
53
+
54
+ ---
55
+
56
+ <h1 style="text-align: center;">verl: Volcano Engine Reinforcement Learning for LLMs</h1>
57
+
58
+ [![GitHub Repo stars](https://img.shields.io/github/stars/volcengine/verl)](https://github.com/volcengine/verl/stargazers)
59
+ ![GitHub forks](https://img.shields.io/github/forks/volcengine/verl)
60
+ [![Twitter](https://img.shields.io/twitter/follow/verl_project)](https://twitter.com/verl_project)
61
+ <a href="https://join.slack.com/t/verlgroup/shared_invite/zt-2w5p9o4c3-yy0x2Q56s_VlGLsJ93A6vA"><img src="https://img.shields.io/badge/Slack-verl-blueviolet?logo=slack&amp"></a>
62
+ <a href="https://arxiv.org/pdf/2409.19256"><img src="https://img.shields.io/static/v1?label=EuroSys&message=Paper&color=red"></a>
63
+ ![GitHub contributors](https://img.shields.io/github/contributors/volcengine/verl)
64
+ [![Documentation](https://img.shields.io/badge/documentation-blue)](https://verl.readthedocs.io/en/latest/)
65
+ <a href="https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/WeChat.JPG"><img src="https://img.shields.io/badge/微信-green?logo=wechat&amp"></a>
66
+
67
+
68
+ verl is a flexible, efficient and production-ready RL training library for large language models (LLMs).
69
+
70
+ verl is the open-source release of the framework described in the **[HybridFlow: A Flexible and Efficient RLHF Framework](https://arxiv.org/abs/2409.19256v2)** paper.
71
+
72
+ verl is flexible and easy to use with:
73
+
74
+ - **Easy extension of diverse RL algorithms**: The hybrid-controller programming model enables flexible representation and efficient execution of complex Post-Training dataflows. Build RL dataflows such as GRPO, PPO in a few lines of code.
75
+
76
+ - **Seamless integration of existing LLM infra with modular APIs**: Decouples computation and data dependencies, enabling seamless integration with existing LLM frameworks, such as FSDP, Megatron-LM, vLLM, SGLang, etc.
77
+
78
+ - **Flexible device mapping**: Supports various placement of models onto different sets of GPUs for efficient resource utilization and scalability across different cluster sizes.
79
+
80
+ - Ready integration with popular HuggingFace models
81
+
82
+
83
+ verl is fast with:
84
+
85
+ - **State-of-the-art throughput**: SOTA LLM training and inference engine integrations and SOTA RL throughput.
86
+
87
+ - **Efficient actor model resharding with 3D-HybridEngine**: Eliminates memory redundancy and significantly reduces communication overhead during transitions between training and generation phases.
88
+
89
+
90
+
91
+ ## News
92
+ - [2025/03] verl v0.3.0.post1 is released! See [release note](https://github.com/volcengine/verl/releases/) for details.
93
+ - [2025/03] [DAPO](https://dapo-sia.github.io/) is the open-sourced SOTA RL algorithm that achieves 50 points on AIME 2024 based on the Qwen2.5-32B pre-trained model, surpassing the previous SOTA achieved by DeepSeek's GRPO (DeepSeek-R1-Zero-Qwen-32B). DAPO's training is fully powered by verl and the reproduction code is [publicly available](https://github.com/volcengine/verl/tree/gm-tyx/puffin/main/recipe/dapo) now.
94
+ - [2025/03] We will present verl(HybridFlow) at EuroSys 2025. See you in Rotterdam!
95
+ - [2025/03] We introduced the programming model of verl at the [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/n77GibL2corAtQHtVEAzfg) and [verl intro and updates](https://github.com/eric-haibin-lin/verl-community/blob/main/slides/verl-lmsys-meetup.pdf) at the [LMSys Meetup](https://lu.ma/ntjrr7ig) in Sunnyvale mid March.
96
+ - [2025/02] verl v0.2.0.post2 is released!
97
+ - [2025/01] [Doubao-1.5-pro](https://team.doubao.com/zh/special/doubao_1_5_pro) is released with SOTA-level performance on LLM & VLM. The RL scaling preview model is trained using verl, reaching OpenAI O1-level performance on math benchmarks (70.0 pass@1 on AIME).
98
+ <details><summary> more... </summary>
99
+ <ul>
100
+ <li>[2025/02] We presented verl in the <a href="https://lu.ma/ji7atxux">Bytedance/NVIDIA/Anyscale Ray Meetup</a>. See you in San Jose!</li>
101
+ <li>[2024/12] verl is presented at Ray Forward 2024. Slides available <a href="https://github.com/eric-haibin-lin/verl-community/blob/main/slides/Ray_Forward_2024_%E5%B7%AB%E9%94%A1%E6%96%8C.pdf">here</a></li>
102
+ <li>[2024/10] verl is presented at Ray Summit. <a href="https://www.youtube.com/watch?v=MrhMcXkXvJU&list=PLzTswPQNepXntmT8jr9WaNfqQ60QwW7-U&index=37">Youtube video</a> available.</li>
103
+ <li>[2024/12] The team presented <a href="https://neurips.cc/Expo/Conferences/2024/workshop/100677">Post-training LLMs: From Algorithms to Infrastructure</a> at NeurIPS 2024. <a href="https://github.com/eric-haibin-lin/verl-data/tree/neurips">Slides</a> and <a href="https://neurips.cc/Expo/Conferences/2024/workshop/100677">video</a> available.</li>
104
+ <li>[2024/08] HybridFlow (verl) is accepted to EuroSys 2025.</li>
105
+ </ul>
106
+ </details>
107
+
108
+ ## Key Features
109
+
110
+ - **FSDP** and **Megatron-LM** for training.
111
+ - **vLLM**, **SGLang**(experimental) and **HF Transformers** for rollout generation.
112
+ - Compatible with Hugging Face Transformers and Modelscope Hub: Qwen-2.5, Llama3.1, Gemma2, DeepSeek-LLM, etc.
113
+ - Supervised fine-tuning.
114
+ - Reinforcement learning with [PPO](examples/ppo_trainer/), [GRPO](examples/grpo_trainer/), [ReMax](examples/remax_trainer/), [REINFORCE++](https://verl.readthedocs.io/en/latest/examples/config.html#algorithm), [RLOO](examples/rloo_trainer/), [PRIME](recipe/prime/), etc.
115
+ - Support model-based reward and function-based reward (verifiable reward)
116
+ - Support vision-language models (VLMs) and [multi-modal RL](examples/grpo_trainer/run_qwen2_5_vl-7b.sh)
117
+ - Flash attention 2, [sequence packing](examples/ppo_trainer/run_qwen2-7b_seq_balance.sh), [sequence parallelism](examples/ppo_trainer/run_deepseek7b_llm_sp2.sh) support via DeepSpeed Ulysses, [LoRA](examples/sft/gsm8k/run_qwen_05_peft.sh), [Liger-kernel](examples/sft/gsm8k/run_qwen_05_sp2_liger.sh).
118
+ - Scales up to 70B models and hundreds of GPUs.
119
+ - Experiment tracking with wandb, swanlab, mlflow and tensorboard.
120
+
121
+ ## Upcoming Features
122
+ - Roadmap https://github.com/volcengine/verl/issues/710
123
+ - DeepSeek 671b optimizations with Megatron v0.11 https://github.com/volcengine/verl/issues/708
124
+ - Multi-turn rollout optimizations
125
+ - Environment interactions
126
+
127
+ ## Getting Started
128
+
129
+ <a href="https://verl.readthedocs.io/en/latest/index.html"><b>Documentation</b></a>
130
+
131
+ **Quickstart:**
132
+ - [Installation](https://verl.readthedocs.io/en/latest/start/install.html)
133
+ - [Quickstart](https://verl.readthedocs.io/en/latest/start/quickstart.html)
134
+ - [Programming Guide](https://verl.readthedocs.io/en/latest/hybrid_flow.html)
135
+
136
+ **Running a PPO example step-by-step:**
137
+ - Data and Reward Preparation
138
+ - [Prepare Data for Post-Training](https://verl.readthedocs.io/en/latest/preparation/prepare_data.html)
139
+ - [Implement Reward Function for Dataset](https://verl.readthedocs.io/en/latest/preparation/reward_function.html)
140
+ - Understanding the PPO Example
141
+ - [PPO Example Architecture](https://verl.readthedocs.io/en/latest/examples/ppo_code_architecture.html)
142
+ - [Config Explanation](https://verl.readthedocs.io/en/latest/examples/config.html)
143
+ - [Run GSM8K Example](https://verl.readthedocs.io/en/latest/examples/gsm8k_example.html)
144
+
145
+ **Reproducible algorithm baselines:**
146
+ - [PPO, GRPO, ReMax](https://verl.readthedocs.io/en/latest/experiment/ppo.html)
147
+
148
+ **For code explanation and advanced usage (extension):**
149
+ - PPO Trainer and Workers
150
+ - [PPO Ray Trainer](https://verl.readthedocs.io/en/latest/workers/ray_trainer.html)
151
+ - [PyTorch FSDP Backend](https://verl.readthedocs.io/en/latest/workers/fsdp_workers.html)
152
+ - [Megatron-LM Backend](https://verl.readthedocs.io/en/latest/index.html)
153
+ - Advanced Usage and Extension
154
+ - [Ray API design tutorial](https://verl.readthedocs.io/en/latest/advance/placement.html)
155
+ - [Extend to Other RL(HF) algorithms](https://verl.readthedocs.io/en/latest/advance/dpo_extension.html)
156
+ - [Add Models with the FSDP Backend](https://verl.readthedocs.io/en/latest/advance/fsdp_extension.html)
157
+ - [Add Models with the Megatron-LM Backend](https://verl.readthedocs.io/en/latest/advance/megatron_extension.html)
158
+ - [Deployment using Separate GPU Resources](https://github.com/volcengine/verl/tree/main/examples/split_placement)
159
+
160
+ **Blogs from the community**
161
+ - [Best practices for distributed GRPO reinforcement learning training with verl (in Chinese)](https://www.volcengine.com/docs/6459/1463942)
162
+ - [A brief walkthrough of the HybridFlow (veRL) paper (in Chinese)](https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/readme.md)
163
+ - [Up to 20x higher throughput! The Doubao LLM team releases a brand-new RLHF framework, now open source! (in Chinese)](https://team.doubao.com/en/blog/%E6%9C%80%E9%AB%98%E6%8F%90%E5%8D%8720%E5%80%8D%E5%90%9E%E5%90%90%E9%87%8F-%E8%B1%86%E5%8C%85%E5%A4%A7%E6%A8%A1%E5%9E%8B%E5%9B%A2%E9%98%9F%E5%8F%91%E5%B8%83%E5%85%A8%E6%96%B0-rlhf-%E6%A1%86%E6%9E%B6-%E7%8E%B0%E5%B7%B2%E5%BC%80%E6%BA%90)
164
+
165
+
166
+ ## Performance Tuning Guide
167
+ Performance is essential for on-policy RL algorithms. We have written a detailed [performance tuning guide](https://verl.readthedocs.io/en/latest/perf/perf_tuning.html) to help you optimize performance.
168
+
169
+ ## Use vLLM v0.8.2
170
+ veRL now supports vLLM>=0.8.2 when using FSDP as the training backend. Please refer to [this document](https://github.com/volcengine/verl/blob/main/docs/README_vllm0.8.md) for the installation guide and more information. Please avoid vLLM 0.7.x, which contains bugs that may lead to OOMs and unexpected errors.
171
+
172
+ ## Citation and acknowledgement
173
+
174
+ If you find the project helpful, please cite:
175
+ - [HybridFlow: A Flexible and Efficient RLHF Framework](https://arxiv.org/abs/2409.19256v2)
176
+ - [A Framework for Training Large Language Models for Code Generation via Proximal Policy Optimization](https://i.cs.hku.hk/~cwu/papers/gmsheng-NL2Code24.pdf)
177
+
178
+ ```bibtex
179
+ @article{sheng2024hybridflow,
180
+ title = {HybridFlow: A Flexible and Efficient RLHF Framework},
181
+ author = {Guangming Sheng and Chi Zhang and Zilingfeng Ye and Xibin Wu and Wang Zhang and Ru Zhang and Yanghua Peng and Haibin Lin and Chuan Wu},
182
+ year = {2024},
183
+ journal = {arXiv preprint arXiv: 2409.19256}
184
+ }
185
+ ```
186
+
187
+ verl is inspired by the design of Nemo-Aligner, Deepspeed-chat and OpenRLHF. The project is adopted and contributed by Bytedance, Anyscale, LMSys.org, [Alibaba Qwen team](https://github.com/QwenLM/), Shanghai AI Lab, Tsinghua University, UC Berkeley, UCLA, UIUC, University of Hong Kong, ke.com, [All Hands AI](https://www.all-hands.dev/), [ModelBest](http://modelbest.cn/), [OpenPipe](https://openpipe.ai/), JD AI Lab, Microsoft Research, [StepFun](https://www.stepfun.com/), Amazon, Linkedin, Meituan, [Camel-AI](https://www.camel-ai.org/), [OpenManus](https://github.com/OpenManus), [Prime Intellect](https://www.primeintellect.ai/), NVIDIA research, [Baichuan](https://www.baichuan-ai.com/home), and many more.
188
+
189
+ ## Awesome work using verl
190
+ - [TinyZero](https://github.com/Jiayi-Pan/TinyZero): a reproduction of **DeepSeek R1 Zero** recipe for reasoning tasks ![GitHub Repo stars](https://img.shields.io/github/stars/Jiayi-Pan/TinyZero)
191
+ - [DAPO](https://dapo-sia.github.io/): the fully open source SOTA RL algorithm that beats DeepSeek-R1-zero-32B ![GitHub Repo stars](https://img.shields.io/github/stars/volcengine/verl)
192
+ - [SkyThought](https://github.com/NovaSky-AI/SkyThought): RL training for Sky-T1-7B by NovaSky AI team. ![GitHub Repo stars](https://img.shields.io/github/stars/NovaSky-AI/SkyThought)
193
+ - [simpleRL-reason](https://github.com/hkust-nlp/simpleRL-reason): SimpleRL-Zoo: Investigating and Taming Zero Reinforcement Learning for Open Base Models in the Wild ![GitHub Repo stars](https://img.shields.io/github/stars/hkust-nlp/simpleRL-reason)
194
+ - [Easy-R1](https://github.com/hiyouga/EasyR1): **Multi-modal** RL training framework ![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/EasyR1)
195
+ - [OpenManus-RL](https://github.com/OpenManus/OpenManus-RL): LLM Agents RL tuning framework for multiple agent environments. ![GitHub Repo stars](https://img.shields.io/github/stars/OpenManus/OpenManus-RL)
196
+ - [deepscaler](https://github.com/agentica-project/deepscaler): iterative context scaling with GRPO ![GitHub Repo stars](https://img.shields.io/github/stars/agentica-project/deepscaler)
197
+ - [PRIME](https://github.com/PRIME-RL/PRIME): Process reinforcement through implicit rewards ![GitHub Repo stars](https://img.shields.io/github/stars/PRIME-RL/PRIME)
198
+ - [RAGEN](https://github.com/ZihanWang314/ragen): a general-purpose reasoning **agent** training framework ![GitHub Repo stars](https://img.shields.io/github/stars/ZihanWang314/ragen)
199
+ - [Logic-RL](https://github.com/Unakar/Logic-RL): a reproduction of DeepSeek R1 Zero on 2K Tiny Logic Puzzle Dataset. ![GitHub Repo stars](https://img.shields.io/github/stars/Unakar/Logic-RL)
200
+ - [Search-R1](https://github.com/PeterGriffinJin/Search-R1): RL with reasoning and **searching (tool-call)** interleaved LLMs ![GitHub Repo stars](https://img.shields.io/github/stars/PeterGriffinJin/Search-R1)
201
+ - [ReSearch](https://github.com/Agent-RL/ReSearch): Learning to **Re**ason with **Search** for LLMs via Reinforcement Learning ![GitHub Repo stars](https://img.shields.io/github/stars/Agent-RL/ReSearch)
202
+ - [DeepRetrieval](https://github.com/pat-jj/DeepRetrieval): Hacking **Real Search Engines** and **retrievers** with LLMs via RL for **information retrieval** ![GitHub Repo stars](https://img.shields.io/github/stars/pat-jj/DeepRetrieval)
203
+ - [cognitive-behaviors](https://github.com/kanishkg/cognitive-behaviors): Cognitive Behaviors that Enable Self-Improving Reasoners, or, Four Habits of Highly Effective STaRs ![GitHub Repo stars](https://img.shields.io/github/stars/kanishkg/cognitive-behaviors)
204
+ - [PURE](https://github.com/CJReinforce/PURE): **Credit assignment** is the key to successful reinforcement fine-tuning using **process reward model** ![GitHub Repo stars](https://img.shields.io/github/stars/CJReinforce/PURE)
205
+ - [MetaSpatial](https://github.com/PzySeere/MetaSpatial): Reinforcing **3D Spatial Reasoning** in **VLMs** for the **Metaverse** ![GitHub Repo stars](https://img.shields.io/github/stars/PzySeere/MetaSpatial)
206
+ - [DeepEnlighten](https://github.com/DolbyUUU/DeepEnlighten): Reproduce R1 with **social reasoning** tasks and analyze key findings ![GitHub Repo stars](https://img.shields.io/github/stars/DolbyUUU/DeepEnlighten)
207
+ - [Code-R1](https://github.com/ganler/code-r1): Reproducing R1 for **Code** with Reliable Rewards ![GitHub Repo stars](https://img.shields.io/github/stars/ganler/code-r1)
208
+ - [DeepResearcher](https://github.com/GAIR-NLP/DeepResearcher): Scaling deep research via reinforcement learning in real-world environments ![GitHub Repo stars](https://img.shields.io/github/stars/GAIR-NLP/DeepResearcher)
209
+ - [self-rewarding-reasoning-LLM](https://arxiv.org/pdf/2502.19613): self-rewarding and correction with **generative reward models** ![GitHub Repo stars](https://img.shields.io/github/stars/RLHFlow/Self-rewarding-reasoning-LLM)
210
+ - [critic-rl](https://github.com/HKUNLP/critic-rl): LLM critics for code generation ![GitHub Repo stars](https://img.shields.io/github/stars/HKUNLP/critic-rl)
211
+ - [DQO](https://arxiv.org/abs/2410.09302): Enhancing multi-Step reasoning abilities of language models through direct Q-function optimization
212
+ - [FIRE](https://arxiv.org/abs/2410.21236): Flaming-hot initiation with regular execution sampling for large language models
213
+ - [Rec-R1](https://arxiv.org/pdf/2503.24289): Bridging Generative Large Language Models and Recommendation Systems via Reinforcement Learning
214
+
215
+
216
+ ## Contribution Guide
217
+ Contributions from the community are welcome! Please check out our [project roadmap](https://github.com/volcengine/verl/issues/710) and [good first issues](https://github.com/volcengine/verl/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22good%20first%20issue%22) to see where you can contribute.
218
+
219
+ ### Code formatting
220
+ We use yapf (Google style) to enforce strict code formatting when reviewing PRs. To reformat your code locally, make sure you have installed the **latest** version of `yapf`
221
+ ```bash
222
+ pip3 install yapf --upgrade
223
+ ```
224
+ Then, make sure you are at the top level of the verl repo and run:
225
+ ```bash
226
+ bash scripts/format.sh
227
+ ```
228
+ We are HIRING! Send us an [email](mailto:[email protected]) if you are interested in internship/FTE opportunities in MLSys/LLM reasoning/multimodal alignment.
docker/Dockerfile.megatron ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ FROM verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
2
+
3
+ RUN pip install git+https://github.com/NVIDIA/TransformerEngine.git@stable
4
+
5
+ RUN cd /opt/nvidia && git clone --single-branch --branch core_r0.11.0 https://github.com/NVIDIA/Megatron-LM.git Megatron-LM
6
+
7
+ # only config pip index with https://pypi.tuna.tsinghua.edu.cn/simple if needed
8
+ # unset for now
9
+ RUN cd /opt/nvidia/Megatron-LM && pip3 install --no-deps -e .
docker/Dockerfile.ngc.vllm ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # docker buildx build --platform linux/x86_64 -t "verlai/verl:ngc-th2.4.0-cu124-vllm0.6.3-ray2.4-te1.7-v0.0.6" -f docker/Dockerfile.ngc.vllm . --builder cloud-verlai-verl-builder --progress=plain --push
2
+ FROM nvcr.io/nvidia/pytorch:24.05-py3
3
+
4
+ # uninstall nv-pytorch fork
5
+ RUN pip3 uninstall pytorch-quantization \
6
+ pytorch-triton \
7
+ torch \
8
+ torch-tensorrt \
9
+ torchvision \
10
+ xgboost transformer_engine flash_attn \
11
+ apex megatron-core -y
12
+
13
+ RUN pip3 install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu124
14
+
15
+ # =============== Megatron dependencies (optional) =================
16
+ # install apex, set MAX_JOBS to avoid OOMs
17
+ RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
18
+ --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
19
+ git+https://github.com/NVIDIA/apex
20
+ # =============== End of Megatron dependencies (optional) =================
21
+
22
+ RUN pip3 install --no-cache-dir \
23
+ accelerate \
24
+ codetiming \
25
+ datasets \
26
+ dill \
27
+ hydra-core \
28
+ numpy \
29
+ 'pandas' \
30
+ 'peft' \
31
+ 'pyarrow>=15.0.0' \
32
+ 'pybind11' \
33
+ 'pylatexenc' \
34
+ 'ray>=2.10' \
35
+ 'tensordict<0.6' \
36
+ 'transformers' \
37
+ 'vllm==0.6.3.post1' \
38
+ 'wandb'
39
+
40
+ # full dependencies
41
+ RUN pip3 install pytest yapf py-spy pyext liger-kernel
42
+
43
+ # =============== Megatron dependencies (optional) =================
44
+ # install Transformer Engine, which requires FA 2.5.8. Do it in a separate step for docker cache
45
+ RUN MAX_JOBS=4 NINJA_FLAGS="-j4" pip3 install flash-attn==2.5.8 --no-cache-dir --no-build-isolation
46
+ RUN MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 pip3 install git+https://github.com/eric-haibin-lin/[email protected]
47
+ # =============== End of Megatron dependencies (optional) =================
docker/Dockerfile.ngc.vllm0.8 ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Start from the NVIDIA official image (ubuntu-22.04 + python-3.10)
2
+ # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html
3
+ FROM nvcr.io/nvidia/pytorch:24.08-py3
4
+
5
+ # uninstall nv-pytorch fork
6
+ RUN pip3 uninstall -y pytorch-quantization \
7
+ pytorch-triton torch torch-tensorrt torchvision \
8
+ xgboost transformer_engine flash_attn apex megatron-core
9
+
10
+ # Define environments
11
+ ENV MAX_JOBS=32
12
+ ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
13
+ ENV DEBIAN_FRONTEND=noninteractive
14
+ ENV NODE_OPTIONS=""
15
+ ENV HF_HUB_ENABLE_HF_TRANSFER="1"
16
+
17
+ # Define installation arguments
18
+ ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/
19
+ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
20
+
21
+ # Set apt source
22
+ RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
23
+ { \
24
+ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
25
+ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
26
+ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
27
+ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
28
+ } > /etc/apt/sources.list
29
+
30
+ # Install systemctl
31
+ RUN apt-get update && \
32
+ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \
33
+ apt-get clean
34
+
35
+ # Install tini
36
+ RUN apt-get update && \
37
+ apt-get install -y tini && \
38
+ apt-get clean
39
+
40
+ # Change pip source
41
+ RUN pip config set global.index-url "${PIP_INDEX}" && \
42
+ pip config set global.extra-index-url "${PIP_INDEX}" && \
43
+ python -m pip install --upgrade pip
44
+
45
+ # Install torch-2.6.0 + vllm-0.8.2
46
+ RUN pip install --no-cache-dir vllm==0.8.2 torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 tensordict torchdata \
47
+ "transformers>=4.49.0" accelerate datasets peft hf-transfer \
48
+ "ray[default]" codetiming hydra-core pandas "pyarrow>=15.0.0" pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler \
49
+ pytest yapf py-spy pyext pre-commit ruff
50
+
51
+ # Install flash_attn-2.7.4.post1
52
+ RUN pip uninstall -y transformer-engine flash-attn && \
53
+ wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \
54
+ pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
55
+
56
+ # Fix cv2
57
+ RUN pip uninstall -y pynvml nvidia-ml-py && \
58
+ pip install --no-cache-dir "nvidia-ml-py>=12.560.30" opencv-python-headless==4.8.0.74 fastapi==0.115.6 && \
59
+ pip install --no-cache-dir --upgrade "optree>=0.13.0"
60
+
61
+ # Install verl
62
+ RUN pip install --no-cache-dir verl[vllm] -U
63
+
64
+ # Reset pip config
65
+ RUN pip config unset global.index-url && \
66
+ pip config unset global.extra-index-url
docker/Dockerfile.ngc.vllm0.8.sagemaker ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Using a pre-built image from AWS DLC which contains the current version of python (3.10) and supported cuda version (12.1)
2
+ FROM 763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:2.1.0-transformers4.36.0-gpu-py310-cu121-ubuntu20.04
3
+
4
+ # uninstall nv-pytorch fork
5
+ RUN pip3 uninstall -y pytorch-quantization \
6
+ pytorch-triton torch torch-tensorrt torchvision \
7
+ xgboost transformer_engine flash_attn apex megatron-core
8
+
9
+ # Define environments
10
+ ENV MAX_JOBS=32
11
+ ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
12
+ ENV DEBIAN_FRONTEND=noninteractive
13
+ ENV NODE_OPTIONS=""
14
+ ENV HF_HUB_ENABLE_HF_TRANSFER="1"
15
+
16
+ # Install systemctl
17
+ RUN apt-get update && \
18
+ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \
19
+ apt-get clean
20
+
21
+ # Install tini
22
+ RUN apt-get update && \
23
+ apt-get install -y tini && \
24
+ apt-get clean
25
+
26
+ # Install torch-2.6.0 + vllm-0.8.2
27
+ RUN pip install --no-cache-dir vllm==0.8.2 torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 tensordict torchdata==0.11.0 \
28
+ "transformers>=4.49.0" accelerate datasets peft hf-transfer \
29
+ "ray[default]" codetiming hydra-core pandas "pyarrow>=15.0.0" pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler \
30
+ pytest yapf py-spy pyext pre-commit ruff
31
+
32
+ # Install flash_attn-2.7.4.post1
33
+ RUN pip uninstall -y transformer-engine flash-attn && \
34
+ pip install flash-attn==2.7.4.post1 --no-build-isolation
35
+
36
+ # Fix cv2
37
+ RUN pip uninstall -y pynvml nvidia-ml-py && \
38
+ pip install --no-cache-dir "nvidia-ml-py>=12.560.30" opencv-python-headless==4.8.0.74 fastapi==0.115.6 && \
39
+ pip install --no-cache-dir --upgrade "optree>=0.13.0"
40
+
41
+ # Install verl
42
+ RUN pip install --no-cache-dir verl[vllm] -U
43
+
44
+ # Reset pip config
45
+ RUN pip config unset global.index-url && \
46
+ pip config unset global.extra-index-url
docker/Dockerfile.rocm ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Build the docker in the repo dir:
2
+ # docker build -f docker/Dockerfile.rocm -t verl-rocm:03.04.2015 .
3
+ # docker images # you can find your built docker
4
+
5
+
6
+ FROM rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4
7
+
8
+ # Set working directory
9
+ # WORKDIR $PWD/app
10
+
11
+ # Set environment variables
12
+ ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"
13
+
14
+ # Install vllm
15
+ RUN pip uninstall -y vllm && \
16
+ rm -rf vllm && \
17
+ git clone -b v0.6.3 https://github.com/vllm-project/vllm.git && \
18
+ cd vllm && \
19
+ MAX_JOBS=$(nproc) python3 setup.py install && \
20
+ cd .. && \
21
+ rm -rf vllm
22
+
23
+ # Copy the entire project directory
24
+ COPY . .
25
+
26
+ # Install dependencies
27
+ RUN pip install "tensordict<0.6" --no-deps && \
28
+ pip install accelerate \
29
+ codetiming \
30
+ datasets \
31
+ dill \
32
+ hydra-core \
33
+ liger-kernel \
34
+ numpy \
35
+ pandas \
36
+ peft \
37
+ "pyarrow>=15.0.0" \
38
+ pylatexenc \
39
+ "ray[data,train,tune,serve]" \
40
+ torchdata \
41
+ transformers \
42
+ wandb \
43
+ orjson \
44
+ pybind11 && \
45
+ pip install -e . --no-deps
docker/Dockerfile.sglang ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Start from the NVIDIA official image (ubuntu-22.04 + python-3.10)
2
+ # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html
3
+ FROM nvcr.io/nvidia/pytorch:24.08-py3
4
+
5
+ # Define environments
6
+ ENV MAX_JOBS=32
7
+ ENV DEBIAN_FRONTEND=noninteractive
8
+ ENV NODE_OPTIONS=""
9
+
10
+ # Define installation arguments
11
+ ARG APT_SOURCE=https://mirrors.ustc.edu.cn/ubuntu/
12
+
13
+ # Set apt source
14
+ RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
15
+ { \
16
+ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
17
+ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
18
+ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
19
+ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
20
+ } > /etc/apt/sources.list
21
+
22
+ # Install systemctl
23
+ RUN apt-get update && \
24
+ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \
25
+ apt-get clean
26
+
27
+ # Install tini
28
+ RUN apt-get update && \
29
+ apt-get install -y tini && \
30
+ apt-get clean
31
+
32
+ # Change pip source
33
+ ARG PIP_INDEX=https://mirrors.aliyun.com/pypi/simple/
34
+
35
+ RUN pip config set global.index-url "${PIP_INDEX}" && \
36
+ pip config set global.extra-index-url "${PIP_INDEX}" && \
37
+ python -m pip install --upgrade pip
38
+
39
+ # Install sglang-0.4.4.post4 and torch-memory-saver
40
+ RUN pip install "sglang[all]==0.4.4.post4" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.5/flashinfer-python && pip install torch-memory-saver --no-cache-dir
41
+
42
+ # Install torch-2.5.1
43
+ RUN pip install --no-cache-dir torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 tensordict torchdata \
44
+ "transformers>=4.49.0" accelerate datasets peft hf_transfer \
45
+ ray codetiming hydra-core pandas "pyarrow>=15.0.0" pylatexenc qwen-vl-utils wandb liger-kernel \
46
+ pytest yapf py-spy pyext
47
+
48
+ # Install flash_attn-2.7.4.post1
49
+ RUN pip uninstall -y transformer-engine flash-attn && \
50
+ wget -v https://ghfast.top/https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.5cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \
51
+ pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.5cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
52
+
53
+ # Fix cv2
54
+ RUN pip uninstall -y pynvml nvidia-ml-py && \
55
+ pip install --no-cache-dir "nvidia-ml-py>=12.560.30" opencv-python-headless==4.8.0.74 fastapi==0.115.6
docker/Dockerfile.vemlp.vllm.te ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # docker buildx build --platform linux/x86_64 -t "verlai/verl:$TAG" -f docker/$FILE .
2
+
3
+ # the one in docker.io is an alias for the one in veturbo
4
+ # FROM vemlp-cn-beijing.cr.volces.com/veturbo/pytorch:2.4-cu124
5
+ FROM docker.io/haibinlin/verl:v0.0.5-th2.4.0-cu124-base
6
+
7
+ # only config pip index with https://pypi.tuna.tsinghua.edu.cn/simple if needed
8
+ # unset for now
9
+ RUN pip3 config unset global.index-url
10
+
11
+ # transformers 4.47.0 contains the following bug:
12
+ # AttributeError: 'Gemma2Attention' object has no attribute '_flash_attn_uses_top_left_mask'
13
+ RUN pip3 install --no-cache-dir \
14
+ torch==2.4.0 \
15
+ accelerate \
16
+ codetiming \
17
+ dill \
18
+ hydra-core \
19
+ numpy \
20
+ pybind11 \
21
+ tensordict \
22
+ "transformers <= 4.46.0"
23
+
24
+ RUN pip3 install --no-cache-dir flash-attn==2.7.0.post2 --no-build-isolation
25
+
26
+ # vllm depends on ray, and veRL does not support ray > 2.37
27
+ RUN pip3 install --no-cache-dir vllm==0.6.3 ray==2.10
28
+
29
+ # install apex
30
+ RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
31
+ --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
32
+ git+https://github.com/NVIDIA/apex
33
+
34
+ # install Transformer Engine
35
+ # - flash-attn pinned to 2.5.3 by TransformerEngine, switch to eric-haibin-lin/[email protected] to relax version req
36
+ # - install with: MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 to avoid OOM
37
+ # - cudnn is required by TransformerEngine
38
+ # RUN CUDNN_PATH=/opt/conda/lib/python3.11/site-packages/nvidia/cudnn \
39
+ # pip3 install git+https://github.com/eric-haibin-lin/[email protected]
40
+ RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install flash-attn==2.5.3 --no-cache-dir --no-build-isolation
41
+ RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install git+https://github.com/NVIDIA/[email protected]
docs/Makefile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Minimal makefile for Sphinx documentation
2
+ #
3
+
4
+ # You can set these variables from the command line.
5
+ SPHINXOPTS =
6
+ SPHINXBUILD = sphinx-build
7
+ SPHINXPROJ = verl
8
+ SOURCEDIR = .
9
+ BUILDDIR = _build
10
+
11
+ # Put it first so that "make" without argument is like "make help".
12
+ help:
13
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14
+
15
+ .PHONY: help Makefile
16
+
17
+ # Catch-all target: route all unknown targets to Sphinx using the new
18
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19
+ %: Makefile
20
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
docs/README.md ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # verl documents
2
+
3
+ ## Build the docs
4
+
5
+ ```bash
6
+ # Install dependencies.
7
+ pip install -r requirements-docs.txt
8
+
9
+ # Build the docs.
10
+ make clean
11
+ make html
12
+ ```
13
+
14
+ ## Open the docs with your browser
15
+
16
+ ```bash
17
+ python -m http.server -d _build/html/
18
+ ```
19
+ Launch your browser and navigate to http://localhost:8000 to view the documentation.
docs/README_vllm0.7.md ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Upgrading to vllm >= 0.7
2
+
3
+ ## Installation
4
+
5
+ Note: This version of veRL+vllm 0.7+ supports **FSDP** for training and **vLLM** for rollout.
6
+
7
+ ```
8
+ # Create the conda environment
9
+ conda create -n verl python==3.10
10
+ conda activate verl
11
+
12
+ # Install verl
13
+ git clone https://github.com/volcengine/verl.git
14
+ cd verl
15
+ pip3 install -e .
16
+
17
+ # Install the latest stable version of vLLM
18
+ pip3 install vllm==0.7.3
19
+
20
+ # Install flash-attn
21
+ pip3 install flash-attn --no-build-isolation
22
+
23
+ ```
24
+
25
+ Note that if you are installing an older version of vLLM (0.7.0, 0.7.1 or 0.7.2), you need to apply a few small patches manually to vllm (/path/to/site-packages/vllm after installation) after the above steps:
26
+
27
+ - vllm/distributed/parallel_state.py: Remove the assertion below:
28
+
29
+ ```
30
+ if (world_size
31
+ != tensor_model_parallel_size * pipeline_model_parallel_size):
32
+ raise RuntimeError(
33
+ f"world_size ({world_size}) is not equal to "
34
+ f"tensor_model_parallel_size ({tensor_model_parallel_size}) x "
35
+ f"pipeline_model_parallel_size ({pipeline_model_parallel_size})")
36
+
37
+ ```
38
+
39
+ - vllm/executor/uniproc_executor.py: change `local_rank = rank` to `local_rank = int(os.environ["LOCAL_RANK"])`
40
+ - vllm/model_executor/model_loader/weight_utils.py: remove the `torch.cuda.empty_cache()` in `pt_weights_iterator`
41
+
42
+ ## Features
43
+
44
+ ### Use cuda graph
45
+
46
+ After installation, the examples that use FSDP as the training backend can be run. By default, `enforce_eager` is set to True, which disables the CUDA graph. To enable CUDA graphs and the sleep mode of vLLM>=0.7, add the following lines to the bash script:
47
+
48
+ ```
49
+ actor_rollout_ref.rollout.enforce_eager=False \
50
+ actor_rollout_ref.rollout.free_cache_engine=False \
51
+
52
+ ```
53
+
54
+ For a typical job like examples/ppo_trainer/run_qwen2-7b_seq_balance.sh, the rollout generation time is 115 seconds with vLLM 0.6.3 and 85 seconds with vLLM 0.7.0. Enabling the CUDA graph further reduces the generation time to 62 seconds.
55
+
56
+ **Note:** Currently, if `n` is greater than 1 in `SamplingParams` with vLLM>=0.7, there is a potential performance issue with the stability of the rollout generation time (some iterations see generation-time bursts) when using vLLM's V0 engine.
57
+
58
+ ### Use vLLM V1 Engine
59
+
60
+ Using the vLLM V1 engine can avoid instability issues and achieve additional performance improvements. To use the V1 engine, you can first uninstall the previously installed vLLM and then follow the steps below to install the newer version.
61
+
62
+ ```
63
+ git clone https://github.com/vllm-project/vllm.git
64
+ cd vllm
65
+ git checkout 2275784
66
+ sed -i "903a\ data_parallel_size = world_size // pipeline_model_parallel_size // tensor_model_parallel_size" ./vllm/distributed/parallel_state.py
67
+ VLLM_USE_PRECOMPILED=1 pip install --editable .
68
+ ```
69
+
70
+ Then you can enable the V1 engine by setting `export VLLM_USE_V1=1`. In some benchmark tests, the V1 engine demonstrates a 1.5x speed improvement over the vLLM V0 engine.
71
+ Stable support for the vLLM V1 engine is coming soon.
docs/README_vllm0.8.md ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Upgrading to vLLM >= 0.8
2
+
3
+ ## Installation
4
+
5
+ Note: This version of veRL+vLLM 0.8+ supports **FSDP** for training and **vLLM** for rollout.
6
+
7
+ ```bash
8
+ # Create the conda environment
9
+ conda create -n verl python==3.10
10
+ conda activate verl
11
+
12
+ # Install verl
13
+ git clone https://github.com/volcengine/verl.git
14
+ cd verl
15
+ pip3 install -e .
16
+
17
+ # Install the latest stable version of vLLM
18
+ pip3 install vllm==0.8.2
19
+
20
+ # Install flash-attn
21
+ pip3 install flash-attn --no-build-isolation
22
+
23
+ ```
24
+
25
+ We provide a pre-built Docker image for veRL with vLLM 0.8.2. You can pull it directly with the following command:
26
+
27
+ ```bash
28
+ docker pull hiyouga/verl:ngc-th2.6.0-cu120-vllm0.8.2
29
+ ```
30
+
31
+ ## Features
32
+
33
+ vLLM 0.8+ supports the CUDA graph and the V1 engine by default in veRL. To enable these features, remember to add the following lines to the bash script:
34
+
35
+ ```bash
36
+ actor_rollout_ref.rollout.enforce_eager=False \
37
+ actor_rollout_ref.rollout.free_cache_engine=False \
38
+ ```
39
+
40
+ and also **remove** the environment variable if it exists:
41
+
42
+ ```bash
43
+ export VLLM_ATTENTION_BACKEND=XFORMERS
44
+ ```
45
+
46
+ ## Notes
47
+
48
+ When you directly upgrade to vllm>=0.8, some dependency packages may change versions. If you encounter the following problem:
49
+
50
+ ```bash
51
+ in <module> from torch.multiprocessing.reductions import ForkingPickler ImportError: cannot import name 'ForkingPickler' from 'torch.multiprocessing.reductions' (/opt/conda/lib/python3.11/site-packages/torch/multiprocessing/reductions.py)
52
+ ```
53
+
54
+ You need to upgrade `tensordict` to version 0.6.2 using the command `pip install tensordict==0.6.2`.
docs/_static/logo.png ADDED

Git LFS Details

  • SHA256: fd27c16b2122527e513ea8884e0ad175f59c73af2ca1e10b1acaab38196a8638
  • Pointer size: 130 Bytes
  • Size of remote file: 84.7 kB
docs/advance/checkpoint.rst ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Using Checkpoints to Support Fault-Tolerant Training
2
+ =====================================================
3
+
4
+ Training errors or machine failures can occur during the RLHF training process,
5
+ so it is recommended to enable checkpointing to minimize the loss of training progress.
6
+
7
+ The API interface has already been listed in :ref:`config-explain-page`,
8
+ so we will not repeat it here. But there are still some technical details
9
+ we hope to clarify.
10
+
11
+ .. note::
12
+
13
+ Notice that the ``checkpoint.contents`` field has no effect on the FSDP checkpoint except for ``hf_model``;
14
+ the other 3 fields are bound together for saving and loading. We recommend including all of ``model``, ``optimizer`` and ``extra``.
15
+
16
+ Checkpoint Saving Directory Structure
17
+ -------------------------------------
18
+
19
+ Commonly, we use the ``default_local_dir`` declared in ``ppo_trainer.yaml`` or ``ppo_megatron_trainer.yml``
20
+ to work as the prefix when saving checkpoints, which is ``checkpoints/${trainer.project_name}/${trainer.experiment_name}``.
21
+
22
+ The inner checkpoint structure of **FSDP** looks like:
23
+
24
+ .. code::
25
+
26
+ checkpoints/${trainer.project_name}/${trainer.experiment_name}
27
+ ├── global_steps_${i}
28
+ │ ├── actor
29
+ │ │ ├── model_world_size_{self.world_size}_rank_{self.rank}.pt
30
+ │ │ ├── optim_world_size_{self.world_size}_rank_{self.rank}.pt
31
+ │ │ └── extra_state_world_size_{self.world_size}_rank_{self.rank}.pt
32
+ │ ├── actor_huggingface
33
+ │ ├── critic
34
+ │ │ ├── model_world_size_{self.world_size}_rank_{self.rank}.pt
35
+ │ │ ├── optim_world_size_{self.world_size}_rank_{self.rank}.pt
36
+ │ │ └── extra_state_world_size_{self.world_size}_rank_{self.rank}.pt
37
+ │ └── critic_huggingface
38
+ └── latest_checkpointed_iteration.txt
39
+
40
+ All model shards, optimizer states and extra states are stored together, in a sharded and distributed way.
41
+
42
+ The current **Megatron** checkpoint structure is:
43
+
44
+ .. code::
45
+
46
+ checkpoints/${trainer.project_name}/${trainer.experiment_name}
47
+ ├── global_steps_${i}
48
+ │ ├── actor
49
+ │ │ ├── huggingface # saves the tokenizer by default; saves the huggingface model if ``hf_model`` is included in checkpoint.contents
50
+ │ │ ├── model # save sharded model, naming the same as Megatron
51
+ │ │ │ ├── mp_rank_xx_yyy # xx is tp_rank in 2 digits, yyy is pp_rank in 3 digits
52
+ │ │ │ │ └── model_states.pt
53
+ │ │ │ └── mp_rank_xx_xxx
54
+ │ │ ├── optim
55
+ │ │ │ ├── distrib_optim_pp{x}_tp{y}.pt
56
+ │ │ │ └── distrib_optim_pp{x}_tp{y}.pt
57
+ │ │ └── rng_states
58
+ │ └── critic
59
+ │ │ ├── huggingface
60
+ │ │ ├── model
61
+ │ │ ├── optim
62
+ │ │ └── rng_states
63
+ └── latest_checkpointed_iteration.txt
64
+
65
+ Convert FSDP and Megatron Checkpoints to HuggingFace Format Model
66
+ -----------------------------------------------------------------
67
+
68
+ We provide a tool to convert FSDP and Megatron checkpoints into a HuggingFace-format model.
69
+ The tool is located in ``scripts/model_merger.py``.
70
+
71
+ The arguments are as follows:
72
+
73
+ .. code:: bash
74
+
75
+ usage: model_merger.py [-h] [--backend {fsdp,megatron}]
76
+ [--tie-word-embedding whether the model share embedding weights]
77
+ [--is-value-model whether the model is critic model]
78
+ [--hf_model_path $original_model_path, like {Qwen/Qwen2-7B}]
79
+ [--local_dir $local_directory saved fsdp or megatron models]
80
+ [--target_dir $target_dir to save converted models, default is tmp]
81
+ [--hf_upload_path $huggingface_repo to upload]
82
+
83
+ An example use of the Megatron model merger is:
84
+
85
+ .. code:: bash
86
+
87
+ python3 scripts/model_merger.py --backend megatron \
88
+ --is-value-model \
89
+ --hf_model_path Qwen/Qwen2-7B \
90
+ --local_dir checkpoints/verl_megatron_gsm8k_examples/deepseek_megatron_checkpoint_saveload/global_step_1/actor/model
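+
+ After conversion, the resulting directory can typically be loaded like any other HuggingFace checkpoint (a minimal sketch for an actor/policy model; the paths are illustrative):
+
+ .. code:: python
+
+     from transformers import AutoModelForCausalLM, AutoTokenizer
+
+     # "tmp" is the default --target_dir of model_merger.py; adjust to your own path.
+     model = AutoModelForCausalLM.from_pretrained("tmp")
+     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B")  # the original hf_model_path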
91
+
92
+ Megatron Merger details
93
+ -----------------------
94
+
95
+ The current implementation of the decoder layers uses ``nn.ModuleList`` to store the layers,
96
+ and thus the model layers on every PP rank and VPP rank start their indices from 0.
97
+
98
+ There are 3 ways to correct this behavior:
99
+
100
+ 1. Modify the decoder layer's state_dict and add an ``offset`` to each layer's index, i.e. rewrite the ``nn.ModuleList`` implementation.
101
+ 2. Modify the layer indices when saving the checkpoint and recover them when loading the checkpoint.
102
+ 3. Let the checkpoint merger do this work and compute the actual ``offset`` from the ``state_dict`` alone, which is a little complex.
103
+
104
+ The current implementation uses solution 2.
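+
+ As a rough illustration of solution 2 (this is not the actual verl code, and the key prefix is only an assumption), shifting the saved layer indices by a per-rank offset could look like:
+
+ .. code:: python
+
+     import re
+
+     def shift_layer_indices(state_dict, offset):
+         """Illustrative only: rewrite 'decoder.layers.<local_idx>.' keys so that the
+         saved checkpoint carries global layer indices (local index + offset)."""
+         pattern = re.compile(r"(decoder\.layers\.)(\d+)(\.)")
+         return {
+             pattern.sub(lambda m: f"{m.group(1)}{int(m.group(2)) + offset}{m.group(3)}", key): value
+             for key, value in state_dict.items()
+         }
+
+     # Saving on a PP rank that owns global layers [8, 16): use offset=8.
+     # Loading back on that rank: shift_layer_indices(loaded_state_dict, -8)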
105
+
106
+ Original Checkpoint Utils
107
+ -------------------------
108
+
109
+ Original Checkpoint Utils refers to the original checkpoint implementation in ``verl/models/[model]/megatron/checkpoint_utils``.
110
+
111
+ We now only need ``[model]_loader.py`` from the original checkpoint utils, since we no longer store ``hf_model`` every time (which is not recommended for large model training; try to save only sharded models if you can).
112
+
113
+ .. note::
114
+
115
+ Note that ``[model]_loader`` only supports environments where **the storage cluster is reachable from every compute node**,
116
+ because it uses a **sharded load to minimize the checkpoint loading overhead**.
117
+ Every rank loads its own data from a ``state_dict`` that all of them can access,
118
+ so there is also no need to broadcast among DP ranks, since the saved state_dict is only produced by DP rank 0.
119
+
120
+ For users who can **only place the huggingface model on one device**, we keep the original costly implementation in ``[model]_loader_deprecated``. In this implementation, rank 0 broadcasts all weights to each tp and pp rank, and then dp rank 0 broadcasts to all dp ranks. There may be a risk of OOM.
121
+
122
+ To use the deprecated loader, change the import package of ``load_state_dict_to_megatron_llama``.
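+
+ Schematically (ignoring TP/PP details and using plain ``torch.distributed`` rather than the actual loader code), the difference between the two loaders is whether every rank reads the checkpoint itself or receives it from rank 0:
+
+ .. code:: python
+
+     import torch
+     import torch.distributed as dist
+
+     def load_shard_directly(path):
+         # ``[model]_loader`` style (illustrative): every rank reads what it needs
+         # from shared storage, so no broadcast is required.
+         return torch.load(path, map_location="cpu")
+
+     def load_and_broadcast(path, rank):
+         # ``[model]_loader_deprecated`` style (illustrative): only rank 0 reads the
+         # full checkpoint, then the weights are broadcast to the other ranks.
+         obj = [torch.load(path, map_location="cpu") if rank == 0 else None]
+         dist.broadcast_object_list(obj, src=0)  # costly: the whole model travels over the network
+         return obj[0]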
docs/advance/dpo_extension.rst ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Extend to other RL(HF) algorithms
2
+ =================================
3
+
4
+ We have already implemented the complete training pipeline of the PPO
5
+ algorithm. To extend to other algorithms, we analyze the high-level
6
+ principles of using verl and provide a tutorial for implementing the DPO
7
+ algorithm. Users can follow a similar paradigm to extend to other RL algorithms.
8
+
9
+ .. note:: **Key ideas**: Single process drives multi-process computation and data communication.
10
+
11
+ Overall Approach
12
+ ----------------
13
+
14
+ Step 1: Consider what multi-machine multi-GPU computations are needed
15
+ for each model, such as ``generate_sequence``, ``compute_log_prob`` and
16
+ ``update_policy`` in the actor_rollout model. Implement distributed
17
+ single-program-multiple-data (SPMD) computation and encapsulate it
18
+ into APIs.
19
+
20
+ Step 2: Based on different distributed scenarios, including FSDP and 3D
21
+ parallelism in Megatron-LM, implement single-process control of data
22
+ interaction among multi-process computations.
23
+
24
+ Step 3: Utilize the encapsulated APIs to implement the control flow
25
+
26
+ Example: Online DPO
27
+ -------------------
28
+
29
+ We use verl to implement a simple online DPO algorithm. The algorithm
30
+ flow of Online DPO is as follows:
31
+
32
+ 1. There is a prompt (rollout) generator which has the same weights as
33
+ the actor model. After a batch of prompts is fed into the generator,
34
+ it generates N responses for each prompt.
35
+ 2. Send all the prompts + responses to a verifier for scoring, which can
36
+ be a reward model or a rule-based function. Then sort them into pairs to
37
+ form a training batch.
38
+ 3. Use this training batch to train the actor model using DPO. During
39
+ the process, a reference policy is needed.
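+
+ To make steps 2 and 3 concrete, a minimal sketch of the pairing and of the standard DPO loss might look like the following (the function names, pairing strategy and tensor shapes are illustrative assumptions, not the APIs built later in this tutorial):
+
+ .. code:: python
+
+     import torch.nn.functional as F
+
+     def make_preference_pair(responses, scores):
+         """One simple pairing choice: take the highest- and lowest-scored response per prompt."""
+         order = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
+         return responses[order[0]], responses[order[-1]]  # (chosen, rejected)
+
+     def dpo_loss(logp_chosen, logp_rejected, ref_logp_chosen, ref_logp_rejected, beta=0.1):
+         """Standard DPO objective: prefer the chosen response, measured relative to the reference policy."""
+         chosen_margin = logp_chosen - ref_logp_chosen
+         rejected_margin = logp_rejected - ref_logp_rejected
+         return -F.logsigmoid(beta * (chosen_margin - rejected_margin)).mean()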
40
+
41
+ Step 1: What are the multi-machine multi-GPU computations
42
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
43
+
44
+ **Sample Generator**
45
+
46
+ Implementation details:
47
+
48
+ .. code:: python
49
+
50
+ from verl.single_controller.base import Worker
51
+ from verl.single_controller.ray import RayWorkerGroup, RayClassWithInitArgs, RayResourcePool
52
+ import ray
53
+
54
+ @ray.remote
55
+ class SampleGenerator(Worker):
56
+ def __init__(self, config):
57
+ super().__init__()
58
+ self.config = config
59
+
60
+ def generate_sequences(self, data):
61
+ pass
62
+
63
+ Here, ``SampleGenerator`` can be viewed as a group of processes launched by
64
+ ``torchrun``, with each process running the same code (SPMD).
65
+ ``SampleGenerator`` needs to implement a ``generate_sequences`` API for
66
+ the control flow to call. The implementation details inside can use any
67
+ inference engine including vllm, sglang and huggingface. Users can
68
+ largely reuse the code in
69
+ verl/verl/workers/rollout/vllm_rollout/vllm_rollout.py and we won't
70
+ go into details here.
71
+
72
+ **ReferencePolicy inference**
73
+
74
+ API: compute reference log probability
75
+
76
+ .. code:: python
77
+
78
+ from verl.single_controller.base import Worker
79
+ import ray
80
+
81
+ @ray.remote
82
+ class ReferencePolicy(Worker):
83
+ def __init__(self):
84
+ super().__init__()
85
+ self.model = Model()
86
+
87
+ def infer(self, data):
88
+ return self.model(data)
89
+
90
+ **Actor update**
91
+
92
+ API: Update actor model parameters
93
+
94
+ .. code:: python
95
+
96
+ from verl.single_controller.base import Worker
97
+ import ray
98
+
99
+ @ray.remote
100
+ class DPOActor(Worker):
101
+ def __init__(self):
102
+ super().__init__()
103
+ self.model = Model()
104
+ self.model = FSDP(self.model) # or other distributed strategy
105
+ self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)
106
+ self.loss_fn = xxx
107
+
108
+ def update(self, data):
109
+ self.optimizer.zero_grad()
110
+ logits = self.model(data)
111
+ loss = self.loss_fn(logits)
112
+ loss.backward()
113
+ self.optimizer.step()
114
+
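+ The ``loss_fn`` above is left abstract. As an illustration only (a minimal
+ sketch, not verl's implementation), a pairwise DPO loss over summed token
+ log-probabilities could look like the following; ``beta`` and the four
+ log-probability inputs are assumptions of this sketch.
+
+ .. code:: python
+
+     import torch
+     import torch.nn.functional as F
+
+     def dpo_loss(policy_chosen_logps: torch.Tensor,
+                  policy_rejected_logps: torch.Tensor,
+                  ref_chosen_logps: torch.Tensor,
+                  ref_rejected_logps: torch.Tensor,
+                  beta: float = 0.1) -> torch.Tensor:
+         """Pairwise DPO loss; each input is a per-example sum of token log-probs."""
+         chosen_ratio = policy_chosen_logps - ref_chosen_logps
+         rejected_ratio = policy_rejected_logps - ref_rejected_logps
+         # Reward a larger margin between chosen and rejected responses.
+         logits = beta * (chosen_ratio - rejected_ratio)
+         return -F.logsigmoid(logits).mean()
+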
115
+ **Notes: How to distinguish between control processes and distributed computation processes**
116
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
117
+
118
+ - Control processes are generally functions directly decorated with
119
+ ``@ray.remote``
120
+ - Computation processes are all wrapped into a ``RayWorkerGroup``.
121
+
122
+ Users can reuse most of the distributed computation logic implemented
123
+ in the PPO algorithm, including the FSDP and Megatron-LM backends in
124
+ verl/verl/trainer/ppo.
125
+
126
+ Step 2: Based on different distributed scenarios, implement single-process control of multi-process data interaction
127
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
128
+
129
+ **The core problem to solve here is how a single process sends data to
130
+ multiple processes, drives multi-process computation, and how the
131
+ control process obtains the results of multi-process computation.**
132
+ First, we initialize the multi-process ``WorkerGroup`` in the control
133
+ process.
134
+
135
+ .. code:: python
136
+
137
+ @ray.remote(num_cpus=1)
138
+ def main_task(config):
139
+ # construct SampleGenerator
140
+ resource_pool = RayResourcePool(process_on_nodes=[8] * 2) # 16 GPUs
141
+ ray_cls = RayClassWithInitArgs(SampleGenerator, config=config)
142
+ # put SampleGenerator onto resource pool
143
+ worker_group = RayWorkerGroup(resource_pool, ray_cls)
144
+
145
+ # construct reference policy
146
+
147
+ As we can see, in the control process, multiple processes are wrapped
148
+ into a ``RayWorkerGroup``. Inside this ``WorkerGroup``, there is a
149
+ ``self._workers`` member, where each worker is a RayActor
150
+ (https://docs.ray.io/en/latest/ray-core/actors.html) of SampleGenerator.
151
+ ray_trainer.md also provides an implementation of
152
+ ``MegatronRayWorkerGroup``.
153
+
154
+ Assuming the model is distributed using FSDP, and there is a batch of
155
+ data on the control process, for data parallelism, the underlying
156
+ calling process is:
157
+
158
+ .. code:: python
159
+
160
+ data = xxx
161
+ data_list = data.chunk(dp_size)
162
+
163
+ output = []
164
+     for i, d in enumerate(data_list):
165
+         # worker_group._workers[i] is a SampleGenerator
166
+         output.append(worker_group._workers[i].generate_sequences.remote(d))
167
+
168
+ output = ray.get(output)
169
+ output = torch.cat(output)
170
+
171
+ Single process calling multiple processes involves the following 3
172
+ steps:
173
+
174
+ 1. Split the data into DP parts on the control process.
175
+ 2. Send the data to remote, call the remote computation through RPC, and
176
+ utilize multi-process computation.
177
+ 3. Obtain the computation results of each worker on the control process
178
+ and merge them.
179
+
180
+ Frequently calling these 3 steps on the controller process greatly hurts
181
+ code readability. **In verl, we have abstracted and encapsulated these 3
182
+ steps, so that the worker's method + dispatch + collect can be
183
+ registered into the worker_group.**
184
+
185
+ .. code:: python
186
+
187
+ from verl.single_controller.base.decorator import register
188
+
189
+ def dispatch_data(worker_group, data):
190
+ return data.chunk(worker_group.world_size)
191
+
192
+ def collect_data(worker_group, data):
193
+ return torch.cat(data)
194
+
195
+ dispatch_mode = {
196
+ 'dispatch_fn': dispatch_data,
197
+ 'collect_fn': collect_data
198
+ }
199
+
200
+ @register(dispatch_mode=dispatch_mode)
201
+ def generate_sequences(self, data):
202
+ pass
203
+
204
+ In this way, we can directly call the method inside the worker through
205
+ the ``worker_group`` on the control (driver) process (which is a single
206
+ process):
207
+
208
+ .. code:: python
209
+
210
+ output = worker_group.generate_sequences(data)
211
+
212
+ This single line includes data splitting, data distribution and
213
+ computation, and data collection.
214
+
215
+ Furthermore, the model parallelism size of each model is usually fixed,
216
+ including dp, tp, pp. So for these common distributed scenarios, we have
217
+ pre-implemented specific dispatch and collect methods in `decorator.py <https://github.com/volcengine/verl/blob/main/verl/single_controller/base/decorator.py>`_, which can be directly used to wrap the computations.
218
+
219
+ .. code:: python
220
+
221
+ from verl.single_controller.base.decorator import register, Dispatch
222
+
223
+ @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
224
+ def generate_sequences(self, data: DataProto) -> DataProto:
225
+ pass
226
+
227
+ This dispatch mode requires the data interface to be ``DataProto``. The definition of
228
+ ``DataProto`` is in `protocol.py <https://github.com/volcengine/verl/blob/main/verl/protocol.py>`_.
229
+
230
+ Step 3: Main training loop
231
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
232
+
233
+ With the above training flows, we can implement the algorithm's control
234
+ flow. It is recommended that ``main_task`` is also a ray remote process.
235
+
236
+ .. code:: python
237
+
238
+ @ray.remote(num_cpus=1)
239
+ def main_task(config):
240
+ # construct SampleGenerator
241
+ resource_pool = RayResourcePool(process_on_nodes=[8] * 2) # 16 GPUs
242
+ ray_cls = RayClassWithInitArgs(SampleGenerator, config=config)
243
+ # put SampleGenerator onto resource pool
244
+ sample_gen = RayWorkerGroup(resource_pool, ray_cls)
245
+
246
+ # construct reference policy
247
+ ray_cls = RayClassWithInitArgs(ReferencePolicy)
248
+ ref_policy = RayWorkerGroup(resource_pool, ray_cls)
249
+
250
+ # construct actor
251
+ ray_cls = RayClassWithInitArgs(DPOActor)
252
+ dpo_policy = RayWorkerGroup(resource_pool, ray_cls)
253
+
254
+ dataloader = DataLoader()
255
+
256
+ for data in dataloader:
257
+ # generate data
258
+ data = sample_gen.generate_sequences(data)
259
+ # generate scores for each data
260
+ data = generate_scores(data)
261
+ # generate pairwise data using scores
262
+ data = generate_pairwise_data(data)
263
+ # generate ref_log_prob
264
+ data.batch['ref_log_prob'] = ref_policy.infer(data)
265
+ # update using dpo
266
+ dpo_policy.update(data)
267
+ # logging
268
+
269
+ Here, different ``WorkerGroups`` can be placed in the same resource pool or
270
+ in different resource pools using ``create_colocated_worker_cls``
271
+ similar to what is done in `ray_trainer.py <https://github.com/volcengine/verl/blob/main/verl/trainer/ppo/ray_trainer.py>`_.
docs/advance/fsdp_extension.rst ADDED
@@ -0,0 +1,95 @@
1
+
2
+ Add models with the FSDP backend
3
+ ==================================
4
+
5
+ Model
6
+ --------------------------
7
+
8
+ In principle, our FSDP backend can support any HF model and we can
9
+ synchronize the actor model weights with vLLM using `hf_weight_loader.py <https://github.com/volcengine/verl/blob/main/verl/third_party/vllm/vllm_v_0_6_3/hf_weight_loader.py>`_.
10
+ However, ``hf_weight_loader`` will gather the full state_dict of a
11
+ model during synchronization, which may cause OOM. We suggest using
12
+ ``dtensor_weight_loader``, which gathers the full model parameters layer by
13
+ layer to reduce the peak memory usage. We already support the dtensor weight
14
+ loader for the models below in `dtensor_weight_loaders.py <https://github.com/volcengine/verl/blob/main/verl/third_party/vllm/vllm_v_0_5_4/dtensor_weight_loaders.py>`_:
15
+
16
+ - ``GPT2LMHeadModel``
17
+ - ``LlamaForCausalLM``
18
+ - ``LLaMAForCausalLM``
19
+ - ``MistralForCausalLM``
20
+ - ``InternLMForCausalLM``
21
+ - ``AquilaModel``
22
+ - ``AquilaForCausalLM``
23
+ - ``Phi3ForCausalLM``
24
+ - ``GemmaForCausalLM``
25
+ - ``Gemma2ForCausalLM``
26
+ - ``GPTBigCodeForCausalLM``
27
+ - ``Starcoder2ForCausalLM``
28
+ - ``Qwen2ForCausalLM``
29
+ - ``DeepseekV2ForCausalLM``
30
+
31
+ To implement a ``dtensor_weight_loader`` for a model that's supported in
32
+ vLLM, follow the guide for the gemma model below:
33
+
34
+ 1. Copy the
35
+ ``load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]])`` from the vllm model class
36
+ to ``dtensor_weight_loaders.py``
37
+ 2. Modify the arguments to
38
+ ``(actor_weights: Dict, vllm_model: nn.Module)``
39
+ 3. Replace the ``self`` to ``vllm_model``
40
+ 4. Add the
41
+ ``local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)``
42
+ before each ``param = params_dict[name]`` and modify the following
43
+ weight loading using ``local_loaded_weight``.
44
+ 5. Register the implemented dtensor weight loader to ``__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__``.
45
+
46
+ .. code-block:: diff
47
+
48
+ - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
49
+ + def gemma_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
50
+ stacked_params_mapping = [
51
+ # (param_name, shard_name, shard_id)
52
+ ("qkv_proj", "q_proj", "q"),
53
+ ("qkv_proj", "k_proj", "k"),
54
+ ("qkv_proj", "v_proj", "v"),
55
+ ("gate_up_proj", "gate_proj", 0),
56
+ ("gate_up_proj", "up_proj", 1),
57
+ ]
58
+ - params_dict = dict(self.named_parameters())
59
+ + params_dict = dict(vllm_model.named_parameters())
60
+ loaded_params = set()
61
+ - for name, loaded_weight in weights:
62
+ + for name, loaded_weight in actor_weights.items():
63
+ for (param_name, shard_name, shard_id) in stacked_params_mapping:
64
+ if shard_name not in name:
65
+ continue
66
+ name = name.replace(shard_name, param_name)
67
+ # Skip loading extra bias for GPTQ models.
68
+ if name.endswith(".bias") and name not in params_dict:
69
+ continue
70
+ + local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
71
+ param = params_dict[name]
72
+ weight_loader = param.weight_loader
73
+ - weight_loader(param, loaded_weight, shard_id)
74
+ + weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
75
+ break
76
+ else:
77
+ # lm_head is not used in vllm as it is tied with embed_token.
78
+ # To prevent errors, skip loading lm_head.weight.
79
+ if "lm_head.weight" in name:
80
+ continue
81
+ # Skip loading extra bias for GPTQ models.
82
+ if name.endswith(".bias") and name not in params_dict:
83
+ continue
84
+ + local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
85
+ param = params_dict[name]
86
+ weight_loader = getattr(param, "weight_loader",
87
+ default_weight_loader)
88
+ - weight_loader(param, loaded_weight)
89
+ + weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
90
+ loaded_params.add(name)
91
+ unloaded_params = params_dict.keys() - loaded_params
92
+ if unloaded_params:
93
+ raise RuntimeError(
94
+ "Some weights are not initialized from checkpoints: "
95
+ f"{unloaded_params}")
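+
+ For step 5 above, registration amounts to adding one entry that maps the model
+ architecture name to the new loader function. The exact layout of
+ ``__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__`` shown below is an assumption for
+ illustration; check ``dtensor_weight_loaders.py`` for the actual structure.
+
+ .. code-block:: python
+
+     # Illustrative sketch of step 5; the real registry lives in
+     # dtensor_weight_loaders.py.
+     __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__ = {
+         # ... existing entries for the supported models listed above ...
+         "GemmaForCausalLM": gemma_dtensor_weight_loader,
+     }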
docs/advance/megatron_extension.rst ADDED
@@ -0,0 +1,26 @@
1
+ Add models with the Megatron-LM backend
2
+ =========================================
3
+
4
+ Model
5
+ -----------
6
+
7
+ The most challenging aspect of using the Megatron-LM backend is implementing
8
+ the models for training. Currently, we implement a Llama model that
9
+ supports data parallelism, tensor parallelism, pipeline parallelism (also
10
+ vPP) and sequence parallelism. We also implement remove padding (sequence packing) for the Llama
11
+ model, which can be found in `modeling_llama_megatron.py <https://github.com/volcengine/verl/blob/main/verl/models/llama/megatron/modeling_llama_megatron.py>`_.
12
+
13
+ To support other models, users are required to implement:
14
+
15
+ 1. Implement a model similar to ``modeling_llama_megatron.py`` that satisfies the
16
+ parallelism requirements of Megatron-LM. Then register your model in
17
+ the `registry.py <https://github.com/volcengine/verl/blob/main/verl/models/registry.py>`_.
18
+ 2. Checkpoint utils that can load a full checkpoint (e.g. a huggingface
19
+    checkpoint) into partitioned models at runtime. Then register
20
+    your loader in ``weight_loader_registry`` in `weight_loader_registry.py <https://github.com/volcengine/verl/blob/main/verl/models/weight_loader_registry.py>`_ (a hypothetical registration sketch is given after this list).
21
+ 3. A weight loader that synchronizes the weights from the Megatron model to the rollout
22
+    (vLLM) model. Note that both the actor model and the rollout model are
23
+    partitioned at runtime, so it's advisable to keep the parameter names in the
24
+    actor model implementation consistent with those of the rollout model. Otherwise, you
25
+    may need an additional name mapping and even a weight transformation. The weight loader implementation
26
+ is in `megatron_weight_loaders.py <https://github.com/volcengine/verl/blob/main/verl/third_party/vllm/vllm_v_0_6_3/megatron_weight_loaders.py>`_.
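+
+ As an example of step 2, the registration itself is typically a single
+ dictionary entry mapping the architecture name to a loader function. The
+ sketch below is hypothetical: the function name and its signature are
+ illustrative assumptions, not verl's API; see ``weight_loader_registry.py``
+ for the actual interface.
+
+ .. code-block:: python
+
+     # Hypothetical sketch: register a checkpoint loader for a new model.
+     def load_state_dict_to_megatron_gemma(state_dict, wrapped_models, config):
+         """Load a full (e.g. huggingface) state_dict into the partitioned model."""
+         ...  # shard and copy each weight according to the tp/pp layout
+
+     weight_loader_registry = {
+         "GemmaForCausalLM": load_state_dict_to_megatron_gemma,
+     }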
docs/advance/placement.rst ADDED
@@ -0,0 +1,11 @@
1
+ Ray API Design Tutorial
2
+ =======================================
3
+
4
+ We provide a tutorial for our Ray API design, including:
5
+
6
+ - Ray basic concepts
7
+ - Resource Pool and RayWorkerGroup
8
+ - Data Dispatch, Execution and Collection
9
+ - Initialize the RayWorkerGroup and execute the distributed computation in the given Resource Pool
10
+
11
+ See details in `tutorial.ipynb <https://github.com/volcengine/verl/blob/main/examples/ray/tutorial.ipynb>`_.
docs/amd_tutorial/amd_build_dockerfile_page.rst ADDED
@@ -0,0 +1,512 @@
1
+ Getting started with AMD (ROCM Kernel)
2
+ =====================================================
3
+
4
+ Author: `Yusheng Su <https://yushengsu-thu.github.io/>`_
5
+
6
+ Setup
7
+ -----
8
+
9
+ If you run on AMD GPUs (MI300) with the ROCm platform, you cannot use the previous quickstart to run VeRL. You should follow the steps below to build a Docker image and set ``HIP_VISIBLE_DEVICES`` and ``ROCR_VISIBLE_DEVICES`` when starting RLHF training.
10
+
11
+
12
+
13
+ docker/Dockerfile.rocm
14
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
15
+
16
+ .. code-block:: bash
17
+
18
+ # Build the docker in the repo dir:
19
+ # docker build -f docker/Dockerfile.rocm -t verl-rocm:03.04.2015 .
20
+ # docker images # you can find your built docker
21
+ FROM rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4
22
+
23
+ # Set working directory
24
+ # WORKDIR $PWD/app
25
+
26
+ # Set environment variables
27
+ ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"
28
+
29
+ # Install vllm
30
+ RUN pip uninstall -y vllm && \
31
+ rm -rf vllm && \
32
+ git clone -b v0.6.3 https://github.com/vllm-project/vllm.git && \
33
+ cd vllm && \
34
+ MAX_JOBS=$(nproc) python3 setup.py install && \
35
+ cd .. && \
36
+ rm -rf vllm
37
+
38
+ # Copy the entire project directory
39
+ COPY . .
40
+
41
+ # Install dependencies
42
+ RUN pip install "tensordict<0.6" --no-deps && \
43
+ pip install accelerate \
44
+ codetiming \
45
+ datasets \
46
+ dill \
47
+ hydra-core \
48
+ liger-kernel \
49
+ numpy \
50
+ pandas \
51
+ datasets \
52
+ peft \
53
+ "pyarrow>=15.0.0" \
54
+ pylatexenc \
55
+ "ray[data,train,tune,serve]" \
56
+ torchdata \
57
+ transformers \
58
+ wandb \
59
+ orjson \
60
+ pybind11 && \
61
+ pip install -e . --no-deps
62
+
63
+ Build the image:
64
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
65
+
66
+ .. code-block:: bash
67
+
68
+ docker build -t verl-rocm .
69
+
70
+ Run the container
71
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
72
+
73
+
74
+ Optional: Running without root and with user permissions
75
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
76
+
77
+ .. code-block:: bash
78
+
79
+ docker run --rm -it \
80
+ --device /dev/dri \
81
+ --device /dev/kfd \
82
+ -p 8265:8265 \
83
+ --group-add video \
84
+ --cap-add SYS_PTRACE \
85
+ --security-opt seccomp=unconfined \
86
+ --privileged \
87
+ -v $HOME/.ssh:/root/.ssh \
88
+ -v $HOME:$HOME \
89
+ --shm-size 128G \
90
+ -w $PWD \
91
+ verl-rocm \
92
+ /bin/bash
93
+
94
+ (Optional): If you do not want to run in root mode and want to assign yourself as the user,
95
+ please add ``-e HOST_UID=$(id -u)`` and ``-e HOST_GID=$(id -g)`` to the above docker launch script.
96
+
97
+ Example
98
+ -------
99
+
100
+ Due to a special setting in AMD (ROCm) torch, you need to assign ``HIP_VISIBLE_DEVICES`` and ``ROCR_VISIBLE_DEVICES`` when starting Ray in VeRL's RLHF training.
101
+
102
+ PPO
103
+ ~~~
104
+
105
+ .. code-block:: bash
106
+
107
+ YOUR_PROJECT_NAME=r1-verl-ppo-upstream
108
+ YOUR_RUN_NAME=r1-training_ppo-upstream
109
+ # export HYDRA_FULL_ERROR=1
110
+ export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
111
+ export ROCR_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES
112
+ GPUS_PER_NODE=8
113
+ MODEL_PATH=Qwen/Qwen2.5-0.5B-Instruct
114
+ python3 examples/data_preprocess/gsm8k.py --local_dir data/gsm8k
115
+ python3 -c "import transformers; transformers.pipeline('text-generation', model='$MODEL_PATH')"
116
+ PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
117
+ data.train_files=data/gsm8k/train.parquet \
118
+ data.val_files=data/gsm8k/test.parquet \
119
+ data.train_batch_size=256 \
120
+ data.val_batch_size=1312 \
121
+ data.max_prompt_length=512 \
122
+ data.max_response_length=256 \
123
+ actor_rollout_ref.model.path=$MODEL_PATH \
124
+ actor_rollout_ref.actor.optim.lr=1e-6 \
125
+ actor_rollout_ref.actor.ppo_mini_batch_size=64 \
126
+ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
127
+ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
128
+ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
129
+ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
130
+ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
131
+ critic.optim.lr=1e-5 \
132
+ critic.model.path=$MODEL_PATH \
133
+ critic.ppo_micro_batch_size_per_gpu=4 \
134
+ algorithm.kl_ctrl.kl_coef=0.001 \
135
+ trainer.logger=['console'] \
136
+ trainer.project_name=$YOUR_PROJECT_NAME \
137
+ trainer.experiment_name=$YOUR_RUN_NAME \
138
+ trainer.val_before_train=False \
139
+ trainer.default_hdfs_dir=null \
140
+ trainer.n_gpus_per_node=$GPUS_PER_NODE \
141
+ trainer.nnodes=1 \
142
+ trainer.save_freq=10 \
143
+ trainer.test_freq=10 \
144
+ trainer.total_epochs=15 #2>&1 | tee verl_demo.log
145
+
146
+ GRPO
147
+ ~~~~
148
+
149
+ .. code-block:: bash
150
+
151
+ YOUR_PROJECT_NAME=r1-verl-grpo-upstream
152
+ YOUR_RUN_NAME=r1-training_grpo-upstream
153
+ # export HYDRA_FULL_ERROR=1
154
+ # export FSDP_VERBOSE=1
155
+ export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
156
+ export ROCR_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES
157
+ GPUS_PER_NODE=8
158
+ MODEL_PATH=Qwen/Qwen2.5-0.5B-Instruct
159
+ # MODEL_PATH=Qwen/Qwen2-7B-Instruct
160
+ python3 examples/data_preprocess/gsm8k.py --local_dir data/gsm8k
161
+ python3 -c "import transformers; transformers.pipeline('text-generation', model='$MODEL_PATH')"
162
+ python3 -m verl.trainer.main_ppo \
163
+ algorithm.adv_estimator=grpo \
164
+ data.train_files=data/gsm8k/train.parquet \
165
+ data.val_files=data/gsm8k/test.parquet \
166
+ data.train_batch_size=1024 \
167
+ data.val_batch_size=1312 \
168
+ data.max_prompt_length=512 \
169
+ data.max_response_length=1024 \
170
+ actor_rollout_ref.model.path=$MODEL_PATH \
171
+ actor_rollout_ref.actor.optim.lr=1e-6 \
172
+ actor_rollout_ref.model.use_remove_padding=True \
173
+ actor_rollout_ref.actor.ppo_mini_batch_size=256 \
174
+ actor_rollout_ref.actor.use_dynamic_bsz=True \
175
+ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
176
+ actor_rollout_ref.actor.use_kl_loss=True \
177
+ actor_rollout_ref.actor.kl_loss_coef=0.001 \
178
+ actor_rollout_ref.actor.kl_loss_type=low_var_kl \
179
+        actor_rollout_ref.model.enable_gradient_checkpointing=False \
180
+ actor_rollout_ref.actor.fsdp_config.param_offload=False \
181
+ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
182
+ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
183
+ actor_rollout_ref.rollout.name=vllm \
184
+ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
185
+ actor_rollout_ref.rollout.n=5 \
186
+ actor_rollout_ref.ref.fsdp_config.param_offload=False \
187
+ algorithm.kl_ctrl.kl_coef=0.001 \
188
+ trainer.critic_warmup=0 \
189
+ trainer.logger=['console'] \
190
+ trainer.project_name=$YOUR_PROJECT_NAME \
191
+ trainer.experiment_name=$YOUR_RUN_NAME \
192
+ trainer.n_gpus_per_node=$GPUS_PER_NODE \
193
+ trainer.val_before_train=False \
194
+ trainer.nnodes=1 \
195
+ trainer.save_freq=-1 \
196
+ trainer.test_freq=10 \
197
+ trainer.total_epochs=15
198
+
199
+
200
+
201
+ Multi-node training: slurm with Docker/Podman container
202
+ ---------------------------------------------------------------------------------------
203
+
204
+ If you want to run multi-node training with slurm, you can use the following script.
205
+
206
+ .. note::
207
+ 1. You need to use ``podman`` or ``docker`` in the following script. We will release the apptainer script later.
208
+ 2. If you want to use ``podman``, you just replace ``docker`` with ``podman`` in the following script.
209
+
210
+ The script includes the following steps:
211
+
212
+ 1. SLURM Configuration
213
+ 2. Environment Setup
214
+ 3. Docker/Podman Container Setup
215
+ 4. Ray Cluster Initialization
216
+ 5. Data Preprocessing
217
+ 6. Model Setup
218
+ 7. Training Launch
219
+
220
+
221
+ slurm_script.sh
222
+ ~~~~~~~~~~~~~~~~~~~~
223
+
224
+ .. code-block:: bash
225
+
226
+ #!/bin/bash
227
+
228
+ #SBATCH --job-name=verl-ray-on-slurm
229
+ #SBATCH --nodes=2
230
+ #SBATCH --ntasks-per-node=2
231
+ #SBATCH --mem=200G
232
+ #SBATCH --time=30-00:00:00
233
+ #SBATCH --gpus-per-node=8
234
+ #SBATCH --cpus-per-task=28
235
+ #SBATCH --output=../verl_log/slurm-%j.out
236
+ #SBATCH --error=../verl_log/slurm-%j.err
237
+ #SBATCH --nodelist=gpu-[0,1]
238
+
239
+
240
+ # load necessary modules
241
+ ### Run this setup
242
+ # [Cluster]: Use docker
243
+ # docker pull docker.io/rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4
244
+
245
+
246
+ ##########################################################################
247
+ ###The following setting should be set in different project and cluster###
248
+ ##########################################################################
249
+
250
+ ### Project
251
+ CONTAINER_NAME="multinode_verl_training"
252
+ IMG="verl.rocm"
253
+ DOCKERFILE="docker/Dockerfile.rocm"
254
+ # echo $PWD
255
+ verl_workdir="${HOME}/projects/verl_upstream"
256
+ export TRANSFORMERS_CACHE="${HOME}/.cache/huggingface"
257
+ export HF_HOME=$TRANSFORMERS_CACHE
258
+
259
+ ### Cluster Network Setting
260
+ export NCCL_DEBUG=TRACE
261
+ export GPU_MAX_HW_QUEUES=2
262
+ export TORCH_NCCL_HIGH_PRIORITY=1
263
+ export NCCL_CHECKS_DISABLE=1
264
+ # export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7
265
+ export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_8,mlx5_9
266
+ export NCCL_IB_GID_INDEX=3
267
+ export NCCL_CROSS_NIC=0
268
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
269
+ export NCCL_PROTO=Simple
270
+ export RCCL_MSCCL_ENABLE=0
271
+ export TOKENIZERS_PARALLELISM=false
272
+ export HSA_NO_SCRATCH_RECLAIM=1
273
+ ##########################################################################
274
+
275
+ ### For rocm and training script
276
+ export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
277
+ export ROCR_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES
278
+ export CUDA_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES
279
+
280
+
281
+ # Build and launch the Docker container
282
+ srun bash -c "
283
+ # Exit on any error
284
+ set -e
285
+
286
+ # Clean up dangling images (images with <none> tag)
287
+ docker image prune -f
288
+
289
+ # Need to pull the docker first
290
+ docker pull docker.io/rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4
291
+
292
+ if ! docker images --format "{{.Repository}}:{{.Tag}}" | grep -q "${IMG}"; then
293
+ echo \"Building ${IMG} image...\"
294
+ docker build -f \"${DOCKERFILE}\" -t \"${IMG}\" .
295
+ else
296
+ echo \"${IMG} image already exists, skipping build\"
297
+ fi
298
+
299
+ # Removing old container if exists
300
+ docker rm \"${CONTAINER_NAME}\" 2>/dev/null || true
301
+
302
+ # Checking network devices
303
+ ibdev2netdev
304
+
305
+ # Launch the docker
306
+ docker run --rm -d \
307
+ -e HYDRA_FULL_ERROR=1 \
308
+ -e HIP_VISIBLE_DEVICES=${HIP_VISIBLE_DEVICES} \
309
+ -e ROCR_VISIBLE_DEVICES=${ROCR_VISIBLE_DEVICES} \
310
+ -e CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES} \
311
+ -e NCCL_DEBUG=${NCCL_DEBUG} \
312
+ -e GPU_MAX_HW_QUEUES=${GPU_MAX_HW_QUEUES} \
313
+ -e TORCH_NCCL_HIGH_PRIORITY=${TORCH_NCCL_HIGH_PRIORITY} \
314
+ -e NCCL_CHECKS_DISABLE=${NCCL_CHECKS_DISABLE} \
315
+ -e NCCL_IB_HCA=${NCCL_IB_HCA} \
316
+ -e NCCL_IB_GID_INDEX=${NCCL_IB_GID_INDEX} \
317
+ -e NCCL_CROSS_NIC=${NCCL_CROSS_NIC} \
318
+ -e CUDA_DEVICE_MAX_CONNECTIONS=${CUDA_DEVICE_MAX_CONNECTIONS} \
319
+ -e NCCL_PROTO=${NCCL_PROTO} \
320
+ -e RCCL_MSCCL_ENABLE=${RCCL_MSCCL_ENABLE} \
321
+ -e TOKENIZERS_PARALLELISM=${TOKENIZERS_PARALLELISM} \
322
+ -e HSA_NO_SCRATCH_RECLAIM=${HSA_NO_SCRATCH_RECLAIM} \
323
+ -e TRANSFORMERS_CACHE=${TRANSFORMERS_CACHE} \
324
+ -e HF_HOME=${HF_HOME} \
325
+ --network host \
326
+ --device /dev/dri \
327
+ --device /dev/kfd \
328
+ --device /dev/infiniband \
329
+ --group-add video \
330
+ --cap-add SYS_PTRACE \
331
+ --security-opt seccomp=unconfined \
332
+ --privileged \
333
+ -v \${HOME}:\${HOME} \
334
+ -v \${HOME}/.ssh:/root/.ssh \
335
+ -w "${verl_workdir}" \
336
+ --shm-size 128G \
337
+ --name \"${CONTAINER_NAME}\" \
338
+ \"${IMG}\" \
339
+ tail -f /dev/null
340
+
341
+ echo \"Container setup completed\"
342
+ "
343
+    # (Optional): If you do not want to run in root mode and want to assign yourself as the user,
344
+    # please add `-e HOST_UID=$(id -u)` and `-e HOST_GID=$(id -g)` to the above docker launch script.
345
+
346
+
347
+
348
+
349
+
350
+ ### Ray launch the nodes before training
351
+
352
+ # Getting the node names
353
+ nodes_array=($(scontrol show hostnames "$SLURM_JOB_NODELIST" | tr '\n' ' '))
354
+
355
+ head_node=${nodes_array[0]}
356
+ head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address)
357
+
358
+ # if we detect a space character in the head node IP, we'll
359
+ # convert it to an ipv4 address. This step is optional.
360
+ if [[ "$head_node_ip" == *" "* ]]; then
361
+ IFS=' ' read -ra ADDR <<<"$head_node_ip"
362
+ if [[ ${#ADDR[0]} -gt 16 ]]; then
363
+ head_node_ip=${ADDR[1]}
364
+ else
365
+ head_node_ip=${ADDR[0]}
366
+ fi
367
+ echo "IPV6 address detected. We split the IPV4 address as $head_node_ip"
368
+ fi
369
+
370
+ port=6379
371
+ ip_head=$head_node_ip:$port
372
+ export ip_head
373
+ echo "IP Head: $ip_head"
374
+
375
+ # make sure we set environment variables before Ray initialization
376
+ export VLLM_ATTENTION_BACKEND=XFORMERS
377
+
378
+ # Print out all env variables
379
+ printenv
380
+
381
+ echo "Starting HEAD at $head_node"
382
+ srun --nodes=1 --ntasks=1 -w "$head_node" \
383
+ docker exec "${CONTAINER_NAME}" \
384
+ ray start --head --node-ip-address="$head_node_ip" --port=$port \
385
+ --dashboard-port=8266 \
386
+ --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &
387
+ # optional, though may be useful in certain versions of Ray < 1.0.
388
+ sleep 10
389
+
390
+ # number of nodes other than the head node
391
+ worker_num=$((SLURM_JOB_NUM_NODES - 1))
392
+
393
+ for ((i = 1; i <= worker_num; i++)); do
394
+ node_i=${nodes_array[$i]}
395
+ echo "Debug: Starting worker on node_i = ${node_i}"
396
+ if [ -z "$node_i" ]; then
397
+ echo "Error: Empty node name for worker $i"
398
+ continue
399
+ fi
400
+ echo "Starting WORKER $i at $node_i"
401
+ srun --nodes=1 --ntasks=1 -w "$node_i" \
402
+ docker exec "${CONTAINER_NAME}" \
403
+ ray start --address "$ip_head" --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &
404
+ sleep 5
405
+ done
406
+
407
+
408
+
409
+
410
+    # Ray initialization test (check whether there were any errors in the above execution)
411
+ echo "Testing Ray initialization in the slurm nodes..."
412
+ docker exec "${CONTAINER_NAME}" python3 -c '
413
+ import ray
414
+ try:
415
+ ray.init(address="auto")
416
+ print("\n=== Ray Cluster Status ===")
417
+ print(f"Number of nodes: {len(ray.nodes())}")
418
+ for node in ray.nodes():
419
+ print("Node: {}, Status: {}".format(node["NodeManagerHostname"], node["Alive"]))
420
+ # print(f"Node: {node}")
421
+ ray.shutdown()
422
+ print("Ray initialization successful!")
423
+ except Exception as e:
424
+ print(f"Ray initialization failed: {str(e)}")
425
+ '
426
+ echo "=== Ray test completed ==="
427
+ ######
428
+
429
+
430
+
431
+ # Run data preprocessing
432
+
433
+ echo "Starting data preprocessing..."
434
+ docker exec "${CONTAINER_NAME}" \
435
+ python3 "examples/data_preprocess/gsm8k.py" "--local_dir" "../data/gsm8k"
436
+
437
+ echo "Starting data preprocessing..."
438
+ docker exec "${CONTAINER_NAME}" \
439
+ python3 "examples/data_preprocess/math_dataset.py" "--local_dir" "../data/math"
440
+
441
+ train_files="../data/gsm8k/train.parquet"
442
+ val_files="../data/gsm8k/test.parquet"
443
+
444
+ # Download and test model
445
+ echo "Loading model..."
446
+ docker exec "${CONTAINER_NAME}" \
447
+ python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2-7B-Instruct')"
448
+ MODEL_PATH="Qwen/Qwen2-7B-Instruct"
449
+
450
+ # Set model path after pipeline test
451
+ MODEL_PATH="Qwen/Qwen2.5-0.5B-Instruct"
452
+
453
+ echo "== Data and model loading Done =="
454
+
455
+ echo "Start to train..."
456
+
457
+ docker exec "${CONTAINER_NAME}" \
458
+ python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2-7B-Instruct')"
459
+ MODEL_PATH="Qwen/Qwen2-7B-Instruct"
460
+
461
+
462
+ PYTHONUNBUFFERED=1 srun --overlap --nodes=${SLURM_NNODES} --ntasks=1 -w "$head_node" \
463
+ docker exec "${CONTAINER_NAME}" \
464
+ python3 -m verl.trainer.main_ppo \
465
+ data.train_files=$train_files \
466
+ data.val_files=$val_files \
467
+ data.train_batch_size=1024 \
468
+ data.max_prompt_length=1024 \
469
+ data.max_response_length=1024 \
470
+ actor_rollout_ref.model.path=$MODEL_PATH \
471
+ actor_rollout_ref.model.enable_gradient_checkpointing=False \
472
+ actor_rollout_ref.actor.optim.lr=1e-6 \
473
+ actor_rollout_ref.model.use_remove_padding=True \
474
+ actor_rollout_ref.actor.ppo_mini_batch_size=256 \
475
+ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
476
+ actor_rollout_ref.model.enable_gradient_checkpointing=True \
477
+ actor_rollout_ref.actor.fsdp_config.param_offload=False \
478
+ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
479
+ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
480
+ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
481
+ actor_rollout_ref.rollout.name=vllm \
482
+ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \
483
+ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
484
+ actor_rollout_ref.ref.fsdp_config.param_offload=True \
485
+ critic.optim.lr=1e-5 \
486
+ critic.model.use_remove_padding=True \
487
+ critic.model.path=$MODEL_PATH \
488
+ critic.model.enable_gradient_checkpointing=False \
489
+ critic.ppo_micro_batch_size_per_gpu=8 \
490
+ critic.model.fsdp_config.param_offload=False \
491
+ critic.model.fsdp_config.optimizer_offload=False \
492
+ algorithm.kl_ctrl.kl_coef=0.0001 \
493
+ trainer.critic_warmup=0 \
494
+ trainer.logger=['console','wandb'] \
495
+ trainer.project_name='verl_example' \
496
+ trainer.experiment_name='Qwen2.5-32B-Instruct_function_rm' \
497
+ trainer.n_gpus_per_node=${SLURM_GPUS_PER_NODE} \
498
+ trainer.val_before_train=False \
499
+ trainer.nnodes=${SLURM_NNODES} \
500
+ trainer.save_freq=-1 \
501
+ trainer.test_freq=10 \
502
+ trainer.total_epochs=15
503
+
504
+
505
+ Run slurm_script.sh
506
+ ~~~~~~~~~~~~~~~~~~~~
507
+ Just sbatch your slurm_script.sh
508
+
509
+ .. code-block:: bash
510
+
511
+ sbatch slurm_script.sh
512
+
docs/conf.py ADDED
@@ -0,0 +1,83 @@
1
+ # Copyright 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Configuration file for the Sphinx documentation builder.
16
+ #
17
+ # This file only contains a selection of the most common options. For a full
18
+ # list see the documentation:
19
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html
20
+
21
+ # -- Path setup --------------------------------------------------------------
22
+
23
+ # If extensions (or modules to document with autodoc) are in another directory,
24
+ # add these directories to sys.path here. If the directory is relative to the
25
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
26
+ #
27
+ # import os
28
+ # import sys
29
+ # sys.path.insert(0, os.path.abspath('.'))
30
+
31
+
32
+ # -- Project information -----------------------------------------------------
33
+
34
+ project = u'verl'
35
+ # pylint: disable=W0622
36
+ copyright = u'2024 ByteDance Seed Foundation MLSys Team'
37
+ author = u'Guangming Sheng, Chi Zhang, Yanghua Peng, Haibin Lin'
38
+
39
+
40
+ # -- General configuration ---------------------------------------------------
41
+ # The master toctree document.
42
+ master_doc = 'index'
43
+
44
+ # Add any Sphinx extension module names here, as strings. They can be
45
+ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
46
+ # ones.
47
+ extensions = ['recommonmark',
48
+ 'sphinx.ext.autodoc',
49
+ 'sphinx.ext.autosummary',
50
+ 'sphinx.ext.autosectionlabel',
51
+ ]
52
+
53
+ # The suffix(es) of source filenames.
54
+ # You can specify multiple suffix as a list of string:
55
+ source_suffix = ['.rst', '.rest', '.md']
56
+
57
+ # Add any paths that contain templates here, relative to this directory.
58
+ templates_path = ['_templates']
59
+
60
+ # The language for content autogenerated by Sphinx. Refer to documentation
61
+ # for a list of supported languages.
62
+ #
63
+ # This is also used if you do content translation via gettext catalogs.
64
+ # Usually you set "language" from the command line for these cases.
65
+ language = u'en'
66
+
67
+ # List of patterns, relative to source directory, that match files and
68
+ # directories to ignore when looking for source files.
69
+ # This pattern also affects html_static_path and html_extra_path.
70
+ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
71
+
72
+
73
+ # -- Options for HTML output -------------------------------------------------
74
+
75
+ # The theme to use for HTML and HTML Help pages. See the documentation for
76
+ # a list of builtin themes.
77
+ #
78
+ html_theme = 'sphinx_rtd_theme'
79
+
80
+ # Add any paths that contain custom static files (such as style sheets) here,
81
+ # relative to this directory. They are copied after the builtin static files,
82
+ # so a file named "default.css" will overwrite the builtin "default.css".
83
+ html_static_path = ['_static']
docs/data.rst ADDED
@@ -0,0 +1,59 @@
1
+ Data interface
2
+ =========================
3
+
4
+ DataProto is the interface for data exchange.
5
+
6
+ The :class:`verl.DataProto` class contains two key members:
7
+
8
+ - batch: a :class:`tensordict.TensorDict` object for the actual data
9
+ - meta_info: a :class:`Dict` with additional meta information
10
+
11
+ TensorDict
12
+ ~~~~~~~~~~~~
13
+
14
+ :attr:`DataProto.batch` is built on top of :class:`tensordict`, a project in the PyTorch ecosystem.
15
+ A TensorDict is a dict-like container for tensors. To instantiate a TensorDict, you must specify key-value pairs as well as the batch size.
16
+
17
+ .. code-block:: python
18
+
19
+ >>> import torch
20
+ >>> from tensordict import TensorDict
21
+ >>> tensordict = TensorDict({"zeros": torch.zeros(2, 3, 4), "ones": torch.ones(2, 3, 5)}, batch_size=[2,])
22
+ >>> tensordict["twos"] = 2 * torch.ones(2, 5, 6)
23
+ >>> zeros = tensordict["zeros"]
24
+ >>> tensordict
25
+ TensorDict(
26
+ fields={
27
+ ones: Tensor(shape=torch.Size([2, 3, 5]), device=cpu, dtype=torch.float32, is_shared=False),
28
+ twos: Tensor(shape=torch.Size([2, 5, 6]), device=cpu, dtype=torch.float32, is_shared=False),
29
+ zeros: Tensor(shape=torch.Size([2, 3, 4]), device=cpu, dtype=torch.float32, is_shared=False)},
30
+ batch_size=torch.Size([2]),
31
+ device=None,
32
+ is_shared=False)
33
+
34
+ One can also index a tensordict along its batch_size. The contents of the TensorDict can be manipulated collectively as well.
35
+
36
+ .. code-block:: python
37
+
38
+ >>> tensordict[..., :1]
39
+ TensorDict(
40
+ fields={
41
+ ones: Tensor(shape=torch.Size([1, 3, 5]), device=cpu, dtype=torch.float32, is_shared=False),
42
+ twos: Tensor(shape=torch.Size([1, 5, 6]), device=cpu, dtype=torch.float32, is_shared=False),
43
+ zeros: Tensor(shape=torch.Size([1, 3, 4]), device=cpu, dtype=torch.float32, is_shared=False)},
44
+ batch_size=torch.Size([1]),
45
+ device=None,
46
+ is_shared=False)
47
+ >>> tensordict = tensordict.to("cuda:0")
48
+ >>> tensordict = tensordict.reshape(6)
49
+
50
+ For more about :class:`tensordict.TensorDict` usage, see the official tensordict_ documentation.
51
+
52
+ .. _tensordict: https://pytorch.org/tensordict/overview.html
53
+
54
+
55
+ Core APIs
56
+ ~~~~~~~~~~~~~~~~~
57
+
58
+ .. autoclass:: verl.DataProto
59
+ :members: to, select, union, make_iterator, concat
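+
+ As a minimal usage sketch (assuming the dataclass-style constructor with the
+ two members described above), a ``DataProto`` can be built by wrapping a
+ ``TensorDict`` together with a plain ``dict`` of meta information:
+
+ .. code-block:: python
+
+     >>> import torch
+     >>> from tensordict import TensorDict
+     >>> from verl import DataProto
+     >>> batch = TensorDict(
+     ...     {"input_ids": torch.zeros(4, 16, dtype=torch.long),
+     ...      "attention_mask": torch.ones(4, 16, dtype=torch.long)},
+     ...     batch_size=[4])
+     >>> data = DataProto(batch=batch, meta_info={"temperature": 1.0})
+     >>> data.batch["input_ids"].shape
+     torch.Size([4, 16])
+     >>> data.meta_info["temperature"]
+     1.0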