{ "context": { "python_version": "3.12.9 | packaged by Anaconda, Inc. | (main, Feb 6 2025, 18:56:27) [GCC 11.2.0]", "torch_version": "2.5.1+cu124", "engine_args": { "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "served_model_name": null, "tokenizer": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "task": "auto", "skip_tokenizer_init": false, "tokenizer_mode": "auto", "trust_remote_code": false, "allowed_local_media_path": null, "download_dir": null, "load_format": "dummy", "config_format": "auto", "dtype": "auto", "kv_cache_dtype": "auto", "seed": 0, "max_model_len": null, "distributed_executor_backend": null, "pipeline_parallel_size": 1, "tensor_parallel_size": 1, "max_parallel_loading_workers": null, "block_size": null, "enable_prefix_caching": false, "disable_sliding_window": false, "use_v2_block_manager": true, "swap_space": 4, "cpu_offload_gb": 0, "gpu_memory_utilization": 0.9, "max_num_batched_tokens": 8000, "max_num_partial_prefills": 1, "max_long_partial_prefills": 1, "long_prefill_token_threshold": 0, "max_num_seqs": 256, "max_logprobs": 20, "disable_log_stats": false, "revision": null, "code_revision": null, "rope_scaling": null, "rope_theta": null, "hf_overrides": null, "tokenizer_revision": null, "quantization": null, "enforce_eager": true, "max_seq_len_to_capture": 8192, "disable_custom_all_reduce": false, "tokenizer_pool_size": 0, "tokenizer_pool_type": "ray", "tokenizer_pool_extra_config": null, "limit_mm_per_prompt": null, "mm_processor_kwargs": null, "disable_mm_preprocessor_cache": false, "enable_lora": false, "enable_lora_bias": false, "max_loras": 1, "max_lora_rank": 16, "enable_prompt_adapter": false, "max_prompt_adapters": 1, "max_prompt_adapter_token": 0, "fully_sharded_loras": false, "lora_extra_vocab_size": 256, "long_lora_scaling_factors": null, "lora_dtype": "auto", "max_cpu_loras": null, "device": "auto", "num_scheduler_steps": 1, "multi_step_stream_outputs": true, "ray_workers_use_nsight": false, "num_gpu_blocks_override": null, "num_lookahead_slots": 0, "model_loader_extra_config": null, "ignore_patterns": [], "preemption_mode": null, "scheduler_delay_factor": 0.0, "enable_chunked_prefill": null, "guided_decoding_backend": "xgrammar", "logits_processor_pattern": null, "speculative_model": null, "speculative_model_quantization": null, "speculative_draft_tensor_parallel_size": null, "num_speculative_tokens": null, "speculative_disable_mqa_scorer": false, "speculative_max_model_len": null, "speculative_disable_by_batch_size": null, "ngram_prompt_lookup_max": null, "ngram_prompt_lookup_min": null, "spec_decoding_acceptance_method": "rejection_sampler", "typical_acceptance_sampler_posterior_threshold": null, "typical_acceptance_sampler_posterior_alpha": null, "qlora_adapter_name_or_path": null, "disable_logprobs_during_spec_decoding": null, "otlp_traces_endpoint": null, "collect_detailed_traces": null, "disable_async_output_proc": false, "scheduling_policy": "fcfs", "scheduler_cls": "vllm.core.scheduler.Scheduler", "override_neuron_config": null, "override_pooler_config": null, "compilation_config": null, "worker_cls": "auto", "kv_transfer_config": null, "generation_config": null, "override_generation_config": null, "enable_sleep_mode": false, "model_impl": "auto", "calculate_kv_scales": false, "additional_config": null }, "prompt_len": 0, "batch_size": 14, "num_steps": 2, "complete_num_requests_per_step": null, "save_chrome_traces_folder": null }, "prefill": { "metadata": { "num_running_seqs": null }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", 
"cuda_time_us": 157838.923, "pct_cuda_time": 99.6717169873768, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 202.942, "pct_cuda_time": 0.12815329200423028, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 202.942, "pct_cuda_time": 0.12815329200423028, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 157558.702, "pct_cuda_time": 99.4947637512861, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 4930.366000000002, "pct_cuda_time": 3.1134148361883156, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 110.686, "pct_cuda_time": 0.06989571049255568, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4819.680000000001, "pct_cuda_time": 3.0435191256957594, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 34836.658, "pct_cuda_time": 21.998563161521545, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 16156.140000000003, "pct_cuda_time": 10.202237718566023, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 35.13300000000002, "pct_cuda_time": 0.022185696445214027, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 16121.007000000001, "pct_cuda_time": 10.180052022120806, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 3098.0089999999996, "pct_cuda_time": 1.956322752356503, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 3098.0089999999996, "pct_cuda_time": 1.956322752356503, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 5188.856, "pct_cuda_time": 3.2766454363113713, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 1252.43, "pct_cuda_time": 0.7908812739839862, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 
256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 3884.0750000000003, "pct_cuda_time": 2.452705687542898, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 52.351, "pct_cuda_time": 0.033058474784487486, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 10393.653, "pct_cuda_time": 6.5633572542876495, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 37.184999999999995, "pct_cuda_time": 0.023481488125559533, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 10356.468, "pct_cuda_time": 6.53987576616209, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 117791.67799999999, "pct_cuda_time": 74.38278575357623, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 74167.703, "pct_cuda_time": 46.8352302620553, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 37.086000000000006, "pct_cuda_time": 0.023418971860279712, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 74130.61700000001, "pct_cuda_time": 46.81181129019503, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 9826.049, "pct_cuda_time": 6.204928140773596, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 9826.049, "pct_cuda_time": 6.204928140773596, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 33797.926, "pct_cuda_time": 21.342627350747343, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 35.202000000000005, "pct_cuda_time": 0.022229268387681776, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 33762.72400000001, "pct_cuda_time": 21.320398082359667, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 77.279, "pct_cuda_time": 0.048799944086462696, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 77.279, "pct_cuda_time": 
0.048799944086462696, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 366.139, "pct_cuda_time": 0.2312085136696045, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 11.328, "pct_cuda_time": 0.007153376293837258, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 1.376, "pct_cuda_time": 0.0008689129396468984, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 353.435, "pct_cuda_time": 0.22318622443612035, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 153.726, "pct_cuda_time": 0.09707449895360402, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 17.183, "pct_cuda_time": 0.01085067662932606, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 7.904, "pct_cuda_time": 0.004991197583553115, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 10.816, "pct_cuda_time": 0.006830059851177946, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 38.847, "pct_cuda_time": 0.02453100360934816, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 31.392, "pct_cuda_time": 0.01982333939054901, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 2.08, "pct_cuda_time": 0.0013134730483034512, "invocations": 1 }, "children": [] }, { "entry": { "name": "void 
at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 11.456, "pct_cuda_time": 0.007234205404502085, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 30.72, "pct_cuda_time": 0.019398986559558667, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 3.328, "pct_cuda_time": 0.002101556877285522, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 99702.835, "cuda_time_us": 157838.923, "pct_cuda_time": 99.6717169873768, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 1106.181, "cuda_time_us": 202.942, "pct_cuda_time": 0.12815329200423028, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 202.942, "pct_cuda_time": 0.12815329200423028, "trace": "index_select(bfloat16[128256, 4096], 0, int64[7168]) <- embedding(bfloat16[128256, 4096], int64[7168], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3993.68, "cuda_time_us": 4738.433, "pct_cuda_time": 2.992213479178686, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 264.7, "cuda_time_us": 110.686, "pct_cuda_time": 0.06989571049255568, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 110.686, "pct_cuda_time": 0.06989571049255568, "trace": "_C::rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2889.431, "cuda_time_us": 1043.185, "pct_cuda_time": 0.6587477797569402, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 378.797, "cuda_time_us": 478.585, "pct_cuda_time": 0.3022156244338015, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.0004843431865618977, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 
477.818, "pct_cuda_time": 0.30173128124723964, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 918.399, "cuda_time_us": 94.399, "pct_cuda_time": 0.05961083763788342, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 94.399, "pct_cuda_time": 0.05961083763788342, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1002.872, "cuda_time_us": 155.67700000000002, "pct_cuda_time": 0.09830651141381558, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 38.304, "pct_cuda_time": 0.024188111366449713, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 115.902, "pct_cuda_time": 0.0731894967521474, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.471, "pct_cuda_time": 0.0009289032952184504, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], 
bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 346.972, "cuda_time_us": 314.524, "pct_cuda_time": 0.19861480627143976, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 313.788, "pct_cuda_time": 0.198150038885117, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 125.301, "cuda_time_us": 75.167, "pct_cuda_time": 0.04746626376049304, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.167, "pct_cuda_time": 0.04746626376049304, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 590.767, "cuda_time_us": 3509.395, "pct_cuda_time": 2.216103725168697, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 199.642, "cuda_time_us": 2181.508, "pct_cuda_time": 1.3775730589703679, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2180.772, "pct_cuda_time": 1.3771082915840451, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 137.088, "cuda_time_us": 300.829, "pct_cuda_time": 0.18996672290773026, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 300.829, "pct_cuda_time": 0.18996672290773026, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 181.064, 
"cuda_time_us": 1027.058, "pct_cuda_time": 0.648563943290599, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1026.322, "pct_cuda_time": 0.6480991759042762, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2653.1, "cuda_time_us": 4689.666000000001, "pct_cuda_time": 2.9614182194928147, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.75, "cuda_time_us": 77.535, "pct_cuda_time": 0.04896160230779235, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.535, "pct_cuda_time": 0.04896160230779235, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1909.754, "cuda_time_us": 1031.4740000000002, "pct_cuda_time": 0.6513525476085357, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 167.241, "cuda_time_us": 474.17, "pct_cuda_time": 0.29942765159329204, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 473.434, "pct_cuda_time": 0.2989628842069693, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 565.123, "cuda_time_us": 93.823, "pct_cuda_time": 0.05924710663989169, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 93.823, "pct_cuda_time": 0.05924710663989169, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 799.75, "cuda_time_us": 153.053, "pct_cuda_time": 0.0966495146451866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, 
__nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 37.855, "pct_cuda_time": 0.023904578001695743, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.534, "pct_cuda_time": 0.07169415820484809, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.001050778438642761, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 217.101, "cuda_time_us": 310.428, "pct_cuda_time": 0.19602827473016526, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 309.692, "pct_cuda_time": 0.19556350734384254, 
"trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.157, "cuda_time_us": 74.303, "pct_cuda_time": 0.046920667263505446, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 74.303, "pct_cuda_time": 0.046920667263505446, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 490.275, "cuda_time_us": 3506.3540000000003, "pct_cuda_time": 2.2141834023129805, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.059, "cuda_time_us": 2181.3160000000003, "pct_cuda_time": 1.3774518153043709, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.001091192993975175, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2179.588, "pct_cuda_time": 1.3763606223103957, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.309, "cuda_time_us": 301.084, "pct_cuda_time": 0.19012774965163284, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 301.084, "pct_cuda_time": 0.19012774965163284, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 161.696, "cuda_time_us": 1023.954, "pct_cuda_time": 0.6466038373569769, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1023.218, "pct_cuda_time": 0.6461390699706542, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2600.171, "cuda_time_us": 4702.241, "pct_cuda_time": 2.969359048138206, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.328, "cuda_time_us": 77.855, 
"pct_cuda_time": 0.049163675084454427, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.855, "pct_cuda_time": 0.049163675084454427, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1843.023, "cuda_time_us": 1034.4820000000002, "pct_cuda_time": 0.6532520317091591, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 184.404, "cuda_time_us": 476.60200000000003, "pct_cuda_time": 0.3009634046959238, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.001070985716308968, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 474.906, "pct_cuda_time": 0.2998924189796148, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 516.322, "cuda_time_us": 94.367, "pct_cuda_time": 0.0595906303602172, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 94.367, "pct_cuda_time": 0.0595906303602172, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 763.203, "cuda_time_us": 152.95800000000003, "pct_cuda_time": 0.09658952428961506, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 37.759, "pct_cuda_time": 0.02384395616869712, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, 
cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.471, "pct_cuda_time": 0.07165437512694275, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.001091192993975175, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 226.462, "cuda_time_us": 310.555, "pct_cuda_time": 0.19610847236340304, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.00046413590889569066, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 309.82, "pct_cuda_time": 0.19564433645450735, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 94.363, "cuda_time_us": 74.142, "pct_cuda_time": 0.04681899939774734, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 74.142, "pct_cuda_time": 0.04681899939774734, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 499.411, "cuda_time_us": 3515.7619999999997, "pct_cuda_time": 2.2201243419468453, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 179.431, "cuda_time_us": 2189.2200000000003, "pct_cuda_time": 
1.382443012887924, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2188.452, "pct_cuda_time": 1.3819580382239351, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.509, "cuda_time_us": 300.604, "pct_cuda_time": 0.18982464048663972, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 300.604, "pct_cuda_time": 0.18982464048663972, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 160.436, "cuda_time_us": 1025.9379999999999, "pct_cuda_time": 0.6478566885722817, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0009295347726455194, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1024.466, "pct_cuda_time": 0.6469271537996363, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2541.249, "cuda_time_us": 4698.149, "pct_cuda_time": 2.96677504250664, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.262, "cuda_time_us": 76.767, "pct_cuda_time": 0.04847662764380339, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 76.767, "pct_cuda_time": 0.04847662764380339, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1818.247, "cuda_time_us": 1033.0430000000001, "pct_cuda_time": 0.652343335691607, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 158.748, "cuda_time_us": 474.298, "pct_cuda_time": 0.29950848070395686, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], 
bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 473.562, "pct_cuda_time": 0.29904371331763413, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 524.329, "cuda_time_us": 94.079, "pct_cuda_time": 0.05940876486122134, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 94.079, "pct_cuda_time": 0.05940876486122134, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 767.331, "cuda_time_us": 153.27800000000002, "pct_cuda_time": 0.09679159706627713, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 38.527, "pct_cuda_time": 0.024328930832686092, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.311, "pct_cuda_time": 0.07155333873861172, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, 
at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0009093274949793123, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 215.559, "cuda_time_us": 311.38800000000003, "pct_cuda_time": 0.19663449306015152, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.824, "pct_cuda_time": 0.0011518148269737958, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 309.564, "pct_cuda_time": 0.1954826782331777, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.502, "cuda_time_us": 75.776, "pct_cuda_time": 0.04785083351357804, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.776, "pct_cuda_time": 0.04785083351357804, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 487.519, "cuda_time_us": 3512.563, "pct_cuda_time": 2.218104245657652, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.647, "cuda_time_us": 2181.86, "pct_cuda_time": 1.3777953390246964, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0008082911066482778, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2180.58, "pct_cuda_time": 1.376987047918048, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.129, "cuda_time_us": 300.444, "pct_cuda_time": 0.1897236040983087, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 300.444, 
"pct_cuda_time": 0.1897236040983087, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 158.284, "cuda_time_us": 1030.259, "pct_cuda_time": 0.6505853025346469, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0009497420503117263, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1028.755, "pct_cuda_time": 0.6496355604843351, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2498.445, "cuda_time_us": 4699.105, "pct_cuda_time": 2.9673787349269176, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.454, "cuda_time_us": 77.086, "pct_cuda_time": 0.04867806894303838, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.086, "pct_cuda_time": 0.04867806894303838, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1778.665, "cuda_time_us": 1033.33, "pct_cuda_time": 0.6525245697131756, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 157.882, "cuda_time_us": 475.066, "pct_cuda_time": 0.2999934553679458, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0007880838289820708, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 473.818, "pct_cuda_time": 0.29920537153896376, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 519.361, "cuda_time_us": 94.047, "pct_cuda_time": 0.059388557583555136, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 94.047, "pct_cuda_time": 0.059388557583555136, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 751.475, "cuda_time_us": 153.18099999999998, "pct_cuda_time": 
0.09673034375585142, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 38.655, "pct_cuda_time": 0.024409759943350918, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.086, "pct_cuda_time": 0.0714112563175212, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0009093274949793123, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 203.262, "cuda_time_us": 311.036, "pct_cuda_time": 0.1964122130058232, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0008487056619806917, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, 
"children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 309.692, "pct_cuda_time": 0.19556350734384254, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.024, "cuda_time_us": 75.487, "pct_cuda_time": 0.047668336537155104, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.487, "pct_cuda_time": 0.047668336537155104, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.444, "cuda_time_us": 3513.2019999999998, "pct_cuda_time": 2.2185077597335487, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.323, "cuda_time_us": 2185.219, "pct_cuda_time": 1.379916471702221, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2184.483, "pct_cuda_time": 1.3794517043158983, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.768, "cuda_time_us": 301.084, "pct_cuda_time": 0.19012774965163284, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 301.084, "pct_cuda_time": 0.19012774965163284, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 156.692, "cuda_time_us": 1026.899, "pct_cuda_time": 0.6484635383796951, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0008284983843144847, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1025.587, "pct_cuda_time": 0.6476350399953806, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", 
"cpu_time_us": 2529.954, "cuda_time_us": 4703.52, "pct_cuda_time": 2.9701667077674276, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.998, "cuda_time_us": 77.118, "pct_cuda_time": 0.04869827622070459, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.118, "pct_cuda_time": 0.04869827622070459, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1816.698, "cuda_time_us": 1032.497, "pct_cuda_time": 0.6519985490164272, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.912, "cuda_time_us": 474.873, "pct_cuda_time": 0.29987158022452154, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.00046413590889569066, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 474.138, "pct_cuda_time": 0.2994074443156258, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 558.803, "cuda_time_us": 93.983, "pct_cuda_time": 0.05934814302822272, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 93.983, "pct_cuda_time": 0.05934814302822272, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 748.775, "cuda_time_us": 152.413, "pct_cuda_time": 0.09624536909186246, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 37.919, "pct_cuda_time": 0.023944992557028155, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, 
cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 112.766, "pct_cuda_time": 0.07120918354085913, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.001091192993975175, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 206.897, "cuda_time_us": 311.228, "pct_cuda_time": 0.19653345667182046, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0008487056619806917, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 309.884, "pct_cuda_time": 0.19568475100983979, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.18, "cuda_time_us": 75.231, "pct_cuda_time": 0.04750667831582545, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.231, "pct_cuda_time": 0.04750667831582545, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 470.517, "cuda_time_us": 3518.674, "pct_cuda_time": 2.2219632042144704, 
"trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.61, "cuda_time_us": 2191.235, "pct_cuda_time": 1.383715439903468, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.001070985716308968, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2189.539, "pct_cuda_time": 1.382644454187159, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.28, "cuda_time_us": 301.116, "pct_cuda_time": 0.19014795692929906, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 301.116, "pct_cuda_time": 0.19014795692929906, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.862, "cuda_time_us": 1026.323, "pct_cuda_time": 0.6480998073817034, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1025.587, "pct_cuda_time": 0.6476350399953806, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2386.533, "cuda_time_us": 4703.746, "pct_cuda_time": 2.970309421665945, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.727, "cuda_time_us": 77.343, "pct_cuda_time": 0.04884035864179511, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.343, "pct_cuda_time": 0.04884035864179511, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1686.956, "cuda_time_us": 1033.33, "pct_cuda_time": 0.6525245697131756, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.746, "cuda_time_us": 476.63300000000004, "pct_cuda_time": 0.30098298049616296, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.663, "pct_cuda_time": 
0.0010501469612156922, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 474.97, "pct_cuda_time": 0.29993283353494726, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 476.767, "cuda_time_us": 94.303, "pct_cuda_time": 0.059550215804884794, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 94.303, "pct_cuda_time": 0.059550215804884794, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 722.543, "cuda_time_us": 152.542, "pct_cuda_time": 0.09632682967995436, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 37.568, "pct_cuda_time": 0.02372334398012695, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.342, "pct_cuda_time": 0.07157291453885085, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": 
[] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.001030571160976554, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 195.267, "cuda_time_us": 309.852, "pct_cuda_time": 0.1956645437321735, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 309.116, "pct_cuda_time": 0.19519977634585078, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 99.701, "cuda_time_us": 74.847, "pct_cuda_time": 0.047264190983830966, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 74.847, "pct_cuda_time": 0.047264190983830966, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 462.787, "cuda_time_us": 3518.226, "pct_cuda_time": 2.2216803023271434, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.679, "cuda_time_us": 2187.0750000000003, "pct_cuda_time": 1.381088493806861, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0008487056619806917, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2185.731, "pct_cuda_time": 1.3802397881448805, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.01, "cuda_time_us": 300.892, "pct_cuda_time": 0.1900065059856356, "trace": "" }, "children": [ { "entry": { "name": 
"void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 300.892, "pct_cuda_time": 0.1900065059856356, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.932, "cuda_time_us": 1030.259, "pct_cuda_time": 0.6505853025346469, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0008487056619806917, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1028.915, "pct_cuda_time": 0.6497365968726662, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2456.179, "cuda_time_us": 4701.41, "pct_cuda_time": 2.968834290396312, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.289, "cuda_time_us": 77.311, "pct_cuda_time": 0.04882015136412891, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.311, "pct_cuda_time": 0.04882015136412891, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1752.24, "cuda_time_us": 1036.594, "pct_cuda_time": 0.6545857120351287, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 162.591, "cuda_time_us": 477.945, "pct_cuda_time": 0.30181147888047744, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 477.177, "pct_cuda_time": 0.3013265042164885, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 504.077, "cuda_time_us": 94.111, "pct_cuda_time": 0.05942897213888755, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 94.111, "pct_cuda_time": 0.05942897213888755, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" 
}, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 742.151, "cuda_time_us": 153.054, "pct_cuda_time": 0.09665014612261368, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 38.272, "pct_cuda_time": 0.024167904088783502, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.31, "pct_cuda_time": 0.07155270726118464, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0009295347726455194, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.034, "cuda_time_us": 311.484, "pct_cuda_time": 0.1966951148931501, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.001091192993975175, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- 
matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 309.756, "pct_cuda_time": 0.1956039218991749, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.594, "cuda_time_us": 74.655, "pct_cuda_time": 0.04714294731783373, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 74.655, "pct_cuda_time": 0.04714294731783373, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 478.59, "cuda_time_us": 3512.8500000000004, "pct_cuda_time": 2.2182854796792206, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 172.809, "cuda_time_us": 2185.827, "pct_cuda_time": 1.380300409977879, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0007880838289820708, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2184.579, "pct_cuda_time": 1.3795123261488969, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.485, "cuda_time_us": 300.764, "pct_cuda_time": 0.18992567687497078, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 300.764, "pct_cuda_time": 0.18992567687497078, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.856, "cuda_time_us": 1026.259, "pct_cuda_time": 0.6480593928263709, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 0.0009699493279779333, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1024.723, "pct_cuda_time": 0.647089443498393, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 
14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2418.564, "cuda_time_us": 4702.115, "pct_cuda_time": 2.9692794819823956, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.918, "cuda_time_us": 76.415, "pct_cuda_time": 0.04825434758947511, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 76.415, "pct_cuda_time": 0.04825434758947511, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1728.321, "cuda_time_us": 1033.0739999999998, "pct_cuda_time": 0.6523629114918459, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.792, "cuda_time_us": 474.586, "pct_cuda_time": 0.29969034620295276, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.216, "pct_cuda_time": 0.0007678765513158637, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 473.37, "pct_cuda_time": 0.2989224696516369, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.594, "cuda_time_us": 93.79, "pct_cuda_time": 0.05922626788479841, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 93.79, "pct_cuda_time": 0.05922626788479841, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 756.803, "cuda_time_us": 153.502, "pct_cuda_time": 0.09693304800994057, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 38.688, "pct_cuda_time": 0.024430598698444193, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, 
flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.246, "pct_cuda_time": 0.07151229270585223, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.568, "pct_cuda_time": 0.0009901566056441402, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 199.248, "cuda_time_us": 311.19599999999997, "pct_cuda_time": 0.19651324939415424, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 310.46, "pct_cuda_time": 0.19604848200783145, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.792, "cuda_time_us": 75.551, "pct_cuda_time": 0.047708751092487525, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.551, "pct_cuda_time": 0.047708751092487525, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, 
"children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 468.499, "cuda_time_us": 3517.0750000000003, "pct_cuda_time": 2.220953471808587, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.792, "cuda_time_us": 2189.251, "pct_cuda_time": 1.382462588688163, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2188.483, "pct_cuda_time": 1.3819776140241742, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.268, "cuda_time_us": 300.925, "pct_cuda_time": 0.19002734474072888, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 300.925, "pct_cuda_time": 0.19002734474072888, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 158.235, "cuda_time_us": 1026.899, "pct_cuda_time": 0.6484635383796951, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0008082911066482778, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1025.619, "pct_cuda_time": 0.6476552472730468, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2456.138, "cuda_time_us": 4703.845, "pct_cuda_time": 2.9703719379312252, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.533, "cuda_time_us": 77.759, "pct_cuda_time": 0.0491030532514558, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.759, "pct_cuda_time": 0.0491030532514558, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1733.599, "cuda_time_us": 1033.299, "pct_cuda_time": 0.6525049939129365, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.128, "cuda_time_us": 474.77799999999996, "pct_cuda_time": 
0.29981158986894996, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 474.042, "pct_cuda_time": 0.29934682248262723, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.799, "cuda_time_us": 93.567, "pct_cuda_time": 0.059085448418562024, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 93.567, "pct_cuda_time": 0.059085448418562024, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 744.783, "cuda_time_us": 153.14999999999998, "pct_cuda_time": 0.09671076795561227, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 38.272, "pct_cuda_time": 0.024167904088783502, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.214, "pct_cuda_time": 0.07149208542818603, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 
0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.001050778438642761, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 218.634, "cuda_time_us": 311.80400000000003, "pct_cuda_time": 0.1968971876698122, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.0008891202173131054, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 310.396, "pct_cuda_time": 0.1960080674524991, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.39, "cuda_time_us": 75.839, "pct_cuda_time": 0.04789061659148338, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.839, "pct_cuda_time": 0.04789061659148338, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 485.826, "cuda_time_us": 3516.9480000000003, "pct_cuda_time": 2.220873274175349, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 174.599, "cuda_time_us": 2188.931, "pct_cuda_time": 1.382260515911501, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.001070985716308968, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2187.235, "pct_cuda_time": 1.381189530195192, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { 
"entry": { "name": "SiluAndMul", "cpu_time_us": 100.85, "cuda_time_us": 301.149, "pct_cuda_time": 0.19016879568439232, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 301.149, "pct_cuda_time": 0.19016879568439232, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.847, "cuda_time_us": 1026.8680000000002, "pct_cuda_time": 0.6484439625794561, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.00046539886374982867, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1026.131, "pct_cuda_time": 0.6479785637157062, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2428.15, "cuda_time_us": 4701.253, "pct_cuda_time": 2.968735148440262, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.8, "cuda_time_us": 77.311, "pct_cuda_time": 0.04882015136412891, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.311, "pct_cuda_time": 0.04882015136412891, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1738.423, "cuda_time_us": 1034.964, "pct_cuda_time": 0.6535564038290063, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 159.521, "cuda_time_us": 477.082, "pct_cuda_time": 0.30126651386091685, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.76, "pct_cuda_time": 0.001111400271641382, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 475.322, "pct_cuda_time": 0.30015511358927555, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 497.63, "cuda_time_us": 93.952, "pct_cuda_time": 0.059328567227983575, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 93.952, "pct_cuda_time": 0.059328567227983575, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 731.093, "cuda_time_us": 152.82999999999998, "pct_cuda_time": 0.09650869517895021, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 38.016, "pct_cuda_time": 0.024006245867453847, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.15, "pct_cuda_time": 0.07145167087285362, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.001050778438642761, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.0, "cuda_time_us": 311.09999999999997, "pct_cuda_time": 0.1964526275611556, 
"trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 310.364, "pct_cuda_time": 0.19598786017483286, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.272, "cuda_time_us": 74.719, "pct_cuda_time": 0.04718336187316614, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 74.719, "pct_cuda_time": 0.04718336187316614, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.714, "cuda_time_us": 3514.259, "pct_cuda_time": 2.2191752313739608, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.04, "cuda_time_us": 2187.812, "pct_cuda_time": 1.3815538926706106, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0008284983843144847, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2186.5, "pct_cuda_time": 1.3807253942862963, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.374, "cuda_time_us": 300.732, "pct_cuda_time": 0.1899054695973046, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 300.732, "pct_cuda_time": 0.1899054695973046, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.386, "cuda_time_us": 1025.715, "pct_cuda_time": 0.6477158691060454, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.769, "pct_cuda_time": 0.0004856061414160356, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, 
"cuda_time_us": 1024.946, "pct_cuda_time": 0.6472302629646294, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2473.457, "cuda_time_us": 4707.073, "pct_cuda_time": 2.9724103470658036, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.396, "cuda_time_us": 77.343, "pct_cuda_time": 0.04884035864179511, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.343, "pct_cuda_time": 0.04884035864179511, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1783.922, "cuda_time_us": 1033.394, "pct_cuda_time": 0.6525649842685081, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.386, "cuda_time_us": 475.322, "pct_cuda_time": 0.30015511358927555, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 474.586, "pct_cuda_time": 0.29969034620295276, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 469.539, "cuda_time_us": 93.822, "pct_cuda_time": 0.05924647516246462, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 93.822, "pct_cuda_time": 0.05924647516246462, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 745.765, "cuda_time_us": 151.998, "pct_cuda_time": 0.09598330595962883, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 37.728, "pct_cuda_time": 0.023824380368457986, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, 
cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 112.83, "pct_cuda_time": 0.07124959809619154, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0009093274949793123, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 211.791, "cuda_time_us": 312.252, "pct_cuda_time": 0.19718008955713906, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.92, "pct_cuda_time": 0.0012124366599724167, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 310.332, "pct_cuda_time": 0.19596765289716664, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.114, "cuda_time_us": 74.591, "pct_cuda_time": 0.04710253276250131, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 74.591, "pct_cuda_time": 0.04710253276250131, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 465.01, "cuda_time_us": 3521.745, "pct_cuda_time": 2.223902471392999, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.243, "cuda_time_us": 2192.4519999999998, "pct_cuda_time": 1.3844839479322106, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2191.716, "pct_cuda_time": 1.384019180545888, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.846, "cuda_time_us": 301.02, "pct_cuda_time": 0.19008733509630044, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 301.02, "pct_cuda_time": 0.19008733509630044, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.842, "cuda_time_us": 1028.273, "pct_cuda_time": 0.6493311883644878, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.0009491105728846572, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1026.77, "pct_cuda_time": 0.6483820777916033, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2528.392, "cuda_time_us": 4702.722, "pct_cuda_time": 2.969662788780626, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.909, "cuda_time_us": 76.383, "pct_cuda_time": 0.0482341403118089, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 76.383, "pct_cuda_time": 0.0482341403118089, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1805.185, "cuda_time_us": 1034.034, "pct_cuda_time": 0.6529691298218322, "trace": "" }, 
"children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 149.069, "cuda_time_us": 476.026, "pct_cuda_time": 0.30059967369793206, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.216, "pct_cuda_time": 0.0007678765513158637, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 474.81, "pct_cuda_time": 0.29983179714661623, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 486.814, "cuda_time_us": 94.015, "pct_cuda_time": 0.059368350305888926, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 94.015, "pct_cuda_time": 0.059368350305888926, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 798.142, "cuda_time_us": 153.597, "pct_cuda_time": 0.09699303836551212, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 38.623, "pct_cuda_time": 0.02438955266568471, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 113.534, "pct_cuda_time": 0.07169415820484809, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], 
int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0009093274949793123, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 224.502, "cuda_time_us": 310.396, "pct_cuda_time": 0.1960080674524991, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.00046539886374982867, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 309.659, "pct_cuda_time": 0.19554266858874925, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.673, "cuda_time_us": 75.743, "pct_cuda_time": 0.04782999475848476, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.743, "pct_cuda_time": 0.04782999475848476, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 493.508, "cuda_time_us": 3516.562, "pct_cuda_time": 2.2206295238885008, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 177.823, "cuda_time_us": 2188.7400000000002, "pct_cuda_time": 1.3821399037229307, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0008487056619806917, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2187.396, "pct_cuda_time": 1.3812911980609501, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- 
matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.769, "cuda_time_us": 300.763, "pct_cuda_time": 0.1899250453975437, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 300.763, "pct_cuda_time": 0.1899250453975437, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 163.751, "cuda_time_us": 1027.059, "pct_cuda_time": 0.6485645747680261, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.00046539886374982867, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1026.322, "pct_cuda_time": 0.6480991759042762, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2458.376, "cuda_time_us": 5027.516, "pct_cuda_time": 3.174762868228064, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.974, "cuda_time_us": 78.015, "pct_cuda_time": 0.04926471147278545, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 78.015, "pct_cuda_time": 0.04926471147278545, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1758.862, "cuda_time_us": 1103.12, "pct_cuda_time": 0.6965953793483187, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 156.451, "cuda_time_us": 513.625, "pct_cuda_time": 0.32434259347829814, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 512.889, "pct_cuda_time": 0.3238778260919754, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 509.089, "cuda_time_us": 97.727, "pct_cuda_time": 0.06171239451516894, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 97.727, "pct_cuda_time": 0.06171239451516894, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 741.505, "cuda_time_us": 165.15, "pct_cuda_time": 0.10428849708043988, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.455, "pct_cuda_time": 0.024914941885006092, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 123.967, "pct_cuda_time": 0.07828236220145864, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.001091192993975175, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 204.899, "cuda_time_us": 326.618, "pct_cuda_time": 0.20625189427441187, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.247, "pct_cuda_time": 0.0007874523515550019, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 325.371, "pct_cuda_time": 0.20546444192285684, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.189, "cuda_time_us": 76.126, "pct_cuda_time": 0.04807185061305218, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 76.126, "pct_cuda_time": 0.04807185061305218, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.331, "cuda_time_us": 3770.255, "pct_cuda_time": 2.380830926793908, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.86, "cuda_time_us": 2382.337, "pct_cuda_time": 1.5043920391712016, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.6, "pct_cuda_time": 0.0010103638833103472, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2380.737, "pct_cuda_time": 1.5033816752878912, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.705, "cuda_time_us": 311.676, "pct_cuda_time": 0.19681635855914736, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 311.676, "pct_cuda_time": 0.19681635855914736, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.212, "cuda_time_us": 1076.242, "pct_cuda_time": 0.6796225290635591, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0008284983843144847, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1074.93, "pct_cuda_time": 0.6787940306792447, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 11177.586, "cuda_time_us": 5067.198, "pct_cuda_time": 3.1998211554890155, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.338, "cuda_time_us": 77.823, "pct_cuda_time": 0.049143467806788216, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.823, "pct_cuda_time": 0.049143467806788216, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1675.975, "cuda_time_us": 1126.482, "pct_cuda_time": 0.711347954999504, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.356, "cuda_time_us": 526.8409999999999, "pct_cuda_time": 0.33268819915444153, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.001030571160976554, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 525.209, "pct_cuda_time": 0.33165762799346504, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 456.183, "cuda_time_us": 98.783, "pct_cuda_time": 0.06237923467815376, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.783, "pct_cuda_time": 0.06237923467815376, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 711.98, "cuda_time_us": 168.57399999999998, "pct_cuda_time": 0.10645067579072402, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 40.031, "pct_cuda_time": 0.025278672882997816, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], 
bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.847, "pct_cuda_time": 0.08010101719141724, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.001070985716308968, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 208.596, "cuda_time_us": 332.284, "pct_cuda_time": 0.2098298453761846, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 331.548, "pct_cuda_time": 0.20936507798986187, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.958, "cuda_time_us": 75.487, "pct_cuda_time": 0.047668336537155104, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.487, "pct_cuda_time": 0.047668336537155104, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 9278.62, "cuda_time_us": 3787.406, "pct_cuda_time": 2.391661396145568, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 8809.84, "cuda_time_us": 2397.121, "pct_cuda_time": 1.5137278014529891, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0008487056619806917, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2395.777, "pct_cuda_time": 1.5128790957910083, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 159.983, "cuda_time_us": 310.908, "pct_cuda_time": 0.1963313838951584, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.908, "pct_cuda_time": 0.1963313838951584, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 218.381, "cuda_time_us": 1079.377, "pct_cuda_time": 0.6816022107974203, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.00046413590889569066, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1078.642, "pct_cuda_time": 0.6811380748885247, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3286.995, "cuda_time_us": 5084.922, "pct_cuda_time": 3.2110134614063854, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 108.494, "cuda_time_us": 78.335, "pct_cuda_time": 0.04946678424944752, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 78.335, "pct_cuda_time": 0.04946678424944752, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2389.933, "cuda_time_us": 1128.592, "pct_cuda_time": 0.7126803723706195, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 192.038, "cuda_time_us": 525.753, "pct_cuda_time": 0.33200115171379063, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.0008891202173131054, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 524.345, "pct_cuda_time": 0.3311120314964775, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 607.821, "cuda_time_us": 98.303, "pct_cuda_time": 0.062076125513160656, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.303, "pct_cuda_time": 0.062076125513160656, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1121.833, "cuda_time_us": 168.509, "pct_cuda_time": 0.10640962975796454, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 40.223, "pct_cuda_time": 0.025399916548995056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.782, 
"pct_cuda_time": 0.08005997115865776, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0009497420503117263, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 224.298, "cuda_time_us": 336.027, "pct_cuda_time": 0.21219346538570374, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.001030571160976554, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 334.395, "pct_cuda_time": 0.2111628942247272, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 139.887, "cuda_time_us": 75.391, "pct_cuda_time": 0.04760771470415649, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.391, "pct_cuda_time": 0.04760771470415649, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 564.773, "cuda_time_us": 3802.6039999999994, "pct_cuda_time": 2.4012585900821612, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 191.922, "cuda_time_us": 2413.7599999999998, "pct_cuda_time": 1.5242349543619895, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2413.024, "pct_cuda_time": 1.5237701869756668, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 111.891, "cuda_time_us": 310.747, "pct_cuda_time": 0.1962297160294003, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.747, "pct_cuda_time": 0.1962297160294003, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 202.002, "cuda_time_us": 1078.097, "pct_cuda_time": 0.6807939196907721, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.663, "pct_cuda_time": 0.0010501469612156922, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1076.434, "pct_cuda_time": 0.6797437727295564, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2996.156, "cuda_time_us": 5060.543000000001, "pct_cuda_time": 3.1956186732118717, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.847, "cuda_time_us": 78.079, "pct_cuda_time": 0.04930512602811787, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 78.079, "pct_cuda_time": 0.04930512602811787, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2181.552, "cuda_time_us": 1123.251, "pct_cuda_time": 0.7093076514326442, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 192.392, "cuda_time_us": 524.602, "pct_cuda_time": 0.3312743211952342, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 523.866, "pct_cuda_time": 0.33080955380891147, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- 
linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 510.27, "cuda_time_us": 98.495, "pct_cuda_time": 0.062197369179157906, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.495, "pct_cuda_time": 0.062197369179157906, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1053.058, "cuda_time_us": 167.167, "pct_cuda_time": 0.105562187050838, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.935, "pct_cuda_time": 0.025218051049999197, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 125.695, "pct_cuda_time": 0.07937355519543379, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.537, "pct_cuda_time": 0.0009705808054050022, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, 
None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 234.775, "cuda_time_us": 332.98699999999997, "pct_cuda_time": 0.21027377400741407, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 332.251, "pct_cuda_time": 0.20980900662109134, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 96.453, "cuda_time_us": 75.135, "pct_cuda_time": 0.047446056482826834, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.135, "pct_cuda_time": 0.047446056482826834, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 562.84, "cuda_time_us": 3784.0780000000004, "pct_cuda_time": 2.3895598392682826, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 192.276, "cuda_time_us": 2405.024, "pct_cuda_time": 1.5187183675591152, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.343, "pct_cuda_time": 0.0008480741845536225, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2403.681, "pct_cuda_time": 1.5178702933745616, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 112.951, "cuda_time_us": 310.684, "pct_cuda_time": 0.19618993295149495, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.684, "pct_cuda_time": 0.19618993295149495, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 202.034, "cuda_time_us": 1068.3700000000001, "pct_cuda_time": 0.6746515387576724, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", 
"cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1067.634, "pct_cuda_time": 0.6741867713713495, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3001.982, "cuda_time_us": 5072.956999999999, "pct_cuda_time": 3.203457833991505, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.774, "cuda_time_us": 78.335, "pct_cuda_time": 0.04946678424944752, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 78.335, "pct_cuda_time": 0.04946678424944752, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2236.843, "cuda_time_us": 1125.712, "pct_cuda_time": 0.710861717380661, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 230.975, "cuda_time_us": 523.001, "pct_cuda_time": 0.33026332583449675, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.215, "pct_cuda_time": 0.000767245073888795, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 521.786, "pct_cuda_time": 0.329496080760608, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 532.745, "cuda_time_us": 98.815, "pct_cuda_time": 0.06239944195581997, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.815, "pct_cuda_time": 0.06239944195581997, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1057.047, "cuda_time_us": 169.18099999999998, "pct_cuda_time": 0.10683398258895489, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 40.095, 
"pct_cuda_time": 0.02531908743833023, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 127.134, "pct_cuda_time": 0.08028225121298604, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.952, "pct_cuda_time": 0.0012326439376386235, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 224.147, "cuda_time_us": 334.715, "pct_cuda_time": 0.21136496700138924, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 333.979, "pct_cuda_time": 0.2109001996150665, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- 
linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.442, "cuda_time_us": 75.903, "pct_cuda_time": 0.0479310311468158, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.903, "pct_cuda_time": 0.0479310311468158, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 523.158, "cuda_time_us": 3793.0069999999996, "pct_cuda_time": 2.395198301214581, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 181.21, "cuda_time_us": 2412.5769999999998, "pct_cuda_time": 1.5234879165657669, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.001070985716308968, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2410.881, "pct_cuda_time": 1.522416930849458, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 133.529, "cuda_time_us": 310.556, "pct_cuda_time": 0.19610910384083008, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.556, "pct_cuda_time": 0.19610910384083008, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 158.026, "cuda_time_us": 1069.874, "pct_cuda_time": 0.675601280807984, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 2.048, "pct_cuda_time": 0.0012932657706372444, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1067.826, "pct_cuda_time": 0.6743080150373467, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2907.607, "cuda_time_us": 5075.326, "pct_cuda_time": 3.204953804016232, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 94.165, "cuda_time_us": 77.919, "pct_cuda_time": 0.04920408963978684, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.919, "pct_cuda_time": 0.04920408963978684, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2056.038, "cuda_time_us": 1124.882, "pct_cuda_time": 0.7103375911161937, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 186.127, "cuda_time_us": 522.969, "pct_cuda_time": 0.33024311855683064, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.001070985716308968, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 521.273, "pct_cuda_time": 0.3291721328405216, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 493.487, "cuda_time_us": 98.878, "pct_cuda_time": 0.062439225033725315, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.878, "pct_cuda_time": 0.062439225033725315, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 957.153, "cuda_time_us": 168.638, "pct_cuda_time": 0.10649109034605644, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.488, "pct_cuda_time": 0.024935780640099367, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, 
true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.75, "pct_cuda_time": 0.08003976388099157, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.0015155458249655207, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 223.652, "cuda_time_us": 334.39700000000005, "pct_cuda_time": 0.2111641571795814, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.00046539886374982867, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 333.66, "pct_cuda_time": 0.21069875831583154, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.206, "cuda_time_us": 75.775, "pct_cuda_time": 0.04785020203615097, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.775, "pct_cuda_time": 0.04785020203615097, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 569.626, "cuda_time_us": 3796.75, "pct_cuda_time": 2.3975619212241, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 228.491, "cuda_time_us": 2413.7599999999998, "pct_cuda_time": 1.5242349543619895, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, 
"pct_cuda_time": 0.0008284983843144847, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2412.448, "pct_cuda_time": 1.5234064559776752, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.039, "cuda_time_us": 310.972, "pct_cuda_time": 0.1963717984504908, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.972, "pct_cuda_time": 0.1963717984504908, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 185.173, "cuda_time_us": 1072.018, "pct_cuda_time": 0.6769551684116198, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1071.282, "pct_cuda_time": 0.676490401025297, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3210.544, "cuda_time_us": 5062.1759999999995, "pct_cuda_time": 3.1966498758502744, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 99.612, "cuda_time_us": 78.111, "pct_cuda_time": 0.04932533330578408, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 78.111, "pct_cuda_time": 0.04932533330578408, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2419.358, "cuda_time_us": 1124.4660000000001, "pct_cuda_time": 0.7100748965065331, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 203.094, "cuda_time_us": 524.985, "pct_cuda_time": 0.33151617704980163, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0007880838289820708, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 523.737, "pct_cuda_time": 0.33072809322081953, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 524.145, "cuda_time_us": 98.303, "pct_cuda_time": 0.062076125513160656, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.303, "pct_cuda_time": 0.062076125513160656, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1217.286, "cuda_time_us": 167.70999999999998, "pct_cuda_time": 0.10590507929373644, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.615, "pct_cuda_time": 0.02501597827333713, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.591, "pct_cuda_time": 0.0799393589700876, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, 
"pct_cuda_time": 0.0009497420503117263, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 274.757, "cuda_time_us": 333.468, "pct_cuda_time": 0.2105775146498343, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.665, "pct_cuda_time": 0.00105140991606983, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 331.803, "pct_cuda_time": 0.20952610473376443, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.748, "cuda_time_us": 75.839, "pct_cuda_time": 0.04789061659148338, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.839, "pct_cuda_time": 0.04789061659148338, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 527.485, "cuda_time_us": 3783.7599999999998, "pct_cuda_time": 2.3893590294464744, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 190.471, "cuda_time_us": 2395.169, "pct_cuda_time": 1.5124951575153505, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2394.433, "pct_cuda_time": 1.5120303901290277, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 113.844, "cuda_time_us": 310.685, "pct_cuda_time": 0.196190564428922, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.685, "pct_cuda_time": 0.196190564428922, "trace": 
"_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 169.105, "cuda_time_us": 1077.906, "pct_cuda_time": 0.6806733075022019, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.537, "pct_cuda_time": 0.0009705808054050022, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1076.369, "pct_cuda_time": 0.6797027266967969, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2672.193, "cuda_time_us": 5067.454, "pct_cuda_time": 3.1999828137103448, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.841, "cuda_time_us": 78.175, "pct_cuda_time": 0.04936574786111649, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 78.175, "pct_cuda_time": 0.04936574786111649, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1897.097, "cuda_time_us": 1125.233, "pct_cuda_time": 0.7105592396930949, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 164.994, "cuda_time_us": 523.898, "pct_cuda_time": 0.3308297610865777, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 523.162, "pct_cuda_time": 0.3303649937002549, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 484.911, "cuda_time_us": 98.27, "pct_cuda_time": 0.062055286758067374, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.27, "pct_cuda_time": 0.062055286758067374, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 850.96, "cuda_time_us": 168.797, "pct_cuda_time": 0.10659149525696042, "trace": "" }, "children": [ { "entry": { "name": 
"void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.935, "pct_cuda_time": 0.025218051049999197, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 127.294, "pct_cuda_time": 0.08038328760131708, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.568, "pct_cuda_time": 0.0009901566056441402, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 231.403, "cuda_time_us": 334.268, "pct_cuda_time": 0.21108269659148943, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 333.532, "pct_cuda_time": 0.21061792920516667, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 93.322, "cuda_time_us": 75.998, "pct_cuda_time": 0.047991021502387356, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.998, "pct_cuda_time": 0.047991021502387356, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 527.876, "cuda_time_us": 3788.0480000000002, "pct_cuda_time": 2.3920668046537465, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 189.65, "cuda_time_us": 2401.186, "pct_cuda_time": 1.5162947571940246, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.377, "pct_cuda_time": 0.0008695444170739676, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2399.809, "pct_cuda_time": 1.5154252127769507, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.264, "cuda_time_us": 310.62, "pct_cuda_time": 0.19614951839616251, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.62, "pct_cuda_time": 0.19614951839616251, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 190.907, "cuda_time_us": 1076.2420000000002, "pct_cuda_time": 0.6796225290635592, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1075.506, "pct_cuda_time": 0.6791577616772364, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2929.72, 
"cuda_time_us": 5073.532999999999, "pct_cuda_time": 3.2038215649894966, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.44, "cuda_time_us": 77.311, "pct_cuda_time": 0.04882015136412891, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.311, "pct_cuda_time": 0.04882015136412891, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2143.094, "cuda_time_us": 1125.105, "pct_cuda_time": 0.7104784105824301, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 190.474, "cuda_time_us": 524.441, "pct_cuda_time": 0.33117265332947615, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.216, "pct_cuda_time": 0.0007678765513158637, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 523.225, "pct_cuda_time": 0.33040477677816027, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 520.493, "cuda_time_us": 98.718, "pct_cuda_time": 0.06233818864539428, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.718, "pct_cuda_time": 0.06233818864539428, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 988.322, "cuda_time_us": 168.286, "pct_cuda_time": 0.10626881029172817, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.68, "pct_cuda_time": 0.025057024306096607, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, 
true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.878, "pct_cuda_time": 0.08012059299165639, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.001091192993975175, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 234.998, "cuda_time_us": 333.65999999999997, "pct_cuda_time": 0.21069875831583149, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 332.924, "pct_cuda_time": 0.21023399092950876, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 93.677, "cuda_time_us": 76.191, "pct_cuda_time": 0.04811289664581166, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 76.191, "pct_cuda_time": 0.04811289664581166, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 536.509, "cuda_time_us": 3794.926, "pct_cuda_time": 2.3964101063971266, "trace": "" }, 
"children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 194.931, "cuda_time_us": 2411.712, "pct_cuda_time": 1.5229416885913525, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.568, "pct_cuda_time": 0.0009901566056441402, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2410.144, "pct_cuda_time": 1.5219515319857082, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 109.597, "cuda_time_us": 310.812, "pct_cuda_time": 0.19627076206215976, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.812, "pct_cuda_time": 0.19627076206215976, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 174.522, "cuda_time_us": 1072.402, "pct_cuda_time": 0.6771976557436143, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0008487056619806917, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1071.058, "pct_cuda_time": 0.6763489500816336, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2843.266, "cuda_time_us": 5072.413, "pct_cuda_time": 3.2031143102711797, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.808, "cuda_time_us": 77.503, "pct_cuda_time": 0.04894139503012615, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.503, "pct_cuda_time": 0.04894139503012615, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2075.749, "cuda_time_us": 1126.289, "pct_cuda_time": 0.7112260798560797, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 181.494, "cuda_time_us": 525.369, "pct_cuda_time": 0.33175866438179613, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0009497420503117263, "trace": 
"mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 523.865, "pct_cuda_time": 0.3308089223314844, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 483.706, "cuda_time_us": 99.263, "pct_cuda_time": 0.06268234384314687, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 99.263, "pct_cuda_time": 0.06268234384314687, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 984.046, "cuda_time_us": 167.35699999999997, "pct_cuda_time": 0.10568216776198108, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.295, "pct_cuda_time": 0.02481390549667506, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.398, "pct_cuda_time": 0.07981748382666327, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { 
"name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.001050778438642761, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 228.584, "cuda_time_us": 334.3, "pct_cuda_time": 0.21110290386915564, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0008284983843144847, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 332.988, "pct_cuda_time": 0.21027440548484116, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.264, "cuda_time_us": 74.847, "pct_cuda_time": 0.047264190983830966, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 74.847, "pct_cuda_time": 0.047264190983830966, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 530.52, "cuda_time_us": 3793.7739999999994, "pct_cuda_time": 2.3956826444011425, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 196.509, "cuda_time_us": 2412.0319999999997, "pct_cuda_time": 1.5231437613680143, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2411.296, "pct_cuda_time": 1.5226789939816916, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 106.729, "cuda_time_us": 311.26, "pct_cuda_time": 0.19655366394948662, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 311.26, "pct_cuda_time": 0.19655366394948662, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 177.242, "cuda_time_us": 1070.482, "pct_cuda_time": 0.6759852190836418, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1069.714, "pct_cuda_time": 0.6755002444196528, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2869.761, "cuda_time_us": 5070.044, "pct_cuda_time": 3.201618340246454, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.238, "cuda_time_us": 77.439, "pct_cuda_time": 0.04890098047479372, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.439, "pct_cuda_time": 0.04890098047479372, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2061.062, "cuda_time_us": 1126.32, "pct_cuda_time": 0.7112456556563188, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 168.567, "cuda_time_us": 525.081, "pct_cuda_time": 0.3315767988828002, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.216, "pct_cuda_time": 0.0007678765513158637, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 523.865, "pct_cuda_time": 0.3308089223314844, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 498.301, "cuda_time_us": 98.751, "pct_cuda_time": 0.06235902740048756, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.751, "pct_cuda_time": 0.06235902740048756, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, 
"children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1009.852, "cuda_time_us": 167.613, "pct_cuda_time": 0.10584382598331075, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.423, "pct_cuda_time": 0.024894734607339885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.655, "pct_cuda_time": 0.07997977352542, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.535, "pct_cuda_time": 0.0009693178505508641, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 214.54, "cuda_time_us": 334.875, "pct_cuda_time": 0.21146600338972033, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.001030571160976554, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- 
matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 333.243, "pct_cuda_time": 0.21043543222874372, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 95.857, "cuda_time_us": 74.879, "pct_cuda_time": 0.047284398261497176, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 74.879, "pct_cuda_time": 0.047284398261497176, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 530.393, "cuda_time_us": 3791.406, "pct_cuda_time": 2.3941873058538437, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 186.57, "cuda_time_us": 2402.625, "pct_cuda_time": 1.5172034532115768, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2401.889, "pct_cuda_time": 1.516738685825254, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 110.707, "cuda_time_us": 311.515, "pct_cuda_time": 0.19671469069338926, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 311.515, "pct_cuda_time": 0.19671469069338926, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 176.943, "cuda_time_us": 1077.2659999999998, "pct_cuda_time": 0.6802691619488777, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0008284983843144847, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1075.954, "pct_cuda_time": 0.6794406635645632, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 
14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2666.121, "cuda_time_us": 5069.534, "pct_cuda_time": 3.201296286758648, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.143, "cuda_time_us": 78.048, "pct_cuda_time": 0.04928555022787873, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 78.048, "pct_cuda_time": 0.04928555022787873, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1923.848, "cuda_time_us": 1125.616, "pct_cuda_time": 0.7108010955476622, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 178.771, "cuda_time_us": 525.017, "pct_cuda_time": 0.33153638432746785, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 524.249, "pct_cuda_time": 0.3310514096634789, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 511.209, "cuda_time_us": 98.687, "pct_cuda_time": 0.06231861284515514, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.687, "pct_cuda_time": 0.06231861284515514, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 875.877, "cuda_time_us": 168.285, "pct_cuda_time": 0.10626817881430109, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.936, "pct_cuda_time": 0.025218682527426265, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, 
flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.814, "pct_cuda_time": 0.08008017843632396, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.535, "pct_cuda_time": 0.0009693178505508641, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 208.206, "cuda_time_us": 333.627, "pct_cuda_time": 0.21067791956073822, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0007880838289820708, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 332.379, "pct_cuda_time": 0.20988983573175618, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.025, "cuda_time_us": 75.2, "pct_cuda_time": 0.04748710251558631, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.2, "pct_cuda_time": 0.04748710251558631, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { 
"entry": { "name": "LlamaMLP", "cpu_time_us": 512.407, "cuda_time_us": 3790.67, "pct_cuda_time": 2.393722538467521, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 187.961, "cuda_time_us": 2402.08, "pct_cuda_time": 1.516859298013824, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.00046413590889569066, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2401.345, "pct_cuda_time": 1.5163951621049283, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 107.076, "cuda_time_us": 310.492, "pct_cuda_time": 0.1960686892854977, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.492, "pct_cuda_time": 0.1960686892854977, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 167.798, "cuda_time_us": 1078.0980000000002, "pct_cuda_time": 0.6807945511681993, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1077.362, "pct_cuda_time": 0.6803297837818765, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2754.363, "cuda_time_us": 5059.807000000001, "pct_cuda_time": 3.195153905825549, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 101.057, "cuda_time_us": 78.175, "pct_cuda_time": 0.04936574786111649, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 78.175, "pct_cuda_time": 0.04936574786111649, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1985.133, "cuda_time_us": 1123.314, "pct_cuda_time": 0.7093474345105496, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 176.017, "cuda_time_us": 524.537, "pct_cuda_time": 0.33123327516247475, "trace": "" 
}, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0008284983843144847, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 523.225, "pct_cuda_time": 0.33040477677816027, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 514.316, "cuda_time_us": 98.207, "pct_cuda_time": 0.06201550368016203, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.207, "pct_cuda_time": 0.06201550368016203, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 912.906, "cuda_time_us": 168.702, "pct_cuda_time": 0.10653150490138887, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 40.288, "pct_cuda_time": 0.025440962581754538, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.686, "pct_cuda_time": 0.07999934932565915, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.001091192993975175, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 195.57, "cuda_time_us": 331.868, "pct_cuda_time": 0.2095671507665239, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 331.132, "pct_cuda_time": 0.20910238338020118, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.55, "cuda_time_us": 75.231, "pct_cuda_time": 0.04750667831582545, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.231, "pct_cuda_time": 0.04750667831582545, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 500.484, "cuda_time_us": 3783.0870000000004, "pct_cuda_time": 2.388934045138057, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 172.424, "cuda_time_us": 2396.257, "pct_cuda_time": 1.5131822049560015, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.792, "pct_cuda_time": 0.0011316075493075888, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2394.465, "pct_cuda_time": 1.512050597406694, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { 
"name": "SiluAndMul", "cpu_time_us": 95.806, "cuda_time_us": 310.972, "pct_cuda_time": 0.1963717984504908, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 310.972, "pct_cuda_time": 0.1963717984504908, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 184.613, "cuda_time_us": 1075.858, "pct_cuda_time": 0.6793800417315646, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1075.09, "pct_cuda_time": 0.6788950670675756, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2863.065, "cuda_time_us": 5079.005, "pct_cuda_time": 3.207277009470418, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.133, "cuda_time_us": 77.855, "pct_cuda_time": 0.049163675084454427, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.855, "pct_cuda_time": 0.049163675084454427, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2137.366, "cuda_time_us": 1123.763, "pct_cuda_time": 0.7096309678753034, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 178.707, "cuda_time_us": 523.193, "pct_cuda_time": 0.330384569500494, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 522.457, "pct_cuda_time": 0.32991980211417127, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 495.602, "cuda_time_us": 98.239, "pct_cuda_time": 0.062035710957828255, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.239, 
"pct_cuda_time": 0.062035710957828255, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1077.286, "cuda_time_us": 167.616, "pct_cuda_time": 0.10584572041559197, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.52, "pct_cuda_time": 0.024955987917765574, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 126.303, "pct_cuda_time": 0.07975749347109173, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.793, "pct_cuda_time": 0.0011322390267346578, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 209.386, "cuda_time_us": 334.715, "pct_cuda_time": 0.21136496700138924, "trace": "" }, "children": [ { "entry": { 
"name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0008082911066482778, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 333.435, "pct_cuda_time": 0.21055667589474097, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.074, "cuda_time_us": 75.583, "pct_cuda_time": 0.04772895837015373, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.583, "pct_cuda_time": 0.04772895837015373, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 496.995, "cuda_time_us": 3801.804, "pct_cuda_time": 2.400753408140507, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.575, "cuda_time_us": 2411.808, "pct_cuda_time": 1.523002310424351, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2411.072, "pct_cuda_time": 1.5225375430380284, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.566, "cuda_time_us": 311.163, "pct_cuda_time": 0.19649241063906098, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 311.163, "pct_cuda_time": 0.19649241063906098, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 181.251, "cuda_time_us": 1078.833, "pct_cuda_time": 0.6812586870770949, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1078.065, "pct_cuda_time": 
0.6807737124131059, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2712.171, "cuda_time_us": 5079.421, "pct_cuda_time": 3.2075397040800793, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 95.7, "cuda_time_us": 77.984, "pct_cuda_time": 0.04924513567254632, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.984, "pct_cuda_time": 0.04924513567254632, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1953.281, "cuda_time_us": 1125.329, "pct_cuda_time": 0.7106198615260936, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.825, "cuda_time_us": 524.697, "pct_cuda_time": 0.3313343115508057, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.152, "pct_cuda_time": 0.0007274619959834498, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 523.545, "pct_cuda_time": 0.3306068495548223, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 523.892, "cuda_time_us": 98.943, "pct_cuda_time": 0.062480271066484794, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.943, "pct_cuda_time": 0.062480271066484794, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 908.543, "cuda_time_us": 168.062, "pct_cuda_time": 0.10612735934806473, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.392, "pct_cuda_time": 0.02487515880710075, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, 
cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 127.134, "pct_cuda_time": 0.08028225121298604, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 0.0009699493279779333, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 204.01, "cuda_time_us": 333.627, "pct_cuda_time": 0.21067791956073822, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 2.048, "pct_cuda_time": 0.0012932657706372444, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 331.579, "pct_cuda_time": 0.20938465379010102, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.323, "cuda_time_us": 75.871, "pct_cuda_time": 0.04791082386914959, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.871, "pct_cuda_time": 
0.04791082386914959, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 514.06, "cuda_time_us": 3800.237, "pct_cuda_time": 2.39976388301229, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.941, "cuda_time_us": 2407.04, "pct_cuda_time": 1.5199914260520861, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.279, "pct_cuda_time": 0.0008076596292212086, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2405.761, "pct_cuda_time": 1.519183766422865, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.045, "cuda_time_us": 312.284, "pct_cuda_time": 0.19720029683480528, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 312.284, "pct_cuda_time": 0.19720029683480528, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 168.564, "cuda_time_us": 1080.913, "pct_cuda_time": 0.6825721601253983, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 0.0009699493279779333, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1079.377, "pct_cuda_time": 0.6816022107974203, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2753.413, "cuda_time_us": 5109.5019999999995, "pct_cuda_time": 3.2265351765637407, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.729, "cuda_time_us": 77.888, "pct_cuda_time": 0.0491845138395477, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.888, "pct_cuda_time": 0.0491845138395477, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2005.9, "cuda_time_us": 1134.961, "pct_cuda_time": 0.7167022521036218, "trace": "" }, "children": [ { "entry": { "name": 
"QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 185.626, "cuda_time_us": 528.058, "pct_cuda_time": 0.33345670718318454, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.00046539886374982867, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 527.321, "pct_cuda_time": 0.33299130831943474, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.526, "cuda_time_us": 99.678, "pct_cuda_time": 0.06294440697538049, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 99.678, "pct_cuda_time": 0.06294440697538049, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 944.099, "cuda_time_us": 170.20600000000002, "pct_cuda_time": 0.10748124695170061, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 40.128, "pct_cuda_time": 0.025339926193423505, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 128.511, "pct_cuda_time": 0.08115179563006002, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 
512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.567, "pct_cuda_time": 0.000989525128217071, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 230.118, "cuda_time_us": 337.019, "pct_cuda_time": 0.21281989099335616, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0008284983843144847, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 335.707, "pct_cuda_time": 0.21199139260904168, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.753, "cuda_time_us": 75.71, "pct_cuda_time": 0.04780915600339149, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.71, "pct_cuda_time": 0.04780915600339149, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 515.452, "cuda_time_us": 3820.9429999999998, "pct_cuda_time": 2.4128392546171797, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 186.714, "cuda_time_us": 2427.584, "pct_cuda_time": 1.532964498313791, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2426.816, "pct_cuda_time": 1.532479523649802, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], 
bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.864, "cuda_time_us": 312.413, "pct_cuda_time": 0.19728175742289716, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 312.413, "pct_cuda_time": 0.19728175742289716, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 171.569, "cuda_time_us": 1080.946, "pct_cuda_time": 0.6825929988804915, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0008082911066482778, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1079.666, "pct_cuda_time": 0.6817847077738433, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2924.034, "cuda_time_us": 5107.196, "pct_cuda_time": 3.22507898961692, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.662, "cuda_time_us": 77.983, "pct_cuda_time": 0.04924450419511925, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.983, "pct_cuda_time": 0.04924450419511925, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2128.013, "cuda_time_us": 1133.873, "pct_cuda_time": 0.7160152046629709, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 165.535, "cuda_time_us": 528.217, "pct_cuda_time": 0.3335571120940885, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 527.449, "pct_cuda_time": 0.3330721374300995, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 484.561, "cuda_time_us": 99.71, "pct_cuda_time": 0.0629646142530467, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, 
c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 99.71, "pct_cuda_time": 0.0629646142530467, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1069.077, "cuda_time_us": 169.56699999999998, "pct_cuda_time": 0.10707773287580351, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 40.447, "pct_cuda_time": 0.025541367492658507, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 127.359, "pct_cuda_time": 0.08042433363407656, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.761, "pct_cuda_time": 0.0011120317490684508, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 
4096])", "cpu_time_us": 238.065, "cuda_time_us": 336.37899999999996, "pct_cuda_time": 0.212415745440032, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 335.643, "pct_cuda_time": 0.21195097805370927, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.09, "cuda_time_us": 75.103, "pct_cuda_time": 0.04742584920516062, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.103, "pct_cuda_time": 0.04742584920516062, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 529.574, "cuda_time_us": 3820.237, "pct_cuda_time": 2.412393431553669, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 186.95, "cuda_time_us": 2428.7999999999997, "pct_cuda_time": 1.5337323748651068, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.216, "pct_cuda_time": 0.0007678765513158637, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2427.584, "pct_cuda_time": 1.532964498313791, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.874, "cuda_time_us": 312.347, "pct_cuda_time": 0.19724007991271061, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 312.347, "pct_cuda_time": 0.19724007991271061, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 189.017, "cuda_time_us": 1079.0900000000001, "pct_cuda_time": 0.6814209767758517, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1078.354, "pct_cuda_time": 0.6809562093895288, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2785.17, "cuda_time_us": 5104.7029999999995, "pct_cuda_time": 3.2235047163912367, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.787, "cuda_time_us": 77.215, "pct_cuda_time": 0.04875952953113029, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.215, "pct_cuda_time": 0.04875952953113029, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1981.897, "cuda_time_us": 1134.866, "pct_cuda_time": 0.7166422617480502, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 176.563, "cuda_time_us": 529.1129999999999, "pct_cuda_time": 0.33412291586874227, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 0.0009699493279779333, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 527.577, "pct_cuda_time": 0.3331529665407644, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 465.154, "cuda_time_us": 99.263, "pct_cuda_time": 0.06268234384314687, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 99.263, "pct_cuda_time": 0.06268234384314687, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 945.241, "cuda_time_us": 169.502, "pct_cuda_time": 0.10703668684304406, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.743, "pct_cuda_time": 0.025096807384001957, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], 
bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 128.031, "pct_cuda_time": 0.08084868646506692, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.001091192993975175, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 202.289, "cuda_time_us": 336.988, "pct_cuda_time": 0.212800315193117, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0009093274949793123, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 335.548, "pct_cuda_time": 0.21189098769813772, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 93.221, "cuda_time_us": 75.903, "pct_cuda_time": 0.0479310311468158, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 75.903, "pct_cuda_time": 0.0479310311468158, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 541.636, "cuda_time_us": 3816.719, "pct_cuda_time": 2.4101718939652406, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 200.947, "cuda_time_us": 2416.449, "pct_cuda_time": 1.5259329971633782, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0004647673863227597, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2415.713, "pct_cuda_time": 1.5254682297770554, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.991, "cuda_time_us": 312.892, "pct_cuda_time": 0.19758423511046322, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 312.892, "pct_cuda_time": 0.19758423511046322, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 185.527, "cuda_time_us": 1087.378, "pct_cuda_time": 0.6866546616913991, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0008284983843144847, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1086.066, "pct_cuda_time": 0.6858261633070847, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2862.488, "cuda_time_us": 5062.174000000001, "pct_cuda_time": 3.1966486128954212, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.943, "cuda_time_us": 78.751, "pct_cuda_time": 0.04972947885910822, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 78.751, "pct_cuda_time": 0.04972947885910822, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 
4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2060.516, "cuda_time_us": 1128.784, "pct_cuda_time": 0.7128016160366168, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 186.674, "cuda_time_us": 526.777, "pct_cuda_time": 0.3326477845991092, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.00048497466398896667, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 526.009, "pct_cuda_time": 0.33216280993512026, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[7168, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 502.03, "cuda_time_us": 98.718, "pct_cuda_time": 0.06233818864539428, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 98.718, "pct_cuda_time": 0.06233818864539428, "trace": "_C::rotary_embedding(int64[7168], bfloat16[7168, 4096], bfloat16[7168, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 963.502, "cuda_time_us": 168.701, "pct_cuda_time": 0.1065308734239618, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 39.615, "pct_cuda_time": 0.02501597827333713, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[7168], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 127.55, 
"pct_cuda_time": 0.08054494582264674, "trace": "_vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 0.0009699493279779333, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], None, None, bfloat16[7168, 32, 128], int32[15], int32[15], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[7168, 32, 128], bfloat16[7168, 8, 128], bfloat16[7168, 8, 128], bfloat16[7168, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 223.819, "cuda_time_us": 334.58799999999997, "pct_cuda_time": 0.21128476936815146, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.76, "pct_cuda_time": 0.001111400271641382, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 332.828, "pct_cuda_time": 0.21017336909651013, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[7168, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 100.726, "cuda_time_us": 76.287, "pct_cuda_time": 0.04817351847881028, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 76.287, "pct_cuda_time": 0.04817351847881028, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 554.108, "cuda_time_us": 3778.3520000000003, "pct_cuda_time": 2.3859439995208858, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 192.701, "cuda_time_us": 2399.936, "pct_cuda_time": 1.5155054104101884, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0007880838289820708, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 2398.688, "pct_cuda_time": 1.5147173265812064, "trace": "mm(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[7168, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[7168, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 121.093, "cuda_time_us": 311.645, "pct_cuda_time": 0.19679678275890822, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 311.645, "pct_cuda_time": 0.19679678275890822, "trace": "_C::silu_and_mul(bfloat16[7168, 14336], bfloat16[7168, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 163.881, "cuda_time_us": 1066.7710000000002, "pct_cuda_time": 0.673641806351789, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.00046539886374982867, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1066.034, "pct_cuda_time": 0.6731764074880392, "trace": "mm(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[7168, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[7168, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.111, "cuda_time_us": 77.279, "pct_cuda_time": 0.048799944086462696, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 77.279, "pct_cuda_time": 0.048799944086462696, "trace": "_C::fused_add_rms_norm(bfloat16[7168, 4096], bfloat16[7168, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 516.884, "cuda_time_us": 366.139, "pct_cuda_time": 0.2312085136696045, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 11.328, "pct_cuda_time": 0.007153376293837258, "trace": "index_select(bfloat16[7168, 4096], 0, int64[14])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.0008689129396468984, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[14, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 353.435, "pct_cuda_time": 0.22318622443612035, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 
128256]) <- linear(bfloat16[14, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 63523.971, "cuda_time_us": 153.726, "pct_cuda_time": 0.09707449895360402, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.0018994841006234526, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.335, "pct_cuda_time": 0.0014744997922060377, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.0014953385472993136, "trace": "copy_(int32[14], int32[14], True) <- _to_copy(int32[14], 3, 0, None, None, True, None) <- to(int32[14], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.0015155458249655207, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.0015155458249655207, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.336, "pct_cuda_time": 0.0014751312696331068, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.336, "pct_cuda_time": 0.0014751312696331068, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 7.904, "pct_cuda_time": 0.004991197583553115, "trace": "copy_(float32[14, 128256], bfloat16[14, 128256], False) <- _to_copy(bfloat16[14, 128256], 6, None, None, None, False, None) <- to(bfloat16[14, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > 
>(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 10.816, "pct_cuda_time": 0.006830059851177946, "trace": "div_(float32[14, 128256], bfloat16[14, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 38.847, "pct_cuda_time": 0.02453100360934816, "trace": "_softmax(float32[14, 128256], -1, False) <- softmax(float32[14, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 31.392, "pct_cuda_time": 0.01982333939054901, "trace": "_log_softmax(float32[14, 128256], -1, False) <- log_softmax(float32[14, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.0013134730483034512, "trace": "copy_(int64[14], int32[14], False) <- _to_copy(int32[14], 4, None, None, None, False, None) <- to(int32[14], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 11.456, "pct_cuda_time": 0.007234205404502085, "trace": "index(float32[14, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 30.72, "pct_cuda_time": 0.019398986559558667, "trace": "argmax(float32[14, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.002101556877285522, "trace": "copy_(int64[14], int64[14], False) <- _to_copy(int64[14], 4, 0, None, None, False, None) <- to(int64[14], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] }, "decode_1": { "metadata": { "num_running_seqs": 14 }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", "cuda_time_us": 6612.205, "pct_cuda_time": 93.20143924947259, "invocations": 1 }, "children": [ { 
"entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 11.072, "pct_cuda_time": 0.15606387511732628, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 11.072, "pct_cuda_time": 0.15606387511732628, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 6598.060000000001, "pct_cuda_time": 93.0020603194207, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 202.75100000000012, "pct_cuda_time": 2.857849236263822, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.511, "pct_cuda_time": 0.06358418900417802, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 198.24000000000012, "pct_cuda_time": 2.7942650472596444, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 2089.731, "pct_cuda_time": 29.45552003367101, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 701.5899999999999, "pct_cuda_time": 9.8891667398451, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 701.5899999999999, "pct_cuda_time": 9.8891667398451, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 120.28600000000002, "pct_cuda_time": 1.6954750074388287, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 120.28600000000002, "pct_cuda_time": 1.6954750074388287, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 750.9959999999999, "pct_cuda_time": 10.585562315535725, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 82.62300000000002, "pct_cuda_time": 1.1646012964070496, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, 
flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 623.637, "pct_cuda_time": 8.79039079538873, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 44.73600000000001, "pct_cuda_time": 0.6305702237399486, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 516.859, "pct_cuda_time": 7.285315970851351, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 516.859, "pct_cuda_time": 7.285315970851351, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 4305.5779999999995, "pct_cuda_time": 60.688691049485854, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 2605.5350000000008, "pct_cuda_time": 36.725965395034585, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2605.5350000000008, "pct_cuda_time": 36.725965395034585, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 287.162, "pct_cuda_time": 4.047653044295669, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 287.162, "pct_cuda_time": 4.047653044295669, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 1412.8809999999999, "pct_cuda_time": 19.915072610155622, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 1412.8809999999999, "pct_cuda_time": 19.915072610155622, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 3.073, "pct_cuda_time": 0.04331505493456861, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 3.073, "pct_cuda_time": 0.04331505493456861, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 352.82599999999996, "pct_cuda_time": 4.973211055107096, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 8.191, "pct_cuda_time": 0.11545513015589053, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.735, "pct_cuda_time": 0.010360092865899099, "invocations": 1 }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 343.9, "pct_cuda_time": 4.847395832085306, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 129.5, "pct_cuda_time": 1.8253496954203177, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.343, "pct_cuda_time": 0.07531153222108691, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 7.136, "pct_cuda_time": 0.10058452066810336, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 9.856, "pct_cuda_time": 0.13892391195415174, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 35.327, "pct_cuda_time": 0.4979469396919965, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.447, "pct_cuda_time": 0.4009708323214036, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.792, "pct_cuda_time": 0.02525889308257304, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", 
"cuda_time_us": 10.4, "pct_cuda_time": 0.1465917902113614, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 28.287, "pct_cuda_time": 0.3987155740104596, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.912, "pct_cuda_time": 0.04104570125918119, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 80746.622, "cuda_time_us": 6612.205, "pct_cuda_time": 93.20143924947259, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 309.741, "cuda_time_us": 11.072, "pct_cuda_time": 0.15606387511732628, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 11.072, "pct_cuda_time": 0.15606387511732628, "trace": "index_select(bfloat16[128256, 4096], 0, int64[14]) <- embedding(bfloat16[128256, 4096], int64[14], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4491.29, "cuda_time_us": 214.014, "pct_cuda_time": 3.0166053259898367, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 328.355, "cuda_time_us": 4.511, "pct_cuda_time": 0.06358418900417802, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.511, "pct_cuda_time": 0.06358418900417802, "trace": "_C::rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 3333.039, "cuda_time_us": 72.19200000000001, "pct_cuda_time": 1.0175725498979427, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 637.549, "cuda_time_us": 27.712, "pct_cuda_time": 0.3906107394555045, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.712, "pct_cuda_time": 0.3906107394555045, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 1060.483, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1023.739, "cuda_time_us": 24.864, "pct_cuda_time": 0.35046714152070096, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 
const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.034730977988537935, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.088, "pct_cuda_time": 0.2972430453824221, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018493118149740976, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 279.225, "cuda_time_us": 15.872, "pct_cuda_time": 0.22372162444564692, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.872, "pct_cuda_time": 0.22372162444564692, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 
128.473, "cuda_time_us": 3.105, "pct_cuda_time": 0.04376610659675742, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.04376610659675742, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 573.709, "cuda_time_us": 134.20600000000002, "pct_cuda_time": 1.8916824804909587, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 188.773, "cuda_time_us": 80.831, "pct_cuda_time": 1.1393424033244763, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.831, "pct_cuda_time": 1.1393424033244763, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 139.046, "cuda_time_us": 9.087, "pct_cuda_time": 0.12808457669717702, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.087, "pct_cuda_time": 0.12808457669717702, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 164.549, "cuda_time_us": 44.288, "pct_cuda_time": 0.6242555004693051, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.288, "pct_cuda_time": 0.6242555004693051, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2608.587, "cuda_time_us": 204.86, "pct_cuda_time": 2.8875763598749518, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.724, "cuda_time_us": 3.232, "pct_cuda_time": 0.04555621788106924, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.04555621788106924, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1865.519, "cuda_time_us": 63.806, "pct_cuda_time": 0.8993688236755889, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 159.412, "cuda_time_us": 20.512, "pct_cuda_time": 0.2891241154630236, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, 
"cuda_time_us": 20.512, "pct_cuda_time": 0.2891241154630236, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 565.22, "cuda_time_us": 3.68, "pct_cuda_time": 0.051870941151712494, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.051870941151712494, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 773.287, "cuda_time_us": 23.583, "pct_cuda_time": 0.33241097966870536, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03608413297510434, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.552, "pct_cuda_time": 0.27559256559735945, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.471, "pct_cuda_time": 0.020734281096241598, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 
128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 194.396, "cuda_time_us": 16.031, "pct_cuda_time": 0.22596278739214753, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.031, "pct_cuda_time": 0.22596278739214753, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 92.306, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.311, "cuda_time_us": 134.654, "pct_cuda_time": 1.8979972037616015, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.276, "cuda_time_us": 81.311, "pct_cuda_time": 1.1461081782573084, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.311, "pct_cuda_time": 1.1461081782573084, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.111, "cuda_time_us": 9.088, "pct_cuda_time": 0.1280986720616204, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.088, "pct_cuda_time": 0.1280986720616204, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.923, "cuda_time_us": 44.255, "pct_cuda_time": 0.623790353442673, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.255, "pct_cuda_time": 0.623790353442673, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2444.085, "cuda_time_us": 205.117, "pct_cuda_time": 2.891198868536905, "trace": "" }, "children": [ { "entry": { 
"name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.933, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1736.427, "cuda_time_us": 64.447, "pct_cuda_time": 0.9084039522838085, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 159.585, "cuda_time_us": 20.927, "pct_cuda_time": 0.2949736917070346, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.927, "pct_cuda_time": 0.2949736917070346, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 519.081, "cuda_time_us": 3.936, "pct_cuda_time": 0.05547935444922293, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.936, "pct_cuda_time": 0.05547935444922293, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 725.332, "cuda_time_us": 23.392, "pct_cuda_time": 0.3297187650600159, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03563308131291554, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, 
false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.392, "pct_cuda_time": 0.27333730728641537, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.020748376460685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 180.441, "cuda_time_us": 16.192, "pct_cuda_time": 0.228232141067535, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.228232141067535, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.926, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 449.996, "cuda_time_us": 134.36599999999999, "pct_cuda_time": 1.8939377388019023, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 145.062, "cuda_time_us": 81.823, "pct_cuda_time": 1.1533250048523291, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.823, "pct_cuda_time": 1.1533250048523291, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.521, "cuda_time_us": 9.055, "pct_cuda_time": 0.1276335250349882, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.055, "pct_cuda_time": 0.1276335250349882, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.104, "cuda_time_us": 43.488, "pct_cuda_time": 0.612979208914585, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.488, "pct_cuda_time": 0.612979208914585, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2406.774, "cuda_time_us": 206.40000000000003, "pct_cuda_time": 2.9092832211177884, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.678, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1703.056, "cuda_time_us": 65.185, "pct_cuda_time": 0.9188063312430378, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.136, "cuda_time_us": 21.824, "pct_cuda_time": 0.30761723361276455, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.824, "pct_cuda_time": 0.30761723361276455, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 491.174, "cuda_time_us": 3.841, "pct_cuda_time": 0.05414029482709992, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.841, "pct_cuda_time": 0.05414029482709992, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 699.069, "cuda_time_us": 23.424, "pct_cuda_time": 0.33016981672220475, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03608413297510434, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 
128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.584, "pct_cuda_time": 0.27604361725954824, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01804206648755217, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 208.786, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.462, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 
0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 457.393, "cuda_time_us": 134.943, "pct_cuda_time": 1.9020707640857444, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.449, "cuda_time_us": 82.111, "pct_cuda_time": 1.1573844698120286, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.111, "pct_cuda_time": 1.1573844698120286, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.325, "cuda_time_us": 9.024, "pct_cuda_time": 0.1271965687372428, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.1271965687372428, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.823, "cuda_time_us": 43.808, "pct_cuda_time": 0.6174897255364731, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.808, "pct_cuda_time": 0.6174897255364731, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2429.75, "cuda_time_us": 205.023, "pct_cuda_time": 2.889873904279226, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.029, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1747.411, "cuda_time_us": 64.48, "pct_cuda_time": 0.9088690993104408, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.938, "cuda_time_us": 21.504, "pct_cuda_time": 0.3031067169908765, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.504, "pct_cuda_time": 0.3031067169908765, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 540.674, "cuda_time_us": 3.84, "pct_cuda_time": 
0.05412619946265652, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05412619946265652, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 734.912, "cuda_time_us": 23.296, "pct_cuda_time": 0.3283656100734495, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.036535184637293154, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.424, "pct_cuda_time": 0.2737883589486042, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01804206648755217, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 
128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 182.333, "cuda_time_us": 15.84, "pct_cuda_time": 0.22327057278345813, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.84, "pct_cuda_time": 0.22327057278345813, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.897, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.267, "cuda_time_us": 134.303, "pct_cuda_time": 1.8930497308419683, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 145.092, "cuda_time_us": 81.535, "pct_cuda_time": 1.14926553989263, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.535, "pct_cuda_time": 1.14926553989263, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.702, "cuda_time_us": 8.96, "pct_cuda_time": 0.12629446541286524, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.12629446541286524, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.317, "cuda_time_us": 43.808, "pct_cuda_time": 0.6174897255364731, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.808, "pct_cuda_time": 0.6174897255364731, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2523.409, "cuda_time_us": 206.331, "pct_cuda_time": 2.908310640971193, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.093, "cuda_time_us": 3.232, "pct_cuda_time": 0.04555621788106924, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 3.232, "pct_cuda_time": 0.04555621788106924, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1832.756, "cuda_time_us": 65.245, "pct_cuda_time": 0.9196520531096418, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.05, "cuda_time_us": 21.92, "pct_cuda_time": 0.30897038859933096, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.92, "pct_cuda_time": 0.30897038859933096, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 521.1, "cuda_time_us": 3.839, "pct_cuda_time": 0.05411210409821312, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.839, "pct_cuda_time": 0.05411210409821312, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 833.269, "cuda_time_us": 23.294999999999998, "pct_cuda_time": 0.3283515147090061, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.036535184637293154, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.2, "pct_cuda_time": 0.27063099731328255, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, 
int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.0211853327584304, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.509, "cuda_time_us": 16.191, "pct_cuda_time": 0.22821804570309157, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.191, "pct_cuda_time": 0.22821804570309157, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.578, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 440.553, "cuda_time_us": 134.68599999999998, "pct_cuda_time": 1.8984482554237903, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 144.539, "cuda_time_us": 81.151, "pct_cuda_time": 1.1438529199463643, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.151, "pct_cuda_time": 1.1438529199463643, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.612, "cuda_time_us": 8.8, "pct_cuda_time": 0.12403920710192119, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.8, "pct_cuda_time": 0.12403920710192119, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", 
"cpu_time_us": 144.28, "cuda_time_us": 44.735, "pct_cuda_time": 0.630556128375505, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.735, "pct_cuda_time": 0.630556128375505, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2397.218, "cuda_time_us": 206.461, "pct_cuda_time": 2.9101430383488354, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.343, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1689.054, "cuda_time_us": 65.886, "pct_cuda_time": 0.9286871817178611, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.976, "cuda_time_us": 21.855, "pct_cuda_time": 0.3080541899105099, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.855, "pct_cuda_time": 0.3080541899105099, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.904, "cuda_time_us": 3.872, "pct_cuda_time": 0.05457725112484532, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.05457725112484532, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 707.831, "cuda_time_us": 23.391, "pct_cuda_time": 0.32970466969557255, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03563308131291554, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, 
cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.391, "pct_cuda_time": 0.273323211921972, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.020748376460685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.219, "cuda_time_us": 16.768, "pct_cuda_time": 0.23635107098693348, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.768, "pct_cuda_time": 0.23635107098693348, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.271, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 474.23, "cuda_time_us": 134.335, "pct_cuda_time": 1.8935007825041574, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 
4096])", "cpu_time_us": 152.35, "cuda_time_us": 80.799, "pct_cuda_time": 1.1388913516622876, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.799, "pct_cuda_time": 1.1388913516622876, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.814, "cuda_time_us": 8.96, "pct_cuda_time": 0.12629446541286524, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.12629446541286524, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 164.862, "cuda_time_us": 44.576, "pct_cuda_time": 0.6283149654290044, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.576, "pct_cuda_time": 0.6283149654290044, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2381.257, "cuda_time_us": 206.94, "pct_cuda_time": 2.9168947179172235, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.527, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1697.908, "cuda_time_us": 65.34299999999999, "pct_cuda_time": 0.9210333988250949, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.123, "cuda_time_us": 21.759, "pct_cuda_time": 0.3067010349239435, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.759, "pct_cuda_time": 0.3067010349239435, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 516.513, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 
1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 708.013, "cuda_time_us": 23.264, "pct_cuda_time": 0.3279145584112607, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03563308131291554, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.456, "pct_cuda_time": 0.27423941061079304, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01804206648755217, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 178.879, "cuda_time_us": 16.576, "pct_cuda_time": 0.23364476101380063, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", 
"cpu_time_us": 0, "cuda_time_us": 16.576, "pct_cuda_time": 0.23364476101380063, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.368, "cuda_time_us": 3.231, "pct_cuda_time": 0.04554212251662583, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.231, "pct_cuda_time": 0.04554212251662583, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.854, "cuda_time_us": 135.23000000000002, "pct_cuda_time": 1.9061161336810004, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.782, "cuda_time_us": 81.983, "pct_cuda_time": 1.1555802631632732, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.983, "pct_cuda_time": 1.1555802631632732, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.661, "cuda_time_us": 8.96, "pct_cuda_time": 0.12629446541286524, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.12629446541286524, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.87, "cuda_time_us": 44.287, "pct_cuda_time": 0.6242414051048617, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.287, "pct_cuda_time": 0.6242414051048617, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2308.669, "cuda_time_us": 205.66200000000003, "pct_cuda_time": 2.898880842158559, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.44, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1658.021, "cuda_time_us": 64.51100000000001, "pct_cuda_time": 0.9093060556081863, "trace": "" }, 
"children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.245, "cuda_time_us": 21.536, "pct_cuda_time": 0.30355776865306533, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.536, "pct_cuda_time": 0.30355776865306533, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 491.884, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 712.111, "cuda_time_us": 23.103, "pct_cuda_time": 0.32564520473587333, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.036535184637293154, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.199, "pct_cuda_time": 0.27061690194883925, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018493118149740976, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 175.035, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.129, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 426.704, "cuda_time_us": 134.815, "pct_cuda_time": 1.900266557436989, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.261, "cuda_time_us": 81.023, "pct_cuda_time": 1.142048713297609, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.023, "pct_cuda_time": 1.142048713297609, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.847, "cuda_time_us": 8.864, "pct_cuda_time": 0.1249413104262988, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.864, "pct_cuda_time": 0.1249413104262988, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.31, "cuda_time_us": 44.928, "pct_cuda_time": 0.6332765337130811, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.928, "pct_cuda_time": 0.6332765337130811, "trace": 
"mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2303.394, "cuda_time_us": 206.62099999999998, "pct_cuda_time": 2.912398296659779, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.686, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1641.541, "cuda_time_us": 65.02300000000001, "pct_cuda_time": 0.9165228822032072, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 170.647, "cuda_time_us": 21.728, "pct_cuda_time": 0.30626407862619814, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.728, "pct_cuda_time": 0.30626407862619814, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 476.975, "cuda_time_us": 3.648, "pct_cuda_time": 0.05141988948952369, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05141988948952369, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 679.874, "cuda_time_us": 23.551000000000002, "pct_cuda_time": 0.3319599280065166, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.591, "pct_cuda_time": 0.03652108927284975, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, 
false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.488, "pct_cuda_time": 0.27469046227298183, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.020748376460685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.433, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.754, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 449.319, "cuda_time_us": 135.29399999999998, "pct_cuda_time": 1.9070182370053776, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.925, "cuda_time_us": 81.823, "pct_cuda_time": 1.1533250048523291, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.823, 
"pct_cuda_time": 1.1533250048523291, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.341, "cuda_time_us": 9.12, "pct_cuda_time": 0.12854972372380924, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.12854972372380924, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.048, "cuda_time_us": 44.351, "pct_cuda_time": 0.6251435084292394, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.351, "pct_cuda_time": 0.6251435084292394, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2405.213, "cuda_time_us": 205.43800000000002, "pct_cuda_time": 2.895723480523237, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.583, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1742.775, "cuda_time_us": 64.543, "pct_cuda_time": 0.909757107270375, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.48, "cuda_time_us": 21.568, "pct_cuda_time": 0.3040088203152541, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.568, "pct_cuda_time": 0.3040088203152541, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 514.465, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 747.123, "cuda_time_us": 23.295, "pct_cuda_time": 0.3283515147090062, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, 
(vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.495, "pct_cuda_time": 0.035167934286283335, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.328, "pct_cuda_time": 0.2724352039620378, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.020748376460685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.308, "cuda_time_us": 15.904, "pct_cuda_time": 0.22417267610783573, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.904, "pct_cuda_time": 0.22417267610783573, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.877, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 439.028, "cuda_time_us": 134.687, "pct_cuda_time": 1.8984623507882339, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.227, "cuda_time_us": 81.503, "pct_cuda_time": 1.1488144882304412, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.503, "pct_cuda_time": 1.1488144882304412, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.134, "cuda_time_us": 8.928, "pct_cuda_time": 0.12584341375067642, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12584341375067642, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.592, "cuda_time_us": 44.256, "pct_cuda_time": 0.6238044488071163, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.256, "pct_cuda_time": 0.6238044488071163, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2417.627, "cuda_time_us": 204.957, "pct_cuda_time": 2.888943610225961, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.863, "cuda_time_us": 3.04, "pct_cuda_time": 0.042849907907936406, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.042849907907936406, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1731.62, "cuda_time_us": 64.702, "pct_cuda_time": 0.9119982702168754, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.069, "cuda_time_us": 21.408, "pct_cuda_time": 0.3017535620043101, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.408, "pct_cuda_time": 0.3017535620043101, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.571, "cuda_time_us": 3.84, "pct_cuda_time": 0.05412619946265652, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05412619946265652, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 715.565, "cuda_time_us": 23.262, "pct_cuda_time": 0.32788636768237395, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.495, "pct_cuda_time": 0.035167934286283335, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.487, "pct_cuda_time": 0.2746763669085384, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01804206648755217, "trace": 
"fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 176.785, "cuda_time_us": 16.192, "pct_cuda_time": 0.228232141067535, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.228232141067535, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.174, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 456.184, "cuda_time_us": 134.143, "pct_cuda_time": 1.8907944725310242, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 139.266, "cuda_time_us": 80.607, "pct_cuda_time": 1.1361850416891548, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.607, "pct_cuda_time": 1.1361850416891548, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.165, "cuda_time_us": 8.896, "pct_cuda_time": 0.1253923620884876, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.1253923620884876, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 165.073, "cuda_time_us": 44.64, "pct_cuda_time": 0.629217068753382, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.64, "pct_cuda_time": 0.629217068753382, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": 
"LlamaDecoderLayer", "cpu_time_us": 2484.388, "cuda_time_us": 205.85199999999998, "pct_cuda_time": 2.901558961402804, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.274, "cuda_time_us": 3.167, "pct_cuda_time": 0.04464001919224823, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.167, "pct_cuda_time": 0.04464001919224823, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1791.633, "cuda_time_us": 65.695, "pct_cuda_time": 0.9259949671091717, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.436, "cuda_time_us": 22.112, "pct_cuda_time": 0.3116766985724638, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.112, "pct_cuda_time": 0.3116766985724638, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.118, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 829.495, "cuda_time_us": 23.423000000000002, "pct_cuda_time": 0.33015572135776133, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.816, "pct_cuda_time": 0.03969254627261478, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, 
false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.327, "pct_cuda_time": 0.2724211085975944, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01804206648755217, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.838, "cuda_time_us": 16.416, "pct_cuda_time": 0.23138950270285663, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.416, "pct_cuda_time": 0.23138950270285663, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.184, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.99, "cuda_time_us": 133.886, "pct_cuda_time": 1.8871719638690703, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 151.69, "cuda_time_us": 81.215, "pct_cuda_time": 1.144755023270742, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.215, "pct_cuda_time": 1.144755023270742, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { 
"name": "SiluAndMul", "cpu_time_us": 109.508, "cuda_time_us": 9.088, "pct_cuda_time": 0.1280986720616204, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.088, "pct_cuda_time": 0.1280986720616204, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.285, "cuda_time_us": 43.583, "pct_cuda_time": 0.614318268536708, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.583, "pct_cuda_time": 0.614318268536708, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2394.618, "cuda_time_us": 205.72500000000002, "pct_cuda_time": 2.8997688501184933, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.129, "cuda_time_us": 3.327, "pct_cuda_time": 0.046895277503192244, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.327, "pct_cuda_time": 0.046895277503192244, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1724.043, "cuda_time_us": 65.21600000000001, "pct_cuda_time": 0.9192432875407833, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.969, "cuda_time_us": 21.888, "pct_cuda_time": 0.3085193369371422, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.888, "pct_cuda_time": 0.3085193369371422, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 509.614, "cuda_time_us": 3.648, "pct_cuda_time": 0.05141988948952369, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05141988948952369, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 726.565, "cuda_time_us": 23.584, "pct_cuda_time": 0.3324250750331488, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, 
"pct_cuda_time": 0.036535184637293154, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.488, "pct_cuda_time": 0.27469046227298183, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0211994281228738, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.066, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.818, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, 
void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 431.416, "cuda_time_us": 134.078, "pct_cuda_time": 1.889878273842203, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.564, "cuda_time_us": 81.919, "pct_cuda_time": 1.1546781598388955, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.919, "pct_cuda_time": 1.1546781598388955, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.419, "cuda_time_us": 8.736, "pct_cuda_time": 0.12313710377754358, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.736, "pct_cuda_time": 0.12313710377754358, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.234, "cuda_time_us": 43.423, "pct_cuda_time": 0.612063010225764, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.423, "pct_cuda_time": 0.612063010225764, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2455.627, "cuda_time_us": 206.494, "pct_cuda_time": 2.9106081853754673, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.078, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1754.155, "cuda_time_us": 65.951, "pct_cuda_time": 0.9296033804066822, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 165.384, "cuda_time_us": 22.752, "pct_cuda_time": 0.32069773181623984, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.752, "pct_cuda_time": 0.32069773181623984, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], 
bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 502.684, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 746.15, "cuda_time_us": 23.423000000000002, "pct_cuda_time": 0.33015572135776133, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03608413297510434, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.359, "pct_cuda_time": 0.2728721602597832, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0211994281228738, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 
0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 194.893, "cuda_time_us": 16.0, "pct_cuda_time": 0.22552583109440213, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.0, "pct_cuda_time": 0.22552583109440213, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 95.732, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 454.964, "cuda_time_us": 134.303, "pct_cuda_time": 1.8930497308419683, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 151.212, "cuda_time_us": 81.663, "pct_cuda_time": 1.1510697465413853, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.663, "pct_cuda_time": 1.1510697465413853, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.767, "cuda_time_us": 8.992, "pct_cuda_time": 0.12674551707505402, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.12674551707505402, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.222, "cuda_time_us": 43.648, "pct_cuda_time": 0.6152344672255291, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.648, "pct_cuda_time": 0.6152344672255291, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2327.841, "cuda_time_us": 207.805, "pct_cuda_time": 2.929087208160765, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.836, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1660.801, "cuda_time_us": 65.728, "pct_cuda_time": 0.9264601141358039, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.221, "cuda_time_us": 21.664, "pct_cuda_time": 0.3053619753018206, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.664, "pct_cuda_time": 0.3053619753018206, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 496.291, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 696.74, "cuda_time_us": 24.032, "pct_cuda_time": 0.33873979830379203, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.03788833962385956, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.064, "pct_cuda_time": 
0.2828093921923803, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01804206648755217, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 179.195, "cuda_time_us": 16.288, "pct_cuda_time": 0.2295852960541014, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.288, "pct_cuda_time": 0.2295852960541014, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.248, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 437.902, "cuda_time_us": 135.741, "pct_cuda_time": 1.9133188649115778, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.749, "cuda_time_us": 82.111, "pct_cuda_time": 1.1573844698120286, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.111, "pct_cuda_time": 1.1573844698120286, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.354, "cuda_time_us": 9.119, "pct_cuda_time": 0.12853562835936583, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.119, "pct_cuda_time": 
0.12853562835936583, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.707, "cuda_time_us": 44.511, "pct_cuda_time": 0.6273987667401835, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.511, "pct_cuda_time": 0.6273987667401835, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2282.12, "cuda_time_us": 204.734, "pct_cuda_time": 2.8858003439550832, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.263, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1605.203, "cuda_time_us": 64.704, "pct_cuda_time": 0.9120264609457622, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.517, "cuda_time_us": 21.759, "pct_cuda_time": 0.3067010349239435, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.759, "pct_cuda_time": 0.3067010349239435, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 454.403, "cuda_time_us": 3.743, "pct_cuda_time": 0.0527589491116467, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.743, "pct_cuda_time": 0.0527589491116467, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 698.367, "cuda_time_us": 23.201999999999998, "pct_cuda_time": 0.32704064581576986, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.593, "pct_cuda_time": 0.036549280001736545, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], 
bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.264, "pct_cuda_time": 0.27153310063766023, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.345, "pct_cuda_time": 0.018958265176373182, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.885, "cuda_time_us": 16.0, "pct_cuda_time": 0.22552583109440213, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.0, "pct_cuda_time": 0.22552583109440213, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.334, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": 
"LlamaMLP", "cpu_time_us": 460.427, "cuda_time_us": 133.854, "pct_cuda_time": 1.886720912206882, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.55, "cuda_time_us": 81.247, "pct_cuda_time": 1.1452060749329307, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.247, "pct_cuda_time": 1.1452060749329307, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 122.153, "cuda_time_us": 8.736, "pct_cuda_time": 0.12313710377754358, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.736, "pct_cuda_time": 0.12313710377754358, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.486, "cuda_time_us": 43.871, "pct_cuda_time": 0.6183777334964073, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.871, "pct_cuda_time": 0.6183777334964073, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2425.199, "cuda_time_us": 206.04700000000003, "pct_cuda_time": 2.904307557469268, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.31, "cuda_time_us": 3.456, "pct_cuda_time": 0.04871357951639087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.456, "pct_cuda_time": 0.04871357951639087, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1718.23, "cuda_time_us": 65.05600000000001, "pct_cuda_time": 0.9169880292298394, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.228, "cuda_time_us": 21.728, "pct_cuda_time": 0.30626407862619814, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.728, "pct_cuda_time": 0.30626407862619814, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 478.454, "cuda_time_us": 3.648, "pct_cuda_time": 0.05141988948952369, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05141988948952369, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 749.163, "cuda_time_us": 23.488, "pct_cuda_time": 0.3310719200465823, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.036535184637293154, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.392, "pct_cuda_time": 0.27333730728641537, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0211994281228738, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 198.809, "cuda_time_us": 
16.192, "pct_cuda_time": 0.228232141067535, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.228232141067535, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.857, "cuda_time_us": 3.105, "pct_cuda_time": 0.04376610659675742, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.04376610659675742, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 468.446, "cuda_time_us": 134.43, "pct_cuda_time": 1.89483984212628, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.794, "cuda_time_us": 80.927, "pct_cuda_time": 1.1406955583110427, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.927, "pct_cuda_time": 1.1406955583110427, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.957, "cuda_time_us": 8.864, "pct_cuda_time": 0.1249413104262988, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.864, "pct_cuda_time": 0.1249413104262988, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.904, "cuda_time_us": 44.639, "pct_cuda_time": 0.6292029733889386, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.639, "pct_cuda_time": 0.6292029733889386, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2397.5, "cuda_time_us": 206.113, "pct_cuda_time": 2.905237851522532, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.451, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" 
}, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1694.461, "cuda_time_us": 65.536, "pct_cuda_time": 0.9237538041626713, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.747, "cuda_time_us": 21.856, "pct_cuda_time": 0.3080682852749534, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.856, "pct_cuda_time": 0.3080682852749534, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 470.053, "cuda_time_us": 3.68, "pct_cuda_time": 0.051870941151712494, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.051870941151712494, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 743.06, "cuda_time_us": 23.904, "pct_cuda_time": 0.33693559165503684, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.625, "pct_cuda_time": 0.03700033166392536, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.647, "pct_cuda_time": 0.27693162521948245, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.023003634771629016, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 184.105, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.096, "pct_cuda_time": 0.22687898608096854, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.921, "cuda_time_us": 3.105, "pct_cuda_time": 0.04376610659675742, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.04376610659675742, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 461.757, "cuda_time_us": 134.272, "pct_cuda_time": 1.8926127745442227, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 148.896, "cuda_time_us": 81.567, "pct_cuda_time": 1.1497165915548186, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.567, "pct_cuda_time": 1.1497165915548186, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.952, "cuda_time_us": 9.024, "pct_cuda_time": 0.1271965687372428, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.1271965687372428, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.937, "cuda_time_us": 43.681, "pct_cuda_time": 0.6156996142521612, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.681, "pct_cuda_time": 0.6156996142521612, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2519.329, "cuda_time_us": 205.852, "pct_cuda_time": 2.9015589614028046, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.602, "cuda_time_us": 3.04, "pct_cuda_time": 0.042849907907936406, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.042849907907936406, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1689.162, "cuda_time_us": 65.374, "pct_cuda_time": 0.9214703551228403, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.243, "cuda_time_us": 21.471, "pct_cuda_time": 0.3026415699642443, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.471, "pct_cuda_time": 0.3026415699642443, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 460.751, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 749.847, "cuda_time_us": 23.903, "pct_cuda_time": 0.33692149629059337, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.752, "pct_cuda_time": 0.038790442948237164, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, 
cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.871, "pct_cuda_time": 0.2800889868548041, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01804206648755217, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 186.19, "cuda_time_us": 16.256, "pct_cuda_time": 0.2291342443919126, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.256, "pct_cuda_time": 0.2291342443919126, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.285, "cuda_time_us": 3.135, "pct_cuda_time": 0.04418896753005942, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.04418896753005942, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 580.388, "cuda_time_us": 134.303, "pct_cuda_time": 1.8930497308419683, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 147.867, "cuda_time_us": 82.015, "pct_cuda_time": 1.1560313148254622, "trace": "" }, "children": [ { "entry": { 
"name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.015, "pct_cuda_time": 1.1560313148254622, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.254, "cuda_time_us": 8.833, "pct_cuda_time": 0.12450435412855339, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.833, "pct_cuda_time": 0.12450435412855339, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 268.892, "cuda_time_us": 43.455, "pct_cuda_time": 0.6125140618879529, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.455, "pct_cuda_time": 0.6125140618879529, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2427.162, "cuda_time_us": 205.98200000000003, "pct_cuda_time": 2.903391358780447, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.944, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1755.005, "cuda_time_us": 65.152, "pct_cuda_time": 0.9183411842164056, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 166.893, "cuda_time_us": 21.888, "pct_cuda_time": 0.3085193369371422, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.888, "pct_cuda_time": 0.3085193369371422, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 525.01, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 719.827, 
"cuda_time_us": 23.264, "pct_cuda_time": 0.3279145584112607, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.036535184637293154, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.392, "pct_cuda_time": 0.27333730728641537, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01804206648755217, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 179.983, "cuda_time_us": 16.256, "pct_cuda_time": 0.2291342443919126, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.256, "pct_cuda_time": 0.2291342443919126, "trace": "mm(bfloat16[14, 4096], 
bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.598, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 439.184, "cuda_time_us": 134.622, "pct_cuda_time": 1.8975461520994132, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.902, "cuda_time_us": 81.727, "pct_cuda_time": 1.151971849865763, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.727, "pct_cuda_time": 1.151971849865763, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.969, "cuda_time_us": 8.992, "pct_cuda_time": 0.12674551707505402, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.12674551707505402, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.632, "cuda_time_us": 43.903, "pct_cuda_time": 0.6188287851585961, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.903, "pct_cuda_time": 0.6188287851585961, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2328.107, "cuda_time_us": 205.661, "pct_cuda_time": 2.898866746794115, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.652, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1629.898, "cuda_time_us": 64.703, "pct_cuda_time": 0.9120123655813189, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.356, "cuda_time_us": 21.76, "pct_cuda_time": 
0.306715130288387, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.76, "pct_cuda_time": 0.306715130288387, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.466, "cuda_time_us": 3.648, "pct_cuda_time": 0.05141988948952369, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05141988948952369, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 680.634, "cuda_time_us": 23.263, "pct_cuda_time": 0.32790046304681736, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03608413297510434, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.199, "pct_cuda_time": 0.27061690194883925, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, 
"cuda_time_us": 1.504, "pct_cuda_time": 0.0211994281228738, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 170.753, "cuda_time_us": 16.032, "pct_cuda_time": 0.22597688275659095, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.032, "pct_cuda_time": 0.22597688275659095, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.626, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 450.415, "cuda_time_us": 134.68599999999998, "pct_cuda_time": 1.8984482554237903, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 144.312, "cuda_time_us": 81.118, "pct_cuda_time": 1.1433877729197321, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.118, "pct_cuda_time": 1.1433877729197321, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.905, "cuda_time_us": 8.928, "pct_cuda_time": 0.12584341375067642, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12584341375067642, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.138, "cuda_time_us": 44.64, "pct_cuda_time": 0.629217068753382, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.64, "pct_cuda_time": 0.629217068753382, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 
14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2334.55, "cuda_time_us": 207.482, "pct_cuda_time": 2.924534405445547, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.702, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1661.698, "cuda_time_us": 65.853, "pct_cuda_time": 0.928222034691229, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.812, "cuda_time_us": 21.503, "pct_cuda_time": 0.3030926216264331, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.503, "pct_cuda_time": 0.3030926216264331, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 467.529, "cuda_time_us": 3.712, "pct_cuda_time": 0.052321992813901305, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.052321992813901305, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 711.748, "cuda_time_us": 23.775, "pct_cuda_time": 0.3351172896418382, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03563308131291554, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, 
false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.775, "pct_cuda_time": 0.27873583186823764, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.020748376460685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.675, "cuda_time_us": 16.863, "pct_cuda_time": 0.23769013060905647, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.863, "pct_cuda_time": 0.23769013060905647, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.05, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 445.323, "cuda_time_us": 135.357, "pct_cuda_time": 1.9079062449653121, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 149.314, "cuda_time_us": 81.566, "pct_cuda_time": 1.1497024961903755, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.566, "pct_cuda_time": 1.1497024961903755, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 
4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.432, "cuda_time_us": 9.28, "pct_cuda_time": 0.13080498203475324, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.28, "pct_cuda_time": 0.13080498203475324, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.944, "cuda_time_us": 44.511, "pct_cuda_time": 0.6273987667401835, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.511, "pct_cuda_time": 0.6273987667401835, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2389.595, "cuda_time_us": 205.565, "pct_cuda_time": 2.8975135918075487, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.334, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1724.523, "cuda_time_us": 64.543, "pct_cuda_time": 0.909757107270375, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.91, "cuda_time_us": 21.504, "pct_cuda_time": 0.3031067169908765, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.504, "pct_cuda_time": 0.3031067169908765, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 527.113, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 727.128, "cuda_time_us": 23.071, "pct_cuda_time": 0.32519415307368454, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, 
"cuda_time_us": 2.495, "pct_cuda_time": 0.035167934286283335, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.296, "pct_cuda_time": 0.27198415229984896, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01804206648755217, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 180.755, "cuda_time_us": 16.192, "pct_cuda_time": 0.228232141067535, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.228232141067535, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.929, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 437.664, "cuda_time_us": 134.75, "pct_cuda_time": 1.899350358748168, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 146.86, "cuda_time_us": 81.183, "pct_cuda_time": 1.1443039716085532, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.183, "pct_cuda_time": 1.1443039716085532, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.824, "cuda_time_us": 9.184, "pct_cuda_time": 0.1294518270481868, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.1294518270481868, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.006, "cuda_time_us": 44.383, "pct_cuda_time": 0.6255945600914282, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.383, "pct_cuda_time": 0.6255945600914282, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2264.581, "cuda_time_us": 205.24599999999998, "pct_cuda_time": 2.8930171705501038, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.856, "cuda_time_us": 3.137, "pct_cuda_time": 0.044217158258946225, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.044217158258946225, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1614.45, "cuda_time_us": 64.542, "pct_cuda_time": 0.9097430119059314, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.986, "cuda_time_us": 21.823, "pct_cuda_time": 0.30760313824832114, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.823, "pct_cuda_time": 0.30760313824832114, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- 
matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 491.254, "cuda_time_us": 3.712, "pct_cuda_time": 0.052321992813901305, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.052321992813901305, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 671.706, "cuda_time_us": 23.104, "pct_cuda_time": 0.3256593001003167, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.036535184637293154, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.2, "pct_cuda_time": 0.27063099731328255, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018493118149740976, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, 
None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 170.99, "cuda_time_us": 15.903, "pct_cuda_time": 0.22415858074339234, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.903, "pct_cuda_time": 0.22415858074339234, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.065, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 428.356, "cuda_time_us": 134.463, "pct_cuda_time": 1.8953049891529121, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 140.075, "cuda_time_us": 81.375, "pct_cuda_time": 1.147010281581686, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.375, "pct_cuda_time": 1.147010281581686, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.288, "cuda_time_us": 8.896, "pct_cuda_time": 0.1253923620884876, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.1253923620884876, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.333, "cuda_time_us": 44.192, "pct_cuda_time": 0.6229023454827388, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.192, "pct_cuda_time": 0.6229023454827388, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2392.489, "cuda_time_us": 206.94400000000002, "pct_cuda_time": 2.9169510993749976, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.619, "cuda_time_us": 3.2, "pct_cuda_time": 
0.04510516621888044, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1719.456, "cuda_time_us": 65.792, "pct_cuda_time": 0.9273622174601817, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 156.336, "cuda_time_us": 22.816, "pct_cuda_time": 0.32159983514061746, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.816, "pct_cuda_time": 0.32159983514061746, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 501.304, "cuda_time_us": 3.872, "pct_cuda_time": 0.05457725112484532, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.05457725112484532, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 714.083, "cuda_time_us": 23.169, "pct_cuda_time": 0.3265754987891377, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.593, "pct_cuda_time": 0.036549280001736545, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", 
"cpu_time_us": 0, "cuda_time_us": 19.104, "pct_cuda_time": 0.26927784232671614, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.020748376460685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 196.043, "cuda_time_us": 15.935, "pct_cuda_time": 0.22460963240558116, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.935, "pct_cuda_time": 0.22460963240558116, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.166, "cuda_time_us": 3.456, "pct_cuda_time": 0.04871357951639087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.456, "pct_cuda_time": 0.04871357951639087, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.808, "cuda_time_us": 134.496, "pct_cuda_time": 1.8957701361795445, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 150.522, "cuda_time_us": 80.768, "pct_cuda_time": 1.138454395364542, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.768, "pct_cuda_time": 1.138454395364542, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.638, "cuda_time_us": 9.152, "pct_cuda_time": 0.12900077538599802, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", 
"cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.12900077538599802, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.561, "cuda_time_us": 44.576, "pct_cuda_time": 0.6283149654290044, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.576, "pct_cuda_time": 0.6283149654290044, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2332.154, "cuda_time_us": 206.332, "pct_cuda_time": 2.9083247363356364, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.594, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1627.95, "cuda_time_us": 64.765, "pct_cuda_time": 0.9128862781768098, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.823, "cuda_time_us": 21.567, "pct_cuda_time": 0.3039947249508107, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.567, "pct_cuda_time": 0.3039947249508107, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 483.262, "cuda_time_us": 3.711, "pct_cuda_time": 0.052307897449457894, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.711, "pct_cuda_time": 0.052307897449457894, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 683.721, "cuda_time_us": 23.487000000000002, "pct_cuda_time": 0.331057824682139, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.495, "pct_cuda_time": 0.035167934286283335, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 
128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.52, "pct_cuda_time": 0.2751415139351706, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.020748376460685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 176.547, "cuda_time_us": 16.0, "pct_cuda_time": 0.22552583109440213, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.0, "pct_cuda_time": 0.22552583109440213, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.11, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, 
"children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 438.2, "cuda_time_us": 135.295, "pct_cuda_time": 1.907032332369821, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.435, "cuda_time_us": 82.335, "pct_cuda_time": 1.1605418314473501, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.335, "pct_cuda_time": 1.1605418314473501, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.378, "cuda_time_us": 9.024, "pct_cuda_time": 0.1271965687372428, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.1271965687372428, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.52, "cuda_time_us": 43.936, "pct_cuda_time": 0.6192939321852283, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.936, "pct_cuda_time": 0.6192939321852283, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2380.123, "cuda_time_us": 205.49899999999997, "pct_cuda_time": 2.896583297754284, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.158, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1731.144, "cuda_time_us": 65.598, "pct_cuda_time": 0.9246277167581619, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.667, "cuda_time_us": 21.823, "pct_cuda_time": 0.30760313824832114, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.823, "pct_cuda_time": 0.30760313824832114, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 472.62, "cuda_time_us": 3.84, "pct_cuda_time": 0.05412619946265652, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, 
c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05412619946265652, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 804.131, "cuda_time_us": 23.615, "pct_cuda_time": 0.33286203133089415, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.03788833962385956, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.519, "pct_cuda_time": 0.2751274185707272, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.01984627313630739, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 
172.45, "cuda_time_us": 16.32, "pct_cuda_time": 0.23003634771629022, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.32, "pct_cuda_time": 0.23003634771629022, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.923, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04420306289450282, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 427.207, "cuda_time_us": 133.69299999999998, "pct_cuda_time": 1.8844515585314938, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 138.478, "cuda_time_us": 80.511, "pct_cuda_time": 1.1348318867025882, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.511, "pct_cuda_time": 1.1348318867025882, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.853, "cuda_time_us": 8.895, "pct_cuda_time": 0.12537826672404417, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.895, "pct_cuda_time": 0.12537826672404417, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.312, "cuda_time_us": 44.287, "pct_cuda_time": 0.6242414051048617, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.287, "pct_cuda_time": 0.6242414051048617, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2371.841, "cuda_time_us": 205.69299999999998, "pct_cuda_time": 2.8993177984563037, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.802, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04330095957012522, "trace": 
"_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1705.688, "cuda_time_us": 64.95899999999999, "pct_cuda_time": 0.9156207788788291, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.736, "cuda_time_us": 21.663, "pct_cuda_time": 0.3053478799373771, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.663, "pct_cuda_time": 0.3053478799373771, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 515.197, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05277304447609011, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 726.409, "cuda_time_us": 23.391999999999996, "pct_cuda_time": 0.3297187650600159, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.036535184637293154, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.487, "pct_cuda_time": 0.2746763669085384, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, 
None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.018507213514184374, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.63, "cuda_time_us": 16.16, "pct_cuda_time": 0.2277810894053462, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.16, "pct_cuda_time": 0.2277810894053462, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.877, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 440.82, "cuda_time_us": 134.462, "pct_cuda_time": 1.8952908937884687, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 140.55, "cuda_time_us": 81.311, "pct_cuda_time": 1.1461081782573084, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.311, "pct_cuda_time": 1.1461081782573084, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.141, "cuda_time_us": 9.119, "pct_cuda_time": 0.12853562835936583, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.119, "pct_cuda_time": 0.12853562835936583, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.03, "cuda_time_us": 44.032, "pct_cuda_time": 0.6206470871717946, 
"trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.032, "pct_cuda_time": 0.6206470871717946, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2426.896, "cuda_time_us": 205.24699999999999, "pct_cuda_time": 2.8930312659145474, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.008, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04510516621888044, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1742.453, "cuda_time_us": 64.993, "pct_cuda_time": 0.9161000212699049, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.828, "cuda_time_us": 21.856, "pct_cuda_time": 0.3080682852749534, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.856, "pct_cuda_time": 0.3080682852749534, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 499.136, "cuda_time_us": 3.808, "pct_cuda_time": 0.05367514780046771, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05367514780046771, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 708.609, "cuda_time_us": 23.329, "pct_cuda_time": 0.32883075710008175, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.561, "pct_cuda_time": 0.03609822833954774, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, 
false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.264, "pct_cuda_time": 0.27153310063766023, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0211994281228738, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 248.133, "cuda_time_us": 16.0, "pct_cuda_time": 0.22552583109440213, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.0, "pct_cuda_time": 0.22552583109440213, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.743, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.272, "cuda_time_us": 133.95, "pct_cuda_time": 1.888074067193448, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 147.858, "cuda_time_us": 81.407, "pct_cuda_time": 
1.1474613332438746, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.407, "pct_cuda_time": 1.1474613332438746, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.605, "cuda_time_us": 8.799, "pct_cuda_time": 0.12402511173747778, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.799, "pct_cuda_time": 0.12402511173747778, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.707, "cuda_time_us": 43.744, "pct_cuda_time": 0.6165876222120955, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.744, "pct_cuda_time": 0.6165876222120955, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2390.317, "cuda_time_us": 206.68400000000003, "pct_cuda_time": 2.9132863046197137, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.7, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.044654114556691625, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1724.9, "cuda_time_us": 65.28, "pct_cuda_time": 0.9201453908651609, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.588, "cuda_time_us": 22.208, "pct_cuda_time": 0.3130298535590302, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.208, "pct_cuda_time": 0.3130298535590302, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 522.689, "cuda_time_us": 3.68, "pct_cuda_time": 0.051870941151712494, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.051870941151712494, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": 
{ "name": "Attention", "cpu_time_us": 718.815, "cuda_time_us": 23.52, "pct_cuda_time": 0.33152297170877115, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.03518202965072673, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.552, "pct_cuda_time": 0.27559256559735945, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.020748376460685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 181.406, "cuda_time_us": 15.872, "pct_cuda_time": 0.22372162444564692, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.872, "pct_cuda_time": 0.22372162444564692, 
"trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.077, "cuda_time_us": 3.071, "pct_cuda_time": 0.04328686420568182, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.04328686420568182, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 425.948, "cuda_time_us": 135.16500000000002, "pct_cuda_time": 1.9051999349921795, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 140.368, "cuda_time_us": 80.991, "pct_cuda_time": 1.1415976616354204, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.991, "pct_cuda_time": 1.1415976616354204, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.157, "cuda_time_us": 8.927, "pct_cuda_time": 0.125829318386233, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.927, "pct_cuda_time": 0.125829318386233, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.783, "cuda_time_us": 45.247, "pct_cuda_time": 0.6377729549705259, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 45.247, "pct_cuda_time": 0.6377729549705259, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2323.915, "cuda_time_us": 205.279, "pct_cuda_time": 2.893482317576736, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.524, "cuda_time_us": 3.137, "pct_cuda_time": 0.044217158258946225, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.044217158258946225, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1657.884, "cuda_time_us": 64.928, "pct_cuda_time": 0.915183822581084, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.97, 
"cuda_time_us": 21.696, "pct_cuda_time": 0.3058130269640093, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.696, "pct_cuda_time": 0.3058130269640093, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 501.901, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.0532240961382789, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 676.022, "cuda_time_us": 23.328, "pct_cuda_time": 0.32881666173563834, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.03788833962385956, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.328, "pct_cuda_time": 0.2724352039620378, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, 
at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018493118149740976, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.313, "cuda_time_us": 16.128, "pct_cuda_time": 0.22733003774315735, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.128, "pct_cuda_time": 0.22733003774315735, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.025, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04375201123231402, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 437.945, "cuda_time_us": 134.11, "pct_cuda_time": 1.8903293255043923, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.131, "cuda_time_us": 82.079, "pct_cuda_time": 1.1569334181498394, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.079, "pct_cuda_time": 1.1569334181498394, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.572, "cuda_time_us": 8.832, "pct_cuda_time": 0.12449025876411, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.832, "pct_cuda_time": 0.12449025876411, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.209, "cuda_time_us": 43.199, "pct_cuda_time": 0.6089056485904424, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.199, "pct_cuda_time": 0.6089056485904424, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- 
linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.873, "cuda_time_us": 3.073, "pct_cuda_time": 0.04331505493456861, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.073, "pct_cuda_time": 0.04331505493456861, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 487.119, "cuda_time_us": 352.82599999999996, "pct_cuda_time": 4.973211055107096, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 8.191, "pct_cuda_time": 0.11545513015589053, "trace": "index_select(bfloat16[14, 4096], 0, int64[14])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.010360092865899099, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[14, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 343.9, "pct_cuda_time": 4.847395832085306, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[14, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3617.83, "cuda_time_us": 129.5, "pct_cuda_time": 1.8253496954203177, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0103741882303425, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0103741882303425, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.01127629155472011, "trace": "copy_(int32[14], int32[14], True) <- _to_copy(int32[14], 3, 0, None, None, True, None) <- to(int32[14], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.010825239892531304, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.010825239892531304, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, 
None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.010811144528087903, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.010825239892531304, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 7.136, "pct_cuda_time": 0.10058452066810336, "trace": "copy_(float32[14, 128256], bfloat16[14, 128256], False) <- _to_copy(bfloat16[14, 128256], 6, None, None, None, False, None) <- to(bfloat16[14, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 9.856, "pct_cuda_time": 0.13892391195415174, "trace": "div_(float32[14, 128256], bfloat16[14, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 35.327, "pct_cuda_time": 0.4979469396919965, "trace": "_softmax(float32[14, 128256], -1, False) <- softmax(float32[14, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.447, "pct_cuda_time": 0.4009708323214036, "trace": "_log_softmax(float32[14, 128256], -1, False) <- log_softmax(float32[14, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.792, "pct_cuda_time": 0.02525889308257304, "trace": "copy_(int64[14], int32[14], False) <- _to_copy(int32[14], 4, None, None, None, 
False, None) <- to(int32[14], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 10.4, "pct_cuda_time": 0.1465917902113614, "trace": "index(float32[14, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 28.287, "pct_cuda_time": 0.3987155740104596, "trace": "argmax(float32[14, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.04104570125918119, "trace": "copy_(int64[14], int64[14], False) <- _to_copy(int64[14], 4, 0, None, None, False, None) <- to(int64[14], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] } }
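
The dump above is plain JSON, so it can be post-processed directly. Below is a minimal sketch of ranking kernels by total CUDA time, under stated assumptions: the file name "profile.json" is hypothetical, the set of phase keys other than "prefill" is not confirmed by this dump, and only leaf entries (empty "children") are summed because parent entries here aggregate their children (e.g. the Attention entry's 23.328 us equals the sum of its three child kernels). Field names ("entry", "children", "name", "cuda_time_us", "summary_stats", "context") are taken from the dump itself.

# Sketch: rank kernels by total CUDA time from a layerwise-profile JSON like the one above.
# Assumptions (not taken from the dump): the file name "profile.json" and the presence or
# absence of phases beyond "prefill".
import json
from collections import defaultdict


def collect_leaf_times(node, totals):
    """Walk one entry/children tree, summing cuda_time_us at leaf entries only,
    since parent entries aggregate their children's times."""
    entry = node.get("entry", {})
    children = node.get("children", [])
    if not children:
        totals[entry.get("name", "<unknown>")] += entry.get("cuda_time_us", 0.0)
    for child in children:
        collect_leaf_times(child, totals)


def main(path="profile.json"):  # hypothetical file name
    with open(path) as f:
        data = json.load(f)
    totals = defaultdict(float)
    for phase, phase_data in data.items():
        # "context" holds engine arguments and run metadata, not kernel entries.
        if phase == "context" or not isinstance(phase_data, dict):
            continue
        for root in phase_data.get("summary_stats", []):
            collect_leaf_times(root, totals)
    for name, us in sorted(totals.items(), key=lambda kv: kv[1], reverse=True)[:10]:
        print(f"{us:10.3f} us  {name[:100]}")


if __name__ == "__main__":
    main()

On the data shown here, such a ranking would be led by the LM-head GEMM under LogitsProcessor (343.9 us) and the MLP projection GEMMs, with the FlashAttention forward kernel and the softmax/argmax kernels of the Sampler well behind.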