{ "context": { "python_version": "3.12.9 | packaged by Anaconda, Inc. | (main, Feb 6 2025, 18:56:27) [GCC 11.2.0]", "torch_version": "2.5.1+cu124", "engine_args": { "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "served_model_name": null, "tokenizer": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "task": "auto", "skip_tokenizer_init": false, "tokenizer_mode": "auto", "trust_remote_code": false, "allowed_local_media_path": null, "download_dir": null, "load_format": "dummy", "config_format": "auto", "dtype": "auto", "kv_cache_dtype": "auto", "seed": 0, "max_model_len": null, "distributed_executor_backend": null, "pipeline_parallel_size": 1, "tensor_parallel_size": 1, "max_parallel_loading_workers": null, "block_size": null, "enable_prefix_caching": false, "disable_sliding_window": false, "use_v2_block_manager": true, "swap_space": 4, "cpu_offload_gb": 0, "gpu_memory_utilization": 0.9, "max_num_batched_tokens": 8000, "max_num_partial_prefills": 1, "max_long_partial_prefills": 1, "long_prefill_token_threshold": 0, "max_num_seqs": 256, "max_logprobs": 20, "disable_log_stats": false, "revision": null, "code_revision": null, "rope_scaling": null, "rope_theta": null, "hf_overrides": null, "tokenizer_revision": null, "quantization": null, "enforce_eager": true, "max_seq_len_to_capture": 8192, "disable_custom_all_reduce": false, "tokenizer_pool_size": 0, "tokenizer_pool_type": "ray", "tokenizer_pool_extra_config": null, "limit_mm_per_prompt": null, "mm_processor_kwargs": null, "disable_mm_preprocessor_cache": false, "enable_lora": false, "enable_lora_bias": false, "max_loras": 1, "max_lora_rank": 16, "enable_prompt_adapter": false, "max_prompt_adapters": 1, "max_prompt_adapter_token": 0, "fully_sharded_loras": false, "lora_extra_vocab_size": 256, "long_lora_scaling_factors": null, "lora_dtype": "auto", "max_cpu_loras": null, "device": "auto", "num_scheduler_steps": 1, "multi_step_stream_outputs": true, "ray_workers_use_nsight": false, "num_gpu_blocks_override": null, "num_lookahead_slots": 0, "model_loader_extra_config": null, "ignore_patterns": [], "preemption_mode": null, "scheduler_delay_factor": 0.0, "enable_chunked_prefill": null, "guided_decoding_backend": "xgrammar", "logits_processor_pattern": null, "speculative_model": null, "speculative_model_quantization": null, "speculative_draft_tensor_parallel_size": null, "num_speculative_tokens": null, "speculative_disable_mqa_scorer": false, "speculative_max_model_len": null, "speculative_disable_by_batch_size": null, "ngram_prompt_lookup_max": null, "ngram_prompt_lookup_min": null, "spec_decoding_acceptance_method": "rejection_sampler", "typical_acceptance_sampler_posterior_threshold": null, "typical_acceptance_sampler_posterior_alpha": null, "qlora_adapter_name_or_path": null, "disable_logprobs_during_spec_decoding": null, "otlp_traces_endpoint": null, "collect_detailed_traces": null, "disable_async_output_proc": false, "scheduling_policy": "fcfs", "scheduler_cls": "vllm.core.scheduler.Scheduler", "override_neuron_config": null, "override_pooler_config": null, "compilation_config": null, "worker_cls": "auto", "kv_transfer_config": null, "generation_config": null, "override_generation_config": null, "enable_sleep_mode": false, "model_impl": "auto", "calculate_kv_scales": false, "additional_config": null }, "prompt_len": 0, "batch_size": 2, "num_steps": 2, "complete_num_requests_per_step": null, "save_chrome_traces_folder": null }, "prefill": { "metadata": { "num_running_seqs": null }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", 
"cuda_time_us": 8591.814999999999, "pct_cuda_time": 94.83509574555214, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 9.92, "pct_cuda_time": 0.10949539181137832, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 9.92, "pct_cuda_time": 0.10949539181137832, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 8577.542, "pct_cuda_time": 94.67755262787838, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 282.939, "pct_cuda_time": 3.123035954003989, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.576, "pct_cuda_time": 0.05050916460976483, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 278.363, "pct_cuda_time": 3.072526789394224, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 2196.9249999999997, "pct_cuda_time": 24.24931085233995, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 899.5369999999999, "pct_cuda_time": 9.928947204015302, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 899.5369999999999, "pct_cuda_time": 9.928947204015302, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 150.01499999999993, "pct_cuda_time": 1.6558418550991845, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 150.01499999999993, "pct_cuda_time": 1.6558418550991845, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 453.048, "pct_cuda_time": 5.000672204572714, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 93.75700000000002, "pct_cuda_time": 1.0348749445624394, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, 
cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 314.332, "pct_cuda_time": 3.4695469252877182, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 44.95900000000001, "pct_cuda_time": 0.49625033472255636, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 694.3249999999999, "pct_cuda_time": 7.663849588652746, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 694.3249999999999, "pct_cuda_time": 7.663849588652746, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 6097.678000000001, "pct_cuda_time": 67.30520582153446, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 3344.592, "pct_cuda_time": 36.91707777108885, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.646000000000008, "pct_cuda_time": 0.26100080995683994, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 3320.945999999999, "pct_cuda_time": 36.656076961132, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 369.34, "pct_cuda_time": 4.076716533428877, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 369.34, "pct_cuda_time": 4.076716533428877, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 2383.7459999999996, "pct_cuda_time": 26.311411517016708, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2383.7459999999996, "pct_cuda_time": 26.311411517016708, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 4.353, "pct_cuda_time": 0.048047725862392116, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.353, "pct_cuda_time": 0.048047725862392116, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 352.538, "pct_cuda_time": 3.891258713548356, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 3.103, "pct_cuda_time": 
0.03425042346680514, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 348.699, "pct_cuda_time": 3.8488844384310292, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 115.389, "pct_cuda_time": 1.273645540899509, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.471, "pct_cuda_time": 0.06038803312500512, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 4.32, "pct_cuda_time": 0.04768347707914862, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 4.672, "pct_cuda_time": 0.051568797433745914, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 34.655, "pct_cuda_time": 0.38251641161525357, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.0, "pct_cuda_time": 0.30905957366114845, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.92, "pct_cuda_time": 0.02119265647962161, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, 
long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 4.895, "pct_cuda_time": 0.05403023618111862, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 28.448, "pct_cuda_time": 0.31400452683972685, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 3.008, "pct_cuda_time": 0.03320182848474052, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 88537.237, "cuda_time_us": 8591.814999999999, "pct_cuda_time": 94.83509574555214, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 322.671, "cuda_time_us": 9.92, "pct_cuda_time": 0.10949539181137832, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 9.92, "pct_cuda_time": 0.10949539181137832, "trace": "index_select(bfloat16[128256, 4096], 0, int64[256]) <- embedding(bfloat16[128256, 4096], int64[256], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3853.908, "cuda_time_us": 269.054, "pct_cuda_time": 2.969775518993808, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 219.303, "cuda_time_us": 4.576, "pct_cuda_time": 0.05050916460976483, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.576, "pct_cuda_time": 0.05050916460976483, "trace": "_C::rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2861.657, "cuda_time_us": 70.07999999999998, "pct_cuda_time": 0.7735319615061885, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 384.021, "cuda_time_us": 29.183, "pct_cuda_time": 0.322117340648332, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 29.183, "pct_cuda_time": 0.322117340648332, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 913.69, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", 
"cpu_time_us": 1043.087, "cuda_time_us": 14.208999999999998, "pct_cuda_time": 0.15683669579111634, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.0328486175434135, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.953, "pct_cuda_time": 0.10985964059462182, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.014128437653081072, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 275.726, "cuda_time_us": 22.08, "pct_cuda_time": 0.24371554951564847, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.08, "pct_cuda_time": 0.24371554951564847, "trace": "mm(bfloat16[256, 4096], 
bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 112.681, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 549.475, "cuda_time_us": 189.95, "pct_cuda_time": 2.096638072033398, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 196.953, "cuda_time_us": 104.22200000000001, "pct_cuda_time": 1.1503859602182935, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.486, "pct_cuda_time": 1.1422621085677718, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 129.921, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 153.448, "cuda_time_us": 74.24, "pct_cuda_time": 0.8194493838787021, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.24, "pct_cuda_time": 0.8194493838787021, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2639.286, "cuda_time_us": 267.03599999999994, "pct_cuda_time": 2.947501154006372, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.274, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], 
bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1898.196, "cuda_time_us": 68.064, "pct_cuda_time": 0.751279672202586, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.327, "cuda_time_us": 27.648, "pct_cuda_time": 0.3051742533065511, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.648, "pct_cuda_time": 0.3051742533065511, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 549.436, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 846.731, "cuda_time_us": 14.368999999999998, "pct_cuda_time": 0.1586027504977515, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.944, "pct_cuda_time": 0.03249540660208647, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.921, "pct_cuda_time": 0.10950642965329477, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, 
True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.01660091424237026, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 207.494, "cuda_time_us": 21.439, "pct_cuda_time": 0.23664029284719149, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.439, "pct_cuda_time": 0.23664029284719149, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.298, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 494.245, "cuda_time_us": 190.14, "pct_cuda_time": 2.0987352619975272, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.281, "cuda_time_us": 104.478, "pct_cuda_time": 1.1532116477489096, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.008112813808605146, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.743, "pct_cuda_time": 1.1450988339403045, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 126.848, "cuda_time_us": 11.167, "pct_cuda_time": 0.1232595806812159, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.167, "pct_cuda_time": 
0.1232595806812159, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.268, "cuda_time_us": 74.495, "pct_cuda_time": 0.822264033567402, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.495, "pct_cuda_time": 0.822264033567402, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2531.981, "cuda_time_us": 268.092, "pct_cuda_time": 2.9591571150701648, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.852, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1766.358, "cuda_time_us": 68.734, "pct_cuda_time": 0.7586750262866206, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.743, "cuda_time_us": 28.127, "pct_cuda_time": 0.31046137958454006, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.127, "pct_cuda_time": 0.31046137958454006, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 529.216, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 762.567, "cuda_time_us": 14.046999999999999, "pct_cuda_time": 0.1550485654006483, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.848, "pct_cuda_time": 0.031435773778105386, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], 
bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.759, "pct_cuda_time": 0.10771829926282672, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.015894492359716204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 181.351, "cuda_time_us": 21.952, "pct_cuda_time": 0.24230270575034044, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.952, "pct_cuda_time": 0.24230270575034044, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.846, "cuda_time_us": 4.351, "pct_cuda_time": 0.04802565017855918, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.351, "pct_cuda_time": 0.04802565017855918, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": 
"LlamaMLP", "cpu_time_us": 491.141, "cuda_time_us": 190.591, "pct_cuda_time": 2.1037133287018555, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.066, "cuda_time_us": 104.607, "pct_cuda_time": 1.1546355293561341, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.871, "pct_cuda_time": 1.1465116777056126, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 128.04, "cuda_time_us": 11.489, "pct_cuda_time": 0.1268137657783191, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.489, "pct_cuda_time": 0.1268137657783191, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.798, "cuda_time_us": 74.495, "pct_cuda_time": 0.822264033567402, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.495, "pct_cuda_time": 0.822264033567402, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2378.245, "cuda_time_us": 266.78099999999995, "pct_cuda_time": 2.9446865043176724, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.891, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1718.688, "cuda_time_us": 67.967, "pct_cuda_time": 0.7502090015366885, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.936, "cuda_time_us": 27.647, "pct_cuda_time": 0.3051632154646347, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.647, "pct_cuda_time": 0.3051632154646347, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 
4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 516.303, "cuda_time_us": 4.704, "pct_cuda_time": 0.05192200837507294, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.704, "pct_cuda_time": 0.05192200837507294, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.555, "cuda_time_us": 13.984, "pct_cuda_time": 0.15435318135991072, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.033908250367394574, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.632, "pct_cuda_time": 0.10631649333943506, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.014128437653081072, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, 
True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 186.182, "cuda_time_us": 21.632, "pct_cuda_time": 0.23877059633707012, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.632, "pct_cuda_time": 0.23877059633707012, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.849, "cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.228, "cuda_time_us": 190.07799999999997, "pct_cuda_time": 2.0980509157987064, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.584, "cuda_time_us": 104.704, "pct_cuda_time": 1.1557062000220317, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.008134889492438086, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.967, "pct_cuda_time": 1.1475713105295937, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.745, "cuda_time_us": 11.52, "pct_cuda_time": 0.12715593887772964, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.52, "pct_cuda_time": 0.12715593887772964, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.537, "cuda_time_us": 73.854, "pct_cuda_time": 0.815188776898945, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 73.854, "pct_cuda_time": 0.815188776898945, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", 
"cpu_time_us": 2325.648, "cuda_time_us": 266.906, "pct_cuda_time": 2.946066234557232, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.962, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1653.872, "cuda_time_us": 67.934, "pct_cuda_time": 0.749844752753445, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.632, "cuda_time_us": 27.647, "pct_cuda_time": 0.3051632154646347, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.647, "pct_cuda_time": 0.3051632154646347, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 478.828, "cuda_time_us": 4.544, "pct_cuda_time": 0.0501559536684378, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.544, "pct_cuda_time": 0.0501559536684378, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 727.527, "cuda_time_us": 14.208, "pct_cuda_time": 0.15682565794919992, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.03214219566075944, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, 
flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.11055502463535938, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.014128437653081072, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 170.701, "cuda_time_us": 21.535, "pct_cuda_time": 0.2376999256711726, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.535, "pct_cuda_time": 0.2376999256711726, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.573, "cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 443.998, "cuda_time_us": 190.236, "pct_cuda_time": 2.0997948948215086, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.876, "cuda_time_us": 104.51, "pct_cuda_time": 1.1535648586902367, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.008112813808605146, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, 
"cuda_time_us": 103.775, "pct_cuda_time": 1.1454520448816314, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.781, "cuda_time_us": 11.584, "pct_cuda_time": 0.1278623607603837, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.584, "pct_cuda_time": 0.1278623607603837, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.47, "cuda_time_us": 74.142, "pct_cuda_time": 0.8183676753708881, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.142, "pct_cuda_time": 0.8183676753708881, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2708.177, "cuda_time_us": 267.323, "pct_cuda_time": 2.9506690146363996, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.678, "cuda_time_us": 4.417, "pct_cuda_time": 0.04875414774504617, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.417, "pct_cuda_time": 0.04875414774504617, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1994.928, "cuda_time_us": 69.118, "pct_cuda_time": 0.762913557582545, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.97, "cuda_time_us": 28.159, "pct_cuda_time": 0.31081459052586713, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.159, "pct_cuda_time": 0.31081459052586713, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 735.084, "cuda_time_us": 4.864, "pct_cuda_time": 0.05368806308170807, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.864, "pct_cuda_time": 0.05368806308170807, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 757.031, "cuda_time_us": 14.431, "pct_cuda_time": 0.15928709669657262, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.03214219566075944, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.951, "pct_cuda_time": 0.10983756491078887, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.568, "pct_cuda_time": 0.017307336125024313, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.282, "cuda_time_us": 21.664, "pct_cuda_time": 0.23912380727839716, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.664, "pct_cuda_time": 0.23912380727839716, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] 
} ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.485, "cuda_time_us": 4.479, "pct_cuda_time": 0.04943849394386728, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.479, "pct_cuda_time": 0.04943849394386728, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.457, "cuda_time_us": 189.309, "pct_cuda_time": 2.0895628153649413, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 179.031, "cuda_time_us": 103.03800000000001, "pct_cuda_time": 1.1373171553891936, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 102.302, "pct_cuda_time": 1.1291933037386719, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.379, "cuda_time_us": 11.552, "pct_cuda_time": 0.12750914981905667, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.552, "pct_cuda_time": 0.12750914981905667, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.433, "cuda_time_us": 74.719, "pct_cuda_time": 0.824736510156691, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.719, "pct_cuda_time": 0.824736510156691, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2387.9, "cuda_time_us": 269.18100000000004, "pct_cuda_time": 2.9711773249172007, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.753, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1696.272, "cuda_time_us": 69.056, 
"pct_cuda_time": 0.7622292113837238, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 163.855, "cuda_time_us": 27.968, "pct_cuda_time": 0.30870636271982144, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.968, "pct_cuda_time": 0.30870636271982144, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 481.483, "cuda_time_us": 4.705, "pct_cuda_time": 0.05193304621698941, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.705, "pct_cuda_time": 0.05193304621698941, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 730.468, "cuda_time_us": 14.078999999999999, "pct_cuda_time": 0.15540177634197533, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.847, "pct_cuda_time": 0.03142473593618892, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.728, "pct_cuda_time": 0.10737612616341614, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], 
None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.01660091424237026, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 171.808, "cuda_time_us": 22.304, "pct_cuda_time": 0.24618802610493767, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.304, "pct_cuda_time": 0.24618802610493767, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.301, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 451.374, "cuda_time_us": 191.16500000000002, "pct_cuda_time": 2.110049049961909, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.892, "cuda_time_us": 103.998, "pct_cuda_time": 1.1479134836290044, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.262, "pct_cuda_time": 1.1397896319784826, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.335, "cuda_time_us": 11.584, "pct_cuda_time": 0.1278623607603837, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.584, "pct_cuda_time": 0.1278623607603837, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.806, "cuda_time_us": 75.583, "pct_cuda_time": 0.8342732055725208, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 75.583, "pct_cuda_time": 0.8342732055725208, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2397.785, "cuda_time_us": 268.283, "pct_cuda_time": 2.9612653428762106, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.786, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1719.192, "cuda_time_us": 68.862, "pct_cuda_time": 0.7600878700519287, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.72, "cuda_time_us": 28.735, "pct_cuda_time": 0.3171723874697536, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.735, "pct_cuda_time": 0.3171723874697536, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 503.282, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 747.843, "cuda_time_us": 13.918999999999999, "pct_cuda_time": 0.1536357216353402, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.88, "pct_cuda_time": 0.03178898471943241, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, 
cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.759, "pct_cuda_time": 0.10771829926282672, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.014128437653081072, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 173.959, "cuda_time_us": 21.6, "pct_cuda_time": 0.23841738539574311, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.6, "pct_cuda_time": 0.23841738539574311, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.102, "cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.548, "cuda_time_us": 190.685, "pct_cuda_time": 2.1047508858420034, "trace": "" }, "children": [ { "entry": { "name": 
"MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.88, "cuda_time_us": 105.022, "pct_cuda_time": 1.159216233751469, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.286, "pct_cuda_time": 1.1510923821009476, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.406, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.24, "cuda_time_us": 74.175, "pct_cuda_time": 0.8187319241541317, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.175, "pct_cuda_time": 0.8187319241541317, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2357.562, "cuda_time_us": 267.067, "pct_cuda_time": 2.9478433271057836, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.634, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1646.287, "cuda_time_us": 67.774, "pct_cuda_time": 0.7480786980468099, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.824, "cuda_time_us": 27.679, "pct_cuda_time": 0.3055164264059617, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.679, "pct_cuda_time": 0.3055164264059617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 471.991, "cuda_time_us": 4.512, 
"pct_cuda_time": 0.04980274272711078, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.512, "pct_cuda_time": 0.04980274272711078, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 719.479, "cuda_time_us": 14.144, "pct_cuda_time": 0.15611923606654585, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.03214219566075944, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.92, "pct_cuda_time": 0.10949539181137832, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0144816485944081, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, 
"children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.121, "cuda_time_us": 21.439, "pct_cuda_time": 0.23664029284719149, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.439, "pct_cuda_time": 0.23664029284719149, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.844, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 497.777, "cuda_time_us": 190.461, "pct_cuda_time": 2.1022784092527145, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.65, "cuda_time_us": 104.638, "pct_cuda_time": 1.1549777024555448, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.902, "pct_cuda_time": 1.146853850805023, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.797, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 195.204, "cuda_time_us": 74.335, "pct_cuda_time": 0.8204979788607668, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.335, "pct_cuda_time": 0.8204979788607668, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2324.636, "cuda_time_us": 269.535, "pct_cuda_time": 2.9750847209556306, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", 
"cpu_time_us": 67.19, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1656.506, "cuda_time_us": 69.21600000000001, "pct_cuda_time": 0.7639952660903591, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.432, "cuda_time_us": 28.608, "pct_cuda_time": 0.31577058154636195, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.608, "pct_cuda_time": 0.31577058154636195, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 467.32, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 733.47, "cuda_time_us": 14.304, "pct_cuda_time": 0.15788529077318098, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.03214219566075944, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.92, "pct_cuda_time": 0.10949539181137832, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.016247703301043234, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 187.78, "cuda_time_us": 21.696, "pct_cuda_time": 0.23947701821972422, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.696, "pct_cuda_time": 0.23947701821972422, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.744, "cuda_time_us": 4.415, "pct_cuda_time": 0.04873207206121323, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.415, "pct_cuda_time": 0.04873207206121323, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 449.389, "cuda_time_us": 191.45600000000002, "pct_cuda_time": 2.1132610619596015, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.719, "cuda_time_us": 105.02300000000001, "pct_cuda_time": 1.1592272715933856, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.287, "pct_cuda_time": 1.151103419942864, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 
4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.834, "cuda_time_us": 11.681, "pct_cuda_time": 0.12893303142628124, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.681, "pct_cuda_time": 0.12893303142628124, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.736, "cuda_time_us": 74.752, "pct_cuda_time": 0.8251007589399346, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.752, "pct_cuda_time": 0.8251007589399346, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2372.69, "cuda_time_us": 269.211, "pct_cuda_time": 2.9715084601746944, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.543, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1719.842, "cuda_time_us": 69.055, "pct_cuda_time": 0.7622181735418075, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.046, "cuda_time_us": 28.607, "pct_cuda_time": 0.3157595437044455, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.607, "pct_cuda_time": 0.3157595437044455, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 521.063, "cuda_time_us": 4.768, "pct_cuda_time": 0.05262843025772699, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.768, "pct_cuda_time": 0.05262843025772699, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 735.021, "cuda_time_us": 14.24, "pct_cuda_time": 0.15717886889052693, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, 
long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.03355503942606755, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.76, "pct_cuda_time": 0.10772933710474317, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.015894492359716204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 177.723, "cuda_time_us": 21.44, "pct_cuda_time": 0.23665133068910796, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.44, "pct_cuda_time": 0.23665133068910796, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.157, "cuda_time_us": 4.447, "pct_cuda_time": 0.04908528300254025, "trace": "" }, "children": [ { 
"entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.447, "pct_cuda_time": 0.04908528300254025, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 440.265, "cuda_time_us": 191.26100000000002, "pct_cuda_time": 2.1111086827858903, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.438, "cuda_time_us": 104.51100000000001, "pct_cuda_time": 1.1535758965321532, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.775, "pct_cuda_time": 1.1454520448816314, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.532, "cuda_time_us": 11.615, "pct_cuda_time": 0.12820453385979427, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.615, "pct_cuda_time": 0.12820453385979427, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.264, "cuda_time_us": 75.135, "pct_cuda_time": 0.8293282523939426, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 75.135, "pct_cuda_time": 0.8293282523939426, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2476.351, "cuda_time_us": 266.26599999999996, "pct_cuda_time": 2.939002015730691, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.76, "cuda_time_us": 4.351, "pct_cuda_time": 0.04802565017855918, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.351, "pct_cuda_time": 0.04802565017855918, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1795.599, "cuda_time_us": 68.958, "pct_cuda_time": 0.7611475028759098, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 166.623, 
"cuda_time_us": 28.127, "pct_cuda_time": 0.31046137958454006, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.127, "pct_cuda_time": 0.31046137958454006, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 481.027, "cuda_time_us": 4.96, "pct_cuda_time": 0.05474769590568916, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.96, "pct_cuda_time": 0.05474769590568916, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 716.536, "cuda_time_us": 13.663, "pct_cuda_time": 0.150810034104724, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.848, "pct_cuda_time": 0.031435773778105386, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.536, "pct_cuda_time": 0.10525686051545398, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, 
at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.279, "pct_cuda_time": 0.014117399811164602, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.607, "cuda_time_us": 22.208, "pct_cuda_time": 0.2451283932809566, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.208, "pct_cuda_time": 0.2451283932809566, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.08, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.751, "cuda_time_us": 188.605, "pct_cuda_time": 2.0817921746557464, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.182, "cuda_time_us": 103.038, "pct_cuda_time": 1.1373171553891934, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.704, "pct_cuda_time": 0.007770640709194589, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 102.334, "pct_cuda_time": 1.1295465146799988, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.457, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.503, "cuda_time_us": 74.079, "pct_cuda_time": 0.8176722913301505, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.079, "pct_cuda_time": 0.8176722913301505, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2650.036, "cuda_time_us": 268.892, "pct_cuda_time": 2.9679873886033405, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.664, "cuda_time_us": 4.48, "pct_cuda_time": 0.049449531785783755, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.48, "pct_cuda_time": 0.049449531785783755, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1965.408, "cuda_time_us": 69.31099999999999, "pct_cuda_time": 0.7650438610724235, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.396, "cuda_time_us": 28.512, "pct_cuda_time": 0.3147109487223809, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.512, "pct_cuda_time": 0.3147109487223809, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 525.179, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 976.014, "cuda_time_us": 14.464, "pct_cuda_time": 0.15965134547981613, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.03532109413270268, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, 
flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.952, "pct_cuda_time": 0.10984860275270535, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0144816485944081, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.639, "cuda_time_us": 21.727, "pct_cuda_time": 0.23981919131913473, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.727, "pct_cuda_time": 0.23981919131913473, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.045, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 466.705, "cuda_time_us": 190.749, "pct_cuda_time": 2.1054573077246572, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.304, "cuda_time_us": 105.214, "pct_cuda_time": 1.1613354993994311, "trace": "" }, "children": [ { 
"entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.478, "pct_cuda_time": 1.1532116477489096, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 105.379, "cuda_time_us": 11.904, "pct_cuda_time": 0.131394470173654, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.904, "pct_cuda_time": 0.131394470173654, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.203, "cuda_time_us": 73.631, "pct_cuda_time": 0.8127273381515723, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 73.631, "pct_cuda_time": 0.8127273381515723, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2280.313, "cuda_time_us": 268.35, "pct_cuda_time": 2.9620048782846142, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.692, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1641.841, "cuda_time_us": 69.05499999999999, "pct_cuda_time": 0.7622181735418073, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 132.037, "cuda_time_us": 28.351, "pct_cuda_time": 0.3129338561738293, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.351, "pct_cuda_time": 0.3129338561738293, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.966, "cuda_time_us": 4.864, "pct_cuda_time": 0.05368806308170807, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.864, "pct_cuda_time": 0.05368806308170807, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 687.465, "cuda_time_us": 14.4, "pct_cuda_time": 0.15894492359716206, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.88, "pct_cuda_time": 0.03178898471943241, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.11055502463535938, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.01660091424237026, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 168.575, "cuda_time_us": 21.44, "pct_cuda_time": 
0.23665133068910796, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.44, "pct_cuda_time": 0.23665133068910796, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.291, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 432.993, "cuda_time_us": 190.431, "pct_cuda_time": 2.1019472739952203, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 152.647, "cuda_time_us": 104.095, "pct_cuda_time": 1.1489841542949017, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.359, "pct_cuda_time": 1.14086030264438, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.663, "cuda_time_us": 11.68, "pct_cuda_time": 0.1289219935843648, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.68, "pct_cuda_time": 0.1289219935843648, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.861, "cuda_time_us": 74.656, "pct_cuda_time": 0.8240411261159536, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.656, "pct_cuda_time": 0.8240411261159536, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2394.057, "cuda_time_us": 267.22799999999995, "pct_cuda_time": 2.9496204196543347, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.289, "cuda_time_us": 4.48, "pct_cuda_time": 0.049449531785783755, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.48, "pct_cuda_time": 0.049449531785783755, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1699.416, "cuda_time_us": 68.63799999999999, "pct_cuda_time": 0.7576153934626395, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 130.569, "cuda_time_us": 27.935, "pct_cuda_time": 0.3083421139365779, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.935, "pct_cuda_time": 0.3083421139365779, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 486.166, "cuda_time_us": 4.672, "pct_cuda_time": 0.051568797433745914, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.672, "pct_cuda_time": 0.051568797433745914, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 750.218, "cuda_time_us": 13.982999999999999, "pct_cuda_time": 0.15434214351799425, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.88, "pct_cuda_time": 0.03178898471943241, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, 
"cuda_time_us": 9.631, "pct_cuda_time": 0.10630545549751859, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.016247703301043234, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 191.307, "cuda_time_us": 22.048, "pct_cuda_time": 0.24336233857432144, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.048, "pct_cuda_time": 0.24336233857432144, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 102.147, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 459.275, "cuda_time_us": 189.75799999999998, "pct_cuda_time": 2.094518806385436, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.325, "cuda_time_us": 103.807, "pct_cuda_time": 1.1458052558229586, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.071, "pct_cuda_time": 1.137681404172437, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { 
"entry": { "name": "SiluAndMul", "cpu_time_us": 97.446, "cuda_time_us": 11.648, "pct_cuda_time": 0.12856878264303775, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.648, "pct_cuda_time": 0.12856878264303775, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.862, "cuda_time_us": 74.303, "pct_cuda_time": 0.8201447679194398, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.303, "pct_cuda_time": 0.8201447679194398, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2343.974, "cuda_time_us": 267.74, "pct_cuda_time": 2.955271794715568, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.75, "cuda_time_us": 4.288, "pct_cuda_time": 0.0473302661378216, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.288, "pct_cuda_time": 0.0473302661378216, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1674.493, "cuda_time_us": 67.902, "pct_cuda_time": 0.749491541812118, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.738, "cuda_time_us": 27.903, "pct_cuda_time": 0.3079889029952509, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.903, "pct_cuda_time": 0.3079889029952509, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 522.117, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 699.404, "cuda_time_us": 13.982999999999999, "pct_cuda_time": 0.15434214351799425, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.88, 
"pct_cuda_time": 0.03178898471943241, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.695, "pct_cuda_time": 0.10701187738017266, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.015541281418389178, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.167, "cuda_time_us": 21.568, "pct_cuda_time": 0.23806417445441608, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.568, "pct_cuda_time": 0.23806417445441608, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.446, "cuda_time_us": 4.353, "pct_cuda_time": 0.048047725862392116, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type 
vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.353, "pct_cuda_time": 0.048047725862392116, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 459.753, "cuda_time_us": 191.197, "pct_cuda_time": 2.110402260903236, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 155.55, "cuda_time_us": 104.767, "pct_cuda_time": 1.1564015840627693, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.008477062591848643, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.999, "pct_cuda_time": 1.1479245214709206, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.732, "cuda_time_us": 11.807, "pct_cuda_time": 0.1303237995077564, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.807, "pct_cuda_time": 0.1303237995077564, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.338, "cuda_time_us": 74.623, "pct_cuda_time": 0.82367687733271, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.623, "pct_cuda_time": 0.82367687733271, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2255.765, "cuda_time_us": 268.22, "pct_cuda_time": 2.9605699588354732, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.978, "cuda_time_us": 4.383, "pct_cuda_time": 0.04837886111988621, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.383, "pct_cuda_time": 0.04837886111988621, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1616.125, "cuda_time_us": 68.8, "pct_cuda_time": 0.7594035238531076, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.252, "cuda_time_us": 28.288, "pct_cuda_time": 0.3122384721330917, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.288, "pct_cuda_time": 0.3122384721330917, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 471.442, "cuda_time_us": 4.896, "pct_cuda_time": 0.05404127402303511, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.896, "pct_cuda_time": 0.05404127402303511, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 710.088, "cuda_time_us": 14.368, "pct_cuda_time": 0.15859171265583505, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.034261461308721604, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.952, "pct_cuda_time": 0.10984860275270535, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0144816485944081, "trace": 
"fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 162.705, "cuda_time_us": 21.248, "pct_cuda_time": 0.23453206504114582, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.248, "pct_cuda_time": 0.23453206504114582, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.968, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 435.34, "cuda_time_us": 190.621, "pct_cuda_time": 2.1040444639593496, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 152.571, "cuda_time_us": 104.382, "pct_cuda_time": 1.1521520149249285, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.646, "pct_cuda_time": 1.144028163274407, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.457, "cuda_time_us": 11.712, "pct_cuda_time": 0.1292752045256918, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.712, "pct_cuda_time": 0.1292752045256918, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.204, "cuda_time_us": 74.527, "pct_cuda_time": 0.822617244508729, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", 
"cpu_time_us": 0, "cuda_time_us": 74.527, "pct_cuda_time": 0.822617244508729, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2314.238, "cuda_time_us": 268.15700000000004, "pct_cuda_time": 2.9598745747947355, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 64.762, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1630.054, "cuda_time_us": 68.703, "pct_cuda_time": 0.7583328531872101, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 127.978, "cuda_time_us": 28.096, "pct_cuda_time": 0.31011920648512953, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.096, "pct_cuda_time": 0.31011920648512953, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 446.047, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.608, "pct_cuda_time": 0.05086237555109186, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 681.555, "cuda_time_us": 14.302999999999999, "pct_cuda_time": 0.1578742529312645, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.03214219566075944, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, 
cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.919, "pct_cuda_time": 0.10948435396946185, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.016247703301043234, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 241.769, "cuda_time_us": 21.696, "pct_cuda_time": 0.23947701821972422, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.696, "pct_cuda_time": 0.23947701821972422, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.097, "cuda_time_us": 4.48, "pct_cuda_time": 0.049449531785783755, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.48, "pct_cuda_time": 0.049449531785783755, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 466.267, "cuda_time_us": 190.526, "pct_cuda_time": 2.1029958689772847, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.057, "cuda_time_us": 104.863, "pct_cuda_time": 1.1574612168867504, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, 
"trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.127, "pct_cuda_time": 1.1493373652362286, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 115.316, "cuda_time_us": 11.072, "pct_cuda_time": 0.12221098569915127, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.072, "pct_cuda_time": 0.12221098569915127, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.83, "cuda_time_us": 74.591, "pct_cuda_time": 0.823323666391383, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.591, "pct_cuda_time": 0.823323666391383, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2383.88, "cuda_time_us": 268.702, "pct_cuda_time": 2.965890198639211, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.196, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1718.551, "cuda_time_us": 67.52, "pct_cuda_time": 0.7452750862000266, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.016, "cuda_time_us": 27.424, "pct_cuda_time": 0.30270177671726195, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.424, "pct_cuda_time": 0.30270177671726195, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 539.22, "cuda_time_us": 4.576, "pct_cuda_time": 0.05050916460976483, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.576, "pct_cuda_time": 0.05050916460976483, 
"trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 697.007, "cuda_time_us": 14.336, "pct_cuda_time": 0.158238501714508, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.848, "pct_cuda_time": 0.031435773778105386, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.824, "pct_cuda_time": 0.10843575898739724, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.018366968949005393, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.91, "cuda_time_us": 21.184, "pct_cuda_time": 0.23382564315849175, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.184, "pct_cuda_time": 0.23382564315849175, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.483, "cuda_time_us": 4.705, "pct_cuda_time": 0.05193304621698941, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.705, "pct_cuda_time": 0.05193304621698941, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 450.065, "cuda_time_us": 192.029, "pct_cuda_time": 2.1195857453777385, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.423, "cuda_time_us": 105.342, "pct_cuda_time": 1.1627483431647392, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.008477062591848643, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.574, "pct_cuda_time": 1.1542712805728907, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.283, "cuda_time_us": 11.392, "pct_cuda_time": 0.12574309511242154, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.392, "pct_cuda_time": 0.12574309511242154, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.506, "cuda_time_us": 75.295, "pct_cuda_time": 0.8310943071005777, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 75.295, "pct_cuda_time": 0.8310943071005777, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2581.847, "cuda_time_us": 269.11699999999996, "pct_cuda_time": 2.970470903034546, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.733, "cuda_time_us": 4.32, "pct_cuda_time": 0.04768347707914862, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type 
vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.32, "pct_cuda_time": 0.04768347707914862, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1921.734, "cuda_time_us": 69.184, "pct_cuda_time": 0.7636420551490319, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 129.745, "cuda_time_us": 28.608, "pct_cuda_time": 0.31577058154636195, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.608, "pct_cuda_time": 0.31577058154636195, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 490.974, "cuda_time_us": 4.544, "pct_cuda_time": 0.0501559536684378, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.544, "pct_cuda_time": 0.0501559536684378, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 985.551, "cuda_time_us": 13.92, "pct_cuda_time": 0.15364675947725667, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.848, "pct_cuda_time": 0.031435773778105386, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.76, "pct_cuda_time": 0.10772933710474317, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 
128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0144816485944081, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 178.483, "cuda_time_us": 22.112, "pct_cuda_time": 0.24406876045697548, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.112, "pct_cuda_time": 0.24406876045697548, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.718, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.873, "cuda_time_us": 191.197, "pct_cuda_time": 2.110402260903236, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.757, "cuda_time_us": 105.087, "pct_cuda_time": 1.1599336934760396, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.351, "pct_cuda_time": 1.151809841825518, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.161, "cuda_time_us": 12.223, "pct_cuda_time": 0.13491554174500778, "trace": 
"" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 12.223, "pct_cuda_time": 0.13491554174500778, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.85, "cuda_time_us": 73.887, "pct_cuda_time": 0.8155530256821885, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 73.887, "pct_cuda_time": 0.8155530256821885, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2405.143, "cuda_time_us": 266.81100000000004, "pct_cuda_time": 2.9450176395751675, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 64.05, "cuda_time_us": 4.48, "pct_cuda_time": 0.049449531785783755, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.48, "pct_cuda_time": 0.049449531785783755, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1701.103, "cuda_time_us": 67.806, "pct_cuda_time": 0.7484319089881368, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 129.524, "cuda_time_us": 27.712, "pct_cuda_time": 0.30588067518920525, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.712, "pct_cuda_time": 0.30588067518920525, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 516.917, "cuda_time_us": 4.543, "pct_cuda_time": 0.050144915826521336, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.543, "pct_cuda_time": 0.050144915826521336, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 717.876, "cuda_time_us": 14.079999999999998, "pct_cuda_time": 0.1554128141838918, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.911, "pct_cuda_time": 0.03213115781884297, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 
128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.889, "pct_cuda_time": 0.10915321871196774, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.014128437653081072, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 194.053, "cuda_time_us": 21.471, "pct_cuda_time": 0.23699350378851852, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.471, "pct_cuda_time": 0.23699350378851852, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.024, "cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 479.197, "cuda_time_us": 190.14100000000002, "pct_cuda_time": 2.098746299839444, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.588, "cuda_time_us": 104.351, "pct_cuda_time": 1.151809841825518, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.615, "pct_cuda_time": 1.1436859901749963, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 124.045, "cuda_time_us": 11.263, "pct_cuda_time": 0.12431921350519698, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.263, "pct_cuda_time": 0.12431921350519698, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.46, "cuda_time_us": 74.527, "pct_cuda_time": 0.822617244508729, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.527, "pct_cuda_time": 0.822617244508729, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2326.985, "cuda_time_us": 269.564, "pct_cuda_time": 2.9754048183712083, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.502, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1657.421, "cuda_time_us": 69.568, "pct_cuda_time": 0.7678805864449563, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.962, "cuda_time_us": 27.808, "pct_cuda_time": 0.3069403080131863, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", 
"cpu_time_us": 0, "cuda_time_us": 27.808, "pct_cuda_time": 0.3069403080131863, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 485.627, "cuda_time_us": 5.152, "pct_cuda_time": 0.05686696155365132, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 5.152, "pct_cuda_time": 0.05686696155365132, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 704.418, "cuda_time_us": 14.336, "pct_cuda_time": 0.158238501714508, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.944, "pct_cuda_time": 0.03249540660208647, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.92, "pct_cuda_time": 0.10949539181137832, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.016247703301043234, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 
8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 170.5, "cuda_time_us": 22.272, "pct_cuda_time": 0.24583481516361064, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.272, "pct_cuda_time": 0.24583481516361064, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.153, "cuda_time_us": 4.479, "pct_cuda_time": 0.04943849394386728, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.479, "pct_cuda_time": 0.04943849394386728, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 450.524, "cuda_time_us": 191.06900000000002, "pct_cuda_time": 2.108989417137928, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.0, "cuda_time_us": 104.38300000000001, "pct_cuda_time": 1.152163052766845, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.647, "pct_cuda_time": 1.1440392011163234, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.949, "cuda_time_us": 11.935, "pct_cuda_time": 0.13173664327306453, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.935, "pct_cuda_time": 0.13173664327306453, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.122, "cuda_time_us": 74.751, "pct_cuda_time": 0.8250897210980183, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.751, "pct_cuda_time": 0.8250897210980183, "trace": "mm(bfloat16[256, 14336], 
bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2345.992, "cuda_time_us": 268.188, "pct_cuda_time": 2.9602167478941457, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.745, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1687.817, "cuda_time_us": 68.286, "pct_cuda_time": 0.7537300731080423, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 167.283, "cuda_time_us": 28.063, "pct_cuda_time": 0.30975495770188605, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.063, "pct_cuda_time": 0.30975495770188605, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 486.854, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 725.435, "cuda_time_us": 14.016, "pct_cuda_time": 0.15470639230123776, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.88, "pct_cuda_time": 0.03178898471943241, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > 
>(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.664, "pct_cuda_time": 0.10666970428076208, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.016247703301043234, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.143, "cuda_time_us": 21.567, "pct_cuda_time": 0.2380531366124996, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.567, "pct_cuda_time": 0.2380531366124996, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.962, "cuda_time_us": 4.576, "pct_cuda_time": 0.05050916460976483, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.576, "pct_cuda_time": 0.05050916460976483, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.83, "cuda_time_us": 190.91, "pct_cuda_time": 2.107234400273209, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.103, "cuda_time_us": 104.959, "pct_cuda_time": 1.1585208497107315, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.008477062591848643, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 
4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.191, "pct_cuda_time": 1.1500437871188829, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.773, "cuda_time_us": 11.648, "pct_cuda_time": 0.12856878264303775, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.648, "pct_cuda_time": 0.12856878264303775, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.029, "cuda_time_us": 74.303, "pct_cuda_time": 0.8201447679194398, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.303, "pct_cuda_time": 0.8201447679194398, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2355.394, "cuda_time_us": 266.814, "pct_cuda_time": 2.945050753100917, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.93, "cuda_time_us": 4.353, "pct_cuda_time": 0.048047725862392116, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.353, "pct_cuda_time": 0.048047725862392116, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1694.382, "cuda_time_us": 67.648, "pct_cuda_time": 0.7466879299653346, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.192, "cuda_time_us": 27.712, "pct_cuda_time": 0.30588067518920525, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.712, "pct_cuda_time": 0.30588067518920525, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 514.999, "cuda_time_us": 4.8, "pct_cuda_time": 0.05298164119905402, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.8, "pct_cuda_time": 0.05298164119905402, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, 
"children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 719.673, "cuda_time_us": 13.888, "pct_cuda_time": 0.15329354853592964, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.03214219566075944, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.696, "pct_cuda_time": 0.10702291522208913, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.014128437653081072, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 173.41, "cuda_time_us": 21.248, "pct_cuda_time": 0.23453206504114582, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.248, "pct_cuda_time": 
0.23453206504114582, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.351, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 441.256, "cuda_time_us": 190.461, "pct_cuda_time": 2.1022784092527145, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.632, "cuda_time_us": 105.086, "pct_cuda_time": 1.1599226556341231, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.35, "pct_cuda_time": 1.1517988039836013, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.901, "cuda_time_us": 11.616, "pct_cuda_time": 0.12821557170171072, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.616, "pct_cuda_time": 0.12821557170171072, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.807, "cuda_time_us": 73.759, "pct_cuda_time": 0.8141401819168803, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 73.759, "pct_cuda_time": 0.8141401819168803, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3208.602, "cuda_time_us": 266.557, "pct_cuda_time": 2.942214027728384, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.15, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": 
"_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2440.244, "cuda_time_us": 67.775, "pct_cuda_time": 0.7480897358887264, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 130.62, "cuda_time_us": 27.84, "pct_cuda_time": 0.30729351895451334, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.84, "pct_cuda_time": 0.30729351895451334, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 724.29, "cuda_time_us": 4.576, "pct_cuda_time": 0.05050916460976483, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.576, "pct_cuda_time": 0.05050916460976483, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1108.861, "cuda_time_us": 14.368, "pct_cuda_time": 0.15859171265583505, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.03320182848474052, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.08, "pct_cuda_time": 0.11126144651801345, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 
0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.014128437653081072, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 294.785, "cuda_time_us": 20.991, "pct_cuda_time": 0.2316953396686131, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.991, "pct_cuda_time": 0.2316953396686131, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 104.489, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 519.502, "cuda_time_us": 189.98200000000003, "pct_cuda_time": 2.096991282974726, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 190.109, "cuda_time_us": 104.60600000000001, "pct_cuda_time": 1.1546244915142179, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.87, "pct_cuda_time": 1.146500639863696, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 115.15, "cuda_time_us": 11.296, "pct_cuda_time": 0.12468346228844047, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", 
"cpu_time_us": 0, "cuda_time_us": 11.296, "pct_cuda_time": 0.12468346228844047, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.038, "cuda_time_us": 74.08, "pct_cuda_time": 0.817683329172067, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.08, "pct_cuda_time": 0.817683329172067, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2664.975, "cuda_time_us": 267.995, "pct_cuda_time": 2.9580864444042674, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.851, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1908.486, "cuda_time_us": 68.414, "pct_cuda_time": 0.7551429168733503, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.251, "cuda_time_us": 27.424, "pct_cuda_time": 0.30270177671726195, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.424, "pct_cuda_time": 0.30270177671726195, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 547.483, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 822.29, "cuda_time_us": 14.174999999999999, "pct_cuda_time": 0.1564614091659564, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.88, "pct_cuda_time": 0.03178898471943241, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- 
vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.823, "pct_cuda_time": 0.10842472114548077, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.016247703301043234, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 212.852, "cuda_time_us": 22.175, "pct_cuda_time": 0.2447641444977131, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.175, "pct_cuda_time": 0.2447641444977131, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 96.478, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 503.198, "cuda_time_us": 190.71699999999998, "pct_cuda_time": 2.10510409678333, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.243, "cuda_time_us": 104.702, "pct_cuda_time": 1.1556841243381988, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.966, "pct_cuda_time": 1.1475602726876772, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.19, "cuda_time_us": 11.584, "pct_cuda_time": 0.1278623607603837, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.584, "pct_cuda_time": 0.1278623607603837, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 165.656, "cuda_time_us": 74.431, "pct_cuda_time": 0.8215576116847478, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.431, "pct_cuda_time": 0.8215576116847478, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4011.407, "cuda_time_us": 269.692, "pct_cuda_time": 2.976817662136516, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 98.783, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2680.21, "cuda_time_us": 69.66199999999999, "pct_cuda_time": 0.7689181435851044, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 164.08, "cuda_time_us": 28.799, "pct_cuda_time": 0.31787880935240764, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.799, "pct_cuda_time": 0.31787880935240764, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- 
matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 541.285, "cuda_time_us": 4.864, "pct_cuda_time": 0.05368806308170807, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.864, "pct_cuda_time": 0.05368806308170807, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1371.62, "cuda_time_us": 14.08, "pct_cuda_time": 0.1554128141838918, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.03214219566075944, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.696, "pct_cuda_time": 0.10702291522208913, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.016247703301043234, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, 
None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 416.069, "cuda_time_us": 21.919, "pct_cuda_time": 0.2419384569670969, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.919, "pct_cuda_time": 0.2419384569670969, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 167.559, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 928.307, "cuda_time_us": 191.198, "pct_cuda_time": 2.1104132987451525, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 325.66, "cuda_time_us": 105.15, "pct_cuda_time": 1.1606290775167771, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.008112813808605146, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.415, "pct_cuda_time": 1.152516263708172, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 204.659, "cuda_time_us": 11.168, "pct_cuda_time": 0.12327061852313234, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.12327061852313234, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 292.8, "cuda_time_us": 74.88, "pct_cuda_time": 0.8265136027052428, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.88, "pct_cuda_time": 0.8265136027052428, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] 
} ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3567.891, "cuda_time_us": 268.05899999999997, "pct_cuda_time": 2.958792866286921, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 136.036, "cuda_time_us": 4.32, "pct_cuda_time": 0.04768347707914862, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.32, "pct_cuda_time": 0.04768347707914862, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2774.406, "cuda_time_us": 68.35, "pct_cuda_time": 0.7544364949906962, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.944, "cuda_time_us": 28.192, "pct_cuda_time": 0.31117883930911067, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.192, "pct_cuda_time": 0.31117883930911067, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 625.45, "cuda_time_us": 4.671, "pct_cuda_time": 0.05155775959182944, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.671, "pct_cuda_time": 0.05155775959182944, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1421.16, "cuda_time_us": 13.822999999999999, "pct_cuda_time": 0.15257608881135912, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.879, "pct_cuda_time": 0.03177794687751594, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, 
false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.664, "pct_cuda_time": 0.10666970428076208, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.014128437653081072, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 370.746, "cuda_time_us": 21.664, "pct_cuda_time": 0.23912380727839716, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.664, "pct_cuda_time": 0.23912380727839716, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.158, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.352, "pct_cuda_time": 0.04803668802047565, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.843, "cuda_time_us": 191.037, "pct_cuda_time": 2.1086362061966004, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 184.955, "cuda_time_us": 105.119, "pct_cuda_time": 1.1602869044173667, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.383, "pct_cuda_time": 1.152163052766845, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.001, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.488, "pct_cuda_time": 0.12680272793640263, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.42, "cuda_time_us": 74.43, "pct_cuda_time": 0.8215465738428315, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.43, "pct_cuda_time": 0.8215465738428315, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2898.694, "cuda_time_us": 268.507, "pct_cuda_time": 2.9637378194654995, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.824, "cuda_time_us": 4.287, "pct_cuda_time": 0.04731922829590512, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.287, "pct_cuda_time": 0.04731922829590512, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1781.216, "cuda_time_us": 69.344, "pct_cuda_time": 0.7654081098556671, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.937, "cuda_time_us": 28.352, "pct_cuda_time": 0.31294489401574577, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.352, "pct_cuda_time": 0.31294489401574577, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 599.247, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 706.061, 
"cuda_time_us": 14.304, "pct_cuda_time": 0.15788529077318098, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.03214219566075944, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.952, "pct_cuda_time": 0.10984860275270535, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.015894492359716204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.617, "cuda_time_us": 22.048, "pct_cuda_time": 0.24336233857432144, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.048, "pct_cuda_time": 0.24336233857432144, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- 
matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.284, "cuda_time_us": 4.607, "pct_cuda_time": 0.05085133770917539, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.607, "pct_cuda_time": 0.05085133770917539, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 890.035, "cuda_time_us": 190.269, "pct_cuda_time": 2.100159143604752, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 307.1, "cuda_time_us": 104.60600000000001, "pct_cuda_time": 1.1546244915142179, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.87, "pct_cuda_time": 1.146500639863696, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 195.58, "cuda_time_us": 11.168, "pct_cuda_time": 0.12327061852313234, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.12327061852313234, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 306.507, "cuda_time_us": 74.495, "pct_cuda_time": 0.822264033567402, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.495, "pct_cuda_time": 0.822264033567402, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4150.395, "cuda_time_us": 267.356, "pct_cuda_time": 2.9510332634196432, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 144.74, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" 
}, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2805.418, "cuda_time_us": 69.311, "pct_cuda_time": 0.7650438610724236, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 275.499, "cuda_time_us": 28.256, "pct_cuda_time": 0.3118852611917647, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.256, "pct_cuda_time": 0.3118852611917647, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 1070.994, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1020.948, "cuda_time_us": 14.336, "pct_cuda_time": 0.158238501714508, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.944, "pct_cuda_time": 0.03249540660208647, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.888, "pct_cuda_time": 0.10914218087005127, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.01660091424237026, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.399, "cuda_time_us": 22.079, "pct_cuda_time": 0.24370451167373205, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.079, "pct_cuda_time": 0.24370451167373205, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 112.815, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.448, "pct_cuda_time": 0.04909632084445673, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 952.039, "cuda_time_us": 189.149, "pct_cuda_time": 2.087796760658306, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 322.063, "cuda_time_us": 102.622, "pct_cuda_time": 1.132725413151942, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 101.886, "pct_cuda_time": 1.1246015615014204, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 237.817, "cuda_time_us": 11.52, "pct_cuda_time": 0.12715593887772964, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.52, "pct_cuda_time": 0.12715593887772964, "trace": 
"_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 276.034, "cuda_time_us": 75.007, "pct_cuda_time": 0.8279154086286344, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 75.007, "pct_cuda_time": 0.8279154086286344, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3173.323, "cuda_time_us": 269.628, "pct_cuda_time": 2.976111240253862, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 136.812, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2420.797, "cuda_time_us": 68.511, "pct_cuda_time": 0.756213587539248, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 263.773, "cuda_time_us": 28.128, "pct_cuda_time": 0.31047241742645654, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 28.128, "pct_cuda_time": 0.31047241742645654, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 986.348, "cuda_time_us": 4.896, "pct_cuda_time": 0.05404127402303511, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.896, "pct_cuda_time": 0.05404127402303511, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 750.879, "cuda_time_us": 14.176, "pct_cuda_time": 0.15647244700787288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.03320182848474052, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], 
None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.696, "pct_cuda_time": 0.10702291522208913, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.016247703301043234, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 171.962, "cuda_time_us": 21.311, "pct_cuda_time": 0.2352274490818834, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.311, "pct_cuda_time": 0.2352274490818834, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.642, "cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.384, "pct_cuda_time": 0.04838989896180267, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 435.845, "cuda_time_us": 
192.317, "pct_cuda_time": 2.1227646438496817, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.912, "cuda_time_us": 105.118, "pct_cuda_time": 1.1602758665754502, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.008466024749932174, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 104.351, "pct_cuda_time": 1.151809841825518, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.376, "cuda_time_us": 11.68, "pct_cuda_time": 0.1289219935843648, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.68, "pct_cuda_time": 0.1289219935843648, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 132.287, "cuda_time_us": 75.519, "pct_cuda_time": 0.8335667836898668, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 75.519, "pct_cuda_time": 0.8335667836898668, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3165.148, "cuda_time_us": 267.23, "pct_cuda_time": 2.949642495338168, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 64.8, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.416, "pct_cuda_time": 0.0487431099031297, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1924.292, "cuda_time_us": 68.319, "pct_cuda_time": 0.7540943218912858, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 131.889, "cuda_time_us": 27.999, "pct_cuda_time": 0.309048535819232, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.999, "pct_cuda_time": 0.309048535819232, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[256, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { 
"name": "Llama3RotaryEmbedding", "cpu_time_us": 622.703, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.64, "pct_cuda_time": 0.051215586492418884, "trace": "_C::rotary_embedding(int64[256], bfloat16[256, 4096], bfloat16[256, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 787.321, "cuda_time_us": 14.112, "pct_cuda_time": 0.15576602512521884, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.03214219566075944, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[256], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 9.76, "pct_cuda_time": 0.10772933710474317, "trace": "_vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.015894492359716204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[256, 32, 128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], None, None, bfloat16[256, 32, 128], int32[3], int32[3], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[256, 32, 
128], bfloat16[256, 8, 128], bfloat16[256, 8, 128], bfloat16[256, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 205.953, "cuda_time_us": 21.568, "pct_cuda_time": 0.23806417445441608, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.568, "pct_cuda_time": 0.23806417445441608, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[256, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 174.008, "cuda_time_us": 4.512, "pct_cuda_time": 0.04980274272711078, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.512, "pct_cuda_time": 0.04980274272711078, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 909.983, "cuda_time_us": 189.983, "pct_cuda_time": 2.097002320816642, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 328.31, "cuda_time_us": 104.544, "pct_cuda_time": 1.1539401453153966, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.008134889492438086, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 103.807, "pct_cuda_time": 1.1458052558229586, "trace": "mm(bfloat16[256, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[256, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[256, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 187.123, "cuda_time_us": 11.392, "pct_cuda_time": 0.12574309511242154, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 11.392, "pct_cuda_time": 0.12574309511242154, "trace": "_C::silu_and_mul(bfloat16[256, 14336], bfloat16[256, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 289.968, "cuda_time_us": 74.047, "pct_cuda_time": 0.8173190803888236, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 74.047, "pct_cuda_time": 0.8173190803888236, "trace": "mm(bfloat16[256, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[256, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[256, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 140.462, "cuda_time_us": 4.353, "pct_cuda_time": 
0.048047725862392116, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.353, "pct_cuda_time": 0.048047725862392116, "trace": "_C::fused_add_rms_norm(bfloat16[256, 4096], bfloat16[256, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 830.066, "cuda_time_us": 352.538, "pct_cuda_time": 3.891258713548356, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.03425042346680514, "trace": "index_select(bfloat16[256, 4096], 0, int64[2])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.008123851650521617, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[2, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 348.699, "pct_cuda_time": 3.8488844384310292, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[2, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3286.462, "cuda_time_us": 115.389, "pct_cuda_time": 1.273645540899509, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.00883027353317567, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.00883027353317567, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.00883027353317567, "trace": "copy_(int32[2], int32[2], True) <- _to_copy(int32[2], 3, 0, None, None, True, None) <- to(int32[2], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.008477062591848643, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.008477062591848643, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.008466024749932174, 
"trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.008477062591848643, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 4.32, "pct_cuda_time": 0.04768347707914862, "trace": "copy_(float32[2, 128256], bfloat16[2, 128256], False) <- _to_copy(bfloat16[2, 128256], 6, None, None, None, False, None) <- to(bfloat16[2, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 4.672, "pct_cuda_time": 0.051568797433745914, "trace": "div_(float32[2, 128256], bfloat16[2, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 34.655, "pct_cuda_time": 0.38251641161525357, "trace": "_softmax(float32[2, 128256], -1, False) <- softmax(float32[2, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.0, "pct_cuda_time": 0.30905957366114845, "trace": "_log_softmax(float32[2, 128256], -1, False) <- log_softmax(float32[2, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.92, "pct_cuda_time": 0.02119265647962161, "trace": "copy_(int64[2], int32[2], False) <- _to_copy(int32[2], 4, None, None, None, False, None) <- to(int32[2], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, 
long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 4.895, "pct_cuda_time": 0.05403023618111862, "trace": "index(float32[2, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 28.448, "pct_cuda_time": 0.31400452683972685, "trace": "argmax(float32[2, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.03320182848474052, "trace": "copy_(int64[2], int64[2], False) <- _to_copy(int64[2], 4, 0, None, None, False, None) <- to(int64[2], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] }, "decode_1": { "metadata": { "num_running_seqs": 2 }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", "cuda_time_us": 6401.762000000001, "pct_cuda_time": 93.3415762365266, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 3.488, "pct_cuda_time": 0.05085715743774991, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 3.488, "pct_cuda_time": 0.05085715743774991, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 6395.202, "pct_cuda_time": 93.24592745418954, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 198.74600000000004, "pct_cuda_time": 2.8978373314572954, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.032, "pct_cuda_time": 0.058789007680334765, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 194.71400000000006, "pct_cuda_time": 2.839048323776961, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 1807.9409999999998, "pct_cuda_time": 26.360877315127006, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 660.0540000000001, "pct_cuda_time": 9.623988014740993, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 660.0540000000001, "pct_cuda_time": 9.623988014740993, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 117.14999999999999, "pct_cuda_time": 
1.7081181174978217, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 117.14999999999999, "pct_cuda_time": 1.7081181174978217, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 470.172, "pct_cuda_time": 6.855393184295227, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 78.23799999999999, "pct_cuda_time": 1.1407575354399877, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 350.10900000000004, "pct_cuda_time": 5.104801758421211, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 41.825, "pct_cuda_time": 0.6098338904340281, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 560.565, "pct_cuda_time": 8.17337799859297, "invocations": 32 }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cuda_time_us": 491.9909999999998, "pct_cuda_time": 7.173527449815369, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cuda_time_us": 68.57399999999998, "pct_cuda_time": 0.9998505487775979, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 4388.515, "pct_cuda_time": 63.987212807605246, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 2723.4190000000003, "pct_cuda_time": 39.70910230847461, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 
2723.4190000000003, "pct_cuda_time": 39.70910230847461, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 282.0440000000001, "pct_cuda_time": 4.112372738638974, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 282.0440000000001, "pct_cuda_time": 4.112372738638974, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 1383.0520000000001, "pct_cuda_time": 20.165737760491655, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 1383.0520000000001, "pct_cuda_time": 20.165737760491655, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 342.07500000000005, "pct_cuda_time": 4.98766116127245, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 0.010731326798791265, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 338.331, "pct_cuda_time": 4.9330713684264245, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 114.588, "pct_cuda_time": 1.670762602200942, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.503, "pct_cuda_time": 0.08023708067085372, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 4.32, "pct_cuda_time": 0.06298822251464439, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 4.608, "pct_cuda_time": 0.06718743734895401, "invocations": 1 }, "children": [] }, { 
"entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 34.719, "pct_cuda_time": 0.5062240966402636, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.159, "pct_cuda_time": 0.41057531430321087, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 2.048, "pct_cuda_time": 0.029861083266201783, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 4.928, "pct_cuda_time": 0.07185323160929803, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 27.839, "pct_cuda_time": 0.4059095200428669, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.464, "pct_cuda_time": 0.035926615804649016, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 83805.734, "cuda_time_us": 6401.762000000001, "pct_cuda_time": 93.3415762365266, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 432.673, "cuda_time_us": 3.488, "pct_cuda_time": 0.05085715743774991, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 3.488, "pct_cuda_time": 0.05085715743774991, "trace": "index_select(bfloat16[128256, 4096], 0, int64[2]) <- embedding(bfloat16[128256, 4096], int64[2], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 5748.045, "cuda_time_us": 204.637, "pct_cuda_time": 2.9837316876688154, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 414.24, 
"cuda_time_us": 4.032, "pct_cuda_time": 0.058789007680334765, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.032, "pct_cuda_time": 0.058789007680334765, "trace": "_C::rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 4221.927, "cuda_time_us": 59.903999999999996, "pct_cuda_time": 0.8734366855364021, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 897.222, "cuda_time_us": 23.712, "pct_cuda_time": 0.3457353546914925, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 23.712, "pct_cuda_time": 0.3457353546914925, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 1217.87, "cuda_time_us": 3.488, "pct_cuda_time": 0.05085715743774991, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.488, "pct_cuda_time": 0.05085715743774991, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1385.569, "cuda_time_us": 14.687999999999999, "pct_cuda_time": 0.2141599565497909, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.272, "pct_cuda_time": 0.0331271392484426, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, 
"cuda_time_us": 11.104, "pct_cuda_time": 0.1619030608339378, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019129756467410518, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 352.434, "cuda_time_us": 18.016, "pct_cuda_time": 0.2626842168573688, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.936, "pct_cuda_time": 0.23235655416513262, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 162.578, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 793.292, "cuda_time_us": 137.62900000000002, "pct_cuda_time": 2.0067143695527765, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 277.383, "cuda_time_us": 84.863, "pct_cuda_time": 1.2373540572361728, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.863, "pct_cuda_time": 1.2373540572361728, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 244.588, "cuda_time_us": 8.703, "pct_cuda_time": 0.126895023274294, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.703, "pct_cuda_time": 0.126895023274294, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 183.274, "cuda_time_us": 44.063, "pct_cuda_time": 0.6424652890423093, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.063, "pct_cuda_time": 0.6424652890423093, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2656.953, "cuda_time_us": 200.51299999999998, "pct_cuda_time": 2.923601264138632, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.989, "cuda_time_us": 2.944, "pct_cuda_time": 0.04292530719516506, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.944, "pct_cuda_time": 0.04292530719516506, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1910.006, "cuda_time_us": 57.218, "pct_cuda_time": 0.8342731749636395, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.535, "cuda_time_us": 21.344, "pct_cuda_time": 0.3112084771649467, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.344, "pct_cuda_time": 0.3112084771649467, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 571.034, "cuda_time_us": 3.616, "pct_cuda_time": 0.05272347514188753, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05272347514188753, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 828.856, "cuda_time_us": 14.753000000000002, 
"pct_cuda_time": 0.21510769600892335, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03732635408275223, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.912, "pct_cuda_time": 0.15910358427773139, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.281, "pct_cuda_time": 0.01867775764843969, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.893, "cuda_time_us": 17.505, "pct_cuda_time": 0.2552335266478819, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.328, "pct_cuda_time": 0.22349154507047894, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- 
linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.177, "pct_cuda_time": 0.03174198157740297, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.586, "cuda_time_us": 3.105, "pct_cuda_time": 0.04527278493240065, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.04527278493240065, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 484.132, "cuda_time_us": 137.24599999999998, "pct_cuda_time": 2.001129997047426, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.974, "cuda_time_us": 84.831, "pct_cuda_time": 1.2368874778101386, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.831, "pct_cuda_time": 1.2368874778101386, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.489, "cuda_time_us": 8.96, "pct_cuda_time": 0.1306422392896328, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.1306422392896328, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.285, "cuda_time_us": 43.455, "pct_cuda_time": 0.6336002799476554, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.455, "pct_cuda_time": 0.6336002799476554, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2497.628, "cuda_time_us": 201.05200000000002, "pct_cuda_time": 2.931460211345899, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.87, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, 
c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1763.383, "cuda_time_us": 57.79, "pct_cuda_time": 0.8426132822040044, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.344, "cuda_time_us": 21.503, "pct_cuda_time": 0.31352679368805514, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.503, "pct_cuda_time": 0.31352679368805514, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 501.345, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 771.683, "cuda_time_us": 14.559, "pct_cuda_time": 0.21227905823858972, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.399, "pct_cuda_time": 0.03497887634551664, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.848, "pct_cuda_time": 0.1581704254256626, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, 
bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019129756467410518, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 162.435, "cuda_time_us": 18.08, "pct_cuda_time": 0.2636173757094376, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.392, "pct_cuda_time": 0.2244247039225478, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.03919267178688984, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.789, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 473.472, "cuda_time_us": 137.086, "pct_cuda_time": 1.9987970999172548, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.907, "cuda_time_us": 84.671, "pct_cuda_time": 1.2345545806799665, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.671, "pct_cuda_time": 1.2345545806799665, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- 
linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.165, "cuda_time_us": 9.12, "pct_cuda_time": 0.13297513641980482, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.13297513641980482, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.534, "cuda_time_us": 43.295, "pct_cuda_time": 0.6312673828174835, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.295, "pct_cuda_time": 0.6312673828174835, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2498.242, "cuda_time_us": 198.71699999999998, "pct_cuda_time": 2.897414493852451, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.73, "cuda_time_us": 3.039, "pct_cuda_time": 0.0443104648662047, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.039, "pct_cuda_time": 0.0443104648662047, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1761.831, "cuda_time_us": 55.936, "pct_cuda_time": 0.8155808367081362, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.782, "cuda_time_us": 20.352, "pct_cuda_time": 0.29674451495788023, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.352, "pct_cuda_time": 0.29674451495788023, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 520.587, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 766.34, "cuda_time_us": 14.625000000000002, "pct_cuda_time": 0.2132413783047857, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float 
const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.944, "pct_cuda_time": 0.1595701637037658, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.281, "pct_cuda_time": 0.01867775764843969, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 163.995, "cuda_time_us": 17.311, "pct_cuda_time": 0.25240488887754836, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.2, "pct_cuda_time": 0.22162522736634138, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, 
float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.111, "pct_cuda_time": 0.030779661511207014, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.118, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 487.99, "cuda_time_us": 136.702, "pct_cuda_time": 1.9931981468048416, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.687, "cuda_time_us": 85.535, "pct_cuda_time": 1.2471522251828953, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.535, "pct_cuda_time": 1.2471522251828953, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.895, "cuda_time_us": 8.639, "pct_cuda_time": 0.12596186442222518, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.639, "pct_cuda_time": 0.12596186442222518, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 165.389, "cuda_time_us": 42.528, "pct_cuda_time": 0.6200840571997215, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.528, "pct_cuda_time": 0.6200840571997215, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2402.644, "cuda_time_us": 201.179, "pct_cuda_time": 2.933311948442973, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.072, "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1689.54, "cuda_time_us": 
57.565999999999995, "pct_cuda_time": 0.8393472262217636, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.045, "cuda_time_us": 21.567, "pct_cuda_time": 0.31445995254012393, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.567, "pct_cuda_time": 0.31445995254012393, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 479.534, "cuda_time_us": 3.84, "pct_cuda_time": 0.05598953112412834, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05598953112412834, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 748.84, "cuda_time_us": 14.783, "pct_cuda_time": 0.21554511422083056, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.399, "pct_cuda_time": 0.03497887634551664, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.944, "pct_cuda_time": 0.1595701637037658, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, 
"children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020996074171548126, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 158.731, "cuda_time_us": 17.375999999999998, "pct_cuda_time": 0.25335262833668076, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.296, "pct_cuda_time": 0.22302496564444457, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.501, "cuda_time_us": 3.231, "pct_cuda_time": 0.04710994142241111, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.231, "pct_cuda_time": 0.04710994142241111, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 466.565, "cuda_time_us": 137.374, "pct_cuda_time": 2.0029963147515644, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.121, "cuda_time_us": 84.671, "pct_cuda_time": 1.2345545806799665, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.671, "pct_cuda_time": 1.2345545806799665, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.281, "cuda_time_us": 9.056, "pct_cuda_time": 0.132041977567736, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), 
true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.132041977567736, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.829, "cuda_time_us": 43.647, "pct_cuda_time": 0.6363997565038619, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.647, "pct_cuda_time": 0.6363997565038619, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2558.912, "cuda_time_us": 199.517, "pct_cuda_time": 2.909078979503311, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.972, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1838.436, "cuda_time_us": 56.510999999999996, "pct_cuda_time": 0.8239646857696918, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.304, "cuda_time_us": 20.703, "pct_cuda_time": 0.30186230803719505, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.703, "pct_cuda_time": 0.30186230803719505, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 462.209, "cuda_time_us": 3.616, "pct_cuda_time": 0.05272347514188753, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05272347514188753, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 907.698, "cuda_time_us": 14.783999999999999, "pct_cuda_time": 0.2155596948278941, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03546003637861462, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- 
vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.072, "pct_cuda_time": 0.1614364814079034, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.775, "cuda_time_us": 17.408, "pct_cuda_time": 0.2538192077627152, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.296, "pct_cuda_time": 0.22302496564444457, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- 
linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.29, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 470.26, "cuda_time_us": 136.82999999999998, "pct_cuda_time": 1.9950644645089795, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.474, "cuda_time_us": 84.543, "pct_cuda_time": 1.232688262975829, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.543, "pct_cuda_time": 1.232688262975829, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.431, "cuda_time_us": 8.832, "pct_cuda_time": 0.1287759215854952, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.832, "pct_cuda_time": 0.1287759215854952, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.352, "cuda_time_us": 43.455, "pct_cuda_time": 0.6336002799476554, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.455, "pct_cuda_time": 0.6336002799476554, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2477.471, "cuda_time_us": 198.30200000000002, "pct_cuda_time": 2.8913635419210677, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.107, "cuda_time_us": 3.071, "pct_cuda_time": 0.0447770442922391, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.0447770442922391, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1733.781, "cuda_time_us": 56.032000000000004, "pct_cuda_time": 0.8169805749862394, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.364, "cuda_time_us": 20.384, "pct_cuda_time": 0.29721109438391463, "trace": "" }, "children": [ { "entry": 
{ "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.384, "pct_cuda_time": 0.29721109438391463, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 492.93, "cuda_time_us": 3.68, "pct_cuda_time": 0.05365663399395633, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05365663399395633, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 768.544, "cuda_time_us": 14.624, "pct_cuda_time": 0.2132267976977221, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.944, "pct_cuda_time": 0.1595701637037658, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) 
<- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.888, "cuda_time_us": 17.344, "pct_cuda_time": 0.25288604891064637, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.232, "pct_cuda_time": 0.22209180679237575, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.289, "cuda_time_us": 3.136, "pct_cuda_time": 0.045724783751371484, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.045724783751371484, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 492.758, "cuda_time_us": 136.06300000000002, "pct_cuda_time": 1.9838811388912179, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 185.079, "cuda_time_us": 84.415, "pct_cuda_time": 1.2308219452716913, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.415, "pct_cuda_time": 1.2308219452716913, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.795, "cuda_time_us": 8.768, "pct_cuda_time": 0.1278427627334264, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.768, "pct_cuda_time": 0.1278427627334264, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", 
"cpu_time_us": 149.956, "cuda_time_us": 42.88, "pct_cuda_time": 0.6252164308860999, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.88, "pct_cuda_time": 0.6252164308860999, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2382.754, "cuda_time_us": 200.059, "pct_cuda_time": 2.916981668531769, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.925, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1686.702, "cuda_time_us": 56.317, "pct_cuda_time": 0.8211360479993584, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 169.419, "cuda_time_us": 20.223, "pct_cuda_time": 0.29486361664667904, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.223, "pct_cuda_time": 0.29486361664667904, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.452, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 718.29, "cuda_time_us": 15.007, "pct_cuda_time": 0.21881117020307134, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.038259512934821036, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, 
false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.103, "pct_cuda_time": 0.16188848022687422, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 162.839, "cuda_time_us": 17.439, "pct_cuda_time": 0.25427120658168595, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.328, "pct_cuda_time": 0.22349154507047894, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.111, "pct_cuda_time": 0.030779661511207014, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.139, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.827, "cuda_time_us": 137.726, "pct_cuda_time": 2.008128688437943, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.52, "cuda_time_us": 85.567, "pct_cuda_time": 1.2476188046089296, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.567, "pct_cuda_time": 1.2476188046089296, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.999, "cuda_time_us": 8.768, "pct_cuda_time": 0.1278427627334264, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.768, "pct_cuda_time": 0.1278427627334264, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.565, "cuda_time_us": 43.391, "pct_cuda_time": 0.6326671210955868, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.391, "pct_cuda_time": 0.6326671210955868, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2390.92, "cuda_time_us": 200.892, "pct_cuda_time": 2.929127314215727, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.606, "cuda_time_us": 3.263, "pct_cuda_time": 0.04757652084844552, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.263, "pct_cuda_time": 0.04757652084844552, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1655.428, "cuda_time_us": 56.992000000000004, "pct_cuda_time": 0.8309779577672715, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.908, "cuda_time_us": 20.896, "pct_cuda_time": 0.3046763652004651, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.896, "pct_cuda_time": 0.3046763652004651, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], 
bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 460.899, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 729.684, "cuda_time_us": 14.721, "pct_cuda_time": 0.21464111658288887, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.401, "pct_cuda_time": 0.03500803755964379, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.816, "pct_cuda_time": 0.15770384599962817, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.021929233023616934, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, 
None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 165.886, "cuda_time_us": 17.727, "pct_cuda_time": 0.2584704214159956, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.615, "pct_cuda_time": 0.22767617929772502, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 107.614, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 482.775, "cuda_time_us": 137.469, "pct_cuda_time": 2.004381472422604, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 176.271, "cuda_time_us": 85.246, "pct_cuda_time": 1.242938429741522, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.246, "pct_cuda_time": 1.242938429741522, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.573, "cuda_time_us": 8.64, "pct_cuda_time": 0.12597644502928879, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.64, "pct_cuda_time": 0.12597644502928879, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.52, "cuda_time_us": 43.583, "pct_cuda_time": 0.6354665976517931, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.583, 
"pct_cuda_time": 0.6354665976517931, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2452.202, "cuda_time_us": 200.286, "pct_cuda_time": 2.9202914663352004, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.038, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1757.805, "cuda_time_us": 56.672, "pct_cuda_time": 0.8263121635069275, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 228.372, "cuda_time_us": 20.32, "pct_cuda_time": 0.29627793553184584, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.32, "pct_cuda_time": 0.29627793553184584, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 500.55, "cuda_time_us": 3.808, "pct_cuda_time": 0.05552295169809394, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05552295169809394, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 708.327, "cuda_time_us": 14.624, "pct_cuda_time": 0.2132267976977221, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, 
true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.944, "pct_cuda_time": 0.1595701637037658, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 159.045, "cuda_time_us": 17.919999999999998, "pct_cuda_time": 0.26128447857926557, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.2276907599047886, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.304, "pct_cuda_time": 0.033593718674477004, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.193, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 459.956, "cuda_time_us": 137.406, "pct_cuda_time": 2.003462894177599, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.985, "cuda_time_us": 85.631, "pct_cuda_time": 1.2485519634609985, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.631, "pct_cuda_time": 1.2485519634609985, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.743, "cuda_time_us": 8.896, "pct_cuda_time": 0.12970908043756402, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.12970908043756402, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.254, "cuda_time_us": 42.879, "pct_cuda_time": 0.6252018502790362, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.879, "pct_cuda_time": 0.6252018502790362, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2344.56, "cuda_time_us": 199.90200000000002, "pct_cuda_time": 2.914692513222788, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.816, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1650.606, "cuda_time_us": 56.798, "pct_cuda_time": 0.8281493199969381, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.023, "cuda_time_us": 20.671, "pct_cuda_time": 0.3013957286111607, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.671, "pct_cuda_time": 0.3013957286111607, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 460.036, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long 
const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 740.631, "cuda_time_us": 14.751, "pct_cuda_time": 0.21507853479479613, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.071, "pct_cuda_time": 0.16142190080083982, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 164.97, 
"cuda_time_us": 17.728, "pct_cuda_time": 0.2584850020230592, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.2276907599047886, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.982, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 464.529, "cuda_time_us": 136.89600000000002, "pct_cuda_time": 1.9960267845751758, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.015, "cuda_time_us": 85.151, "pct_cuda_time": 1.2415532720704825, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.151, "pct_cuda_time": 1.2415532720704825, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.256, "cuda_time_us": 8.641, "pct_cuda_time": 0.12599102563635234, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.641, "pct_cuda_time": 0.12599102563635234, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.068, "cuda_time_us": 43.104, "pct_cuda_time": 0.6284824868683407, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.104, "pct_cuda_time": 0.6284824868683407, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2470.656, 
"cuda_time_us": 198.588, "pct_cuda_time": 2.8955335955412496, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 92.503, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1766.174, "cuda_time_us": 55.775, "pct_cuda_time": 0.8132333589709005, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.566, "cuda_time_us": 20.352, "pct_cuda_time": 0.29674451495788023, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.352, "pct_cuda_time": 0.29674451495788023, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 514.615, "cuda_time_us": 3.551, "pct_cuda_time": 0.05177573568275514, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.551, "pct_cuda_time": 0.05177573568275514, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 711.795, "cuda_time_us": 14.656, "pct_cuda_time": 0.21369337712375652, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03546003637861462, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, 
cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.944, "pct_cuda_time": 0.1595701637037658, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 160.464, "cuda_time_us": 17.216, "pct_cuda_time": 0.2510197312065088, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.104, "pct_cuda_time": 0.22022548908823816, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.646, "cuda_time_us": 3.071, "pct_cuda_time": 0.0447770442922391, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.0447770442922391, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.201, "cuda_time_us": 136.702, "pct_cuda_time": 1.9931981468048416, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.528, "cuda_time_us": 84.895, 
"pct_cuda_time": 1.2378206366622073, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.895, "pct_cuda_time": 1.2378206366622073, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.821, "cuda_time_us": 8.735, "pct_cuda_time": 0.1273616027003284, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.735, "pct_cuda_time": 0.1273616027003284, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.708, "cuda_time_us": 43.072, "pct_cuda_time": 0.6280159074423063, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.072, "pct_cuda_time": 0.6280159074423063, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2583.144, "cuda_time_us": 198.91, "pct_cuda_time": 2.9002285510157213, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.654, "cuda_time_us": 3.136, "pct_cuda_time": 0.045724783751371484, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.045724783751371484, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1851.553, "cuda_time_us": 56.159, "pct_cuda_time": 0.8188323120833134, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.216, "cuda_time_us": 20.383, "pct_cuda_time": 0.2971965137768511, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.383, "pct_cuda_time": 0.2971965137768511, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 491.64, "cuda_time_us": 3.872, "pct_cuda_time": 0.05645611055016275, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.05645611055016275, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": 
"Attention", "cpu_time_us": 901.1, "cuda_time_us": 14.687999999999999, "pct_cuda_time": 0.2141599565497909, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.008, "pct_cuda_time": 0.16050332255583458, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 170.461, "cuda_time_us": 17.216, "pct_cuda_time": 0.2510197312065088, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.136, "pct_cuda_time": 0.22069206851427253, "trace": "mm(bfloat16[2, 4096], 
bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.631, "cuda_time_us": 3.105, "pct_cuda_time": 0.04527278493240065, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.04527278493240065, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 486.904, "cuda_time_us": 136.51, "pct_cuda_time": 1.9903986702486351, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.001, "cuda_time_us": 85.343, "pct_cuda_time": 1.244352748626689, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.343, "pct_cuda_time": 1.244352748626689, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.588, "cuda_time_us": 8.64, "pct_cuda_time": 0.12597644502928879, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.64, "pct_cuda_time": 0.12597644502928879, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.064, "cuda_time_us": 42.527, "pct_cuda_time": 0.6200694765926579, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.527, "pct_cuda_time": 0.6200694765926579, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2408.871, "cuda_time_us": 198.75, "pct_cuda_time": 2.8978956538855494, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.184, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type 
vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1707.388, "cuda_time_us": 55.872, "pct_cuda_time": 0.8146476778560674, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.982, "cuda_time_us": 20.16, "pct_cuda_time": 0.29394503840167385, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.16, "pct_cuda_time": 0.29394503840167385, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 493.248, "cuda_time_us": 3.584, "pct_cuda_time": 0.05225689571585312, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.584, "pct_cuda_time": 0.05225689571585312, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 748.588, "cuda_time_us": 14.592, "pct_cuda_time": 0.21276021827168773, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03452687752654581, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.944, "pct_cuda_time": 0.1595701637037658, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 
128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.115, "cuda_time_us": 17.536, "pct_cuda_time": 0.2556855254668528, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.424, "pct_cuda_time": 0.22489128334858216, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.882, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 476.274, "cuda_time_us": 136.73399999999998, "pct_cuda_time": 1.993664726230876, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.847, "cuda_time_us": 84.255, "pct_cuda_time": 1.228489048141519, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.255, "pct_cuda_time": 1.228489048141519, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 
28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.143, "cuda_time_us": 8.928, "pct_cuda_time": 0.13017565986359841, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.13017565986359841, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.345, "cuda_time_us": 43.551, "pct_cuda_time": 0.6350000182257587, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.551, "pct_cuda_time": 0.6350000182257587, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2392.838, "cuda_time_us": 199.73999999999998, "pct_cuda_time": 2.912330454878488, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.752, "cuda_time_us": 3.296, "pct_cuda_time": 0.04805768088154349, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04805768088154349, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1706.885, "cuda_time_us": 56.382, "pct_cuda_time": 0.8220837874584906, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 174.984, "cuda_time_us": 20.288, "pct_cuda_time": 0.2958113561058114, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.288, "pct_cuda_time": 0.2958113561058114, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 506.297, "cuda_time_us": 3.712, "pct_cuda_time": 0.05412321341999074, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05412321341999074, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 717.762, "cuda_time_us": 14.559, "pct_cuda_time": 0.21227905823858972, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, 
__nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.431, "pct_cuda_time": 0.03544545577155104, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.848, "pct_cuda_time": 0.1581704254256626, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 155.747, "cuda_time_us": 17.823, "pct_cuda_time": 0.2598701596940988, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.583, "pct_cuda_time": 0.2272095998716906, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, 
__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.24, "pct_cuda_time": 0.0326605598224082, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.856, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 457.12, "cuda_time_us": 137.022, "pct_cuda_time": 1.997863941065186, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.288, "cuda_time_us": 85.279, "pct_cuda_time": 1.24341958977462, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.279, "pct_cuda_time": 1.24341958977462, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.697, "cuda_time_us": 8.672, "pct_cuda_time": 0.12644302445532318, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.672, "pct_cuda_time": 0.12644302445532318, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.157, "cuda_time_us": 43.071, "pct_cuda_time": 0.6280013268352427, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.071, "pct_cuda_time": 0.6280013268352427, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2290.271, "cuda_time_us": 199.329, "pct_cuda_time": 2.9063378253753593, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.192, "cuda_time_us": 3.105, "pct_cuda_time": 0.04527278493240065, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.04527278493240065, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": 
"LlamaAttention", "cpu_time_us": 1597.686, "cuda_time_us": 56.257000000000005, "pct_cuda_time": 0.820261211575544, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.672, "cuda_time_us": 20.288, "pct_cuda_time": 0.2958113561058114, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.288, "pct_cuda_time": 0.2958113561058114, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.594, "cuda_time_us": 3.585, "pct_cuda_time": 0.052271476322916705, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.585, "pct_cuda_time": 0.052271476322916705, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 675.35, "cuda_time_us": 14.784, "pct_cuda_time": 0.21555969482789417, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03732635408275223, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.944, "pct_cuda_time": 0.1595701637037658, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], 
bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 152.586, "cuda_time_us": 17.6, "pct_cuda_time": 0.2566186843189216, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.488, "pct_cuda_time": 0.225824442200651, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.72, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.916, "cuda_time_us": 136.799, "pct_cuda_time": 1.9946124656900088, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.848, "cuda_time_us": 85.023, "pct_cuda_time": 1.2396869543663447, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.023, "pct_cuda_time": 1.2396869543663447, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.985, "cuda_time_us": 8.512, "pct_cuda_time": 0.12411012732515118, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.512, "pct_cuda_time": 0.12411012732515118, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.383, "cuda_time_us": 43.264, "pct_cuda_time": 0.6308153839985127, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.264, "pct_cuda_time": 0.6308153839985127, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2422.23, "cuda_time_us": 199.869, "pct_cuda_time": 2.9142113531896894, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.568, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1718.765, "cuda_time_us": 56.862, "pct_cuda_time": 0.8290824788490068, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.262, "cuda_time_us": 20.48, "pct_cuda_time": 0.2986108326620178, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.48, "pct_cuda_time": 0.2986108326620178, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 511.193, "cuda_time_us": 3.583, "pct_cuda_time": 0.05224231510878955, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.583, "pct_cuda_time": 0.05224231510878955, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 753.849, "cuda_time_us": 14.527000000000001, "pct_cuda_time": 0.21181247881255533, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], 
None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.848, "pct_cuda_time": 0.1581704254256626, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.279, "pct_cuda_time": 0.018648596434312537, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 154.534, "cuda_time_us": 18.272, "pct_cuda_time": 0.266416852265644, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.23608918957340785, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 
4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.613, "cuda_time_us": 3.232, "pct_cuda_time": 0.047124522029474696, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.047124522029474696, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 483.307, "cuda_time_us": 136.703, "pct_cuda_time": 1.9932127274119056, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.499, "cuda_time_us": 84.671, "pct_cuda_time": 1.2345545806799665, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.671, "pct_cuda_time": 1.2345545806799665, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 113.943, "cuda_time_us": 8.864, "pct_cuda_time": 0.1292425010115296, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.864, "pct_cuda_time": 0.1292425010115296, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.572, "cuda_time_us": 43.168, "pct_cuda_time": 0.6294156457204094, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.168, "pct_cuda_time": 0.6294156457204094, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2297.492, "cuda_time_us": 198.909, "pct_cuda_time": 2.9002139704086574, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.056, "cuda_time_us": 2.975, "pct_cuda_time": 0.04337730601413589, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.975, "pct_cuda_time": 0.04337730601413589, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1635.347, "cuda_time_us": 55.937, "pct_cuda_time": 0.8155954173151998, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.06, "cuda_time_us": 20.288, "pct_cuda_time": 0.2958113561058114, "trace": "" }, "children": [ { 
"entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.288, "pct_cuda_time": 0.2958113561058114, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 475.887, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 716.87, "cuda_time_us": 14.817, "pct_cuda_time": 0.2160408548609921, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03872609236085544, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.881, "pct_cuda_time": 0.15865158545876054, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": 
"fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 151.933, "cuda_time_us": 17.183999999999997, "pct_cuda_time": 0.2505531517804743, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.072, "pct_cuda_time": 0.21975890966220374, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.552, "cuda_time_us": 3.136, "pct_cuda_time": 0.045724783751371484, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.045724783751371484, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.535, "cuda_time_us": 136.861, "pct_cuda_time": 1.9955164633279503, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.851, "cuda_time_us": 85.118, "pct_cuda_time": 1.2410721120373844, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.118, "pct_cuda_time": 1.2410721120373844, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.315, "cuda_time_us": 8.8, "pct_cuda_time": 0.1283093421594608, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.8, "pct_cuda_time": 0.1283093421594608, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.494, "cuda_time_us": 42.943, "pct_cuda_time": 0.6261350091311051, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.943, "pct_cuda_time": 0.6261350091311051, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2462.195, "cuda_time_us": 200.028, "pct_cuda_time": 2.9165296697127983, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.418, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1739.492, "cuda_time_us": 56.159, "pct_cuda_time": 0.8188323120833134, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.289, "cuda_time_us": 20.192, "pct_cuda_time": 0.2944116178277082, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.192, "pct_cuda_time": 0.2944116178277082, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 499.915, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 768.777, "cuda_time_us": 14.879999999999999, "pct_cuda_time": 0.21695943310599733, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036859774656717824, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, 
cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.04, "pct_cuda_time": 0.16096990198186897, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019129756467410518, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.771, "cuda_time_us": 17.439, "pct_cuda_time": 0.25427120658168595, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.359, "pct_cuda_time": 0.22394354388944981, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.153, "cuda_time_us": 3.167, "pct_cuda_time": 0.04617678257034231, "trace": "" }, "children": [ { 
"entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.167, "pct_cuda_time": 0.04617678257034231, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 494.461, "cuda_time_us": 137.534, "pct_cuda_time": 2.0053292118817363, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 180.319, "cuda_time_us": 85.279, "pct_cuda_time": 1.24341958977462, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.279, "pct_cuda_time": 1.24341958977462, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.405, "cuda_time_us": 8.768, "pct_cuda_time": 0.1278427627334264, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.768, "pct_cuda_time": 0.1278427627334264, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 159.416, "cuda_time_us": 43.487, "pct_cuda_time": 0.63406685937369, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.487, "pct_cuda_time": 0.63406685937369, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2694.676, "cuda_time_us": 199.54700000000003, "pct_cuda_time": 2.909516397715219, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.158, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1801.7, "cuda_time_us": 56.094, "pct_cuda_time": 0.817884572624181, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.193, "cuda_time_us": 20.191, "pct_cuda_time": 0.29439703722064464, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.191, "pct_cuda_time": 0.29439703722064464, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- 
matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 518.659, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 813.857, "cuda_time_us": 14.783, "pct_cuda_time": 0.21554511422083056, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036859774656717824, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.943, "pct_cuda_time": 0.1595555830967022, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019129756467410518, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, 
None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 167.896, "cuda_time_us": 17.472, "pct_cuda_time": 0.254752366614784, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.359, "pct_cuda_time": 0.22394354388944981, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.113, "pct_cuda_time": 0.030808822725334167, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.492, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 658.106, "cuda_time_us": 137.437, "pct_cuda_time": 2.00391489299657, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.225, "cuda_time_us": 84.959, "pct_cuda_time": 1.238753795514276, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.959, "pct_cuda_time": 1.238753795514276, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.633, "cuda_time_us": 8.671, "pct_cuda_time": 0.1264284438482596, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.671, "pct_cuda_time": 0.1264284438482596, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 334.467, "cuda_time_us": 43.807, "pct_cuda_time": 0.638732653634034, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, 
"cuda_time_us": 43.807, "pct_cuda_time": 0.638732653634034, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2432.329, "cuda_time_us": 200.12699999999998, "pct_cuda_time": 2.9179731498120915, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.361, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1714.07, "cuda_time_us": 56.544, "pct_cuda_time": 0.8244458458027898, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.14, "cuda_time_us": 20.832, "pct_cuda_time": 0.3037432063483963, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.832, "pct_cuda_time": 0.3037432063483963, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 496.021, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 735.092, "cuda_time_us": 14.721, "pct_cuda_time": 0.21464111658288887, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.041, "pct_cuda_time": 0.16098448258893258, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 159.812, "cuda_time_us": 17.343, "pct_cuda_time": 0.25287146830358276, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.263, "pct_cuda_time": 0.2225438056113466, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.686, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": 
"_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 492.059, "cuda_time_us": 137.439, "pct_cuda_time": 2.0039440542106965, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 183.583, "cuda_time_us": 85.375, "pct_cuda_time": 1.2448193280527233, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.375, "pct_cuda_time": 1.2448193280527233, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 107.086, "cuda_time_us": 8.928, "pct_cuda_time": 0.13017565986359841, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.13017565986359841, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.189, "cuda_time_us": 43.136, "pct_cuda_time": 0.6289490662943751, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.136, "pct_cuda_time": 0.6289490662943751, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2370.141, "cuda_time_us": 199.42399999999998, "pct_cuda_time": 2.907722983046398, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.134, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1696.996, "cuda_time_us": 55.903999999999996, "pct_cuda_time": 0.8151142572821019, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.107, "cuda_time_us": 20.32, "pct_cuda_time": 0.29627793553184584, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.32, "pct_cuda_time": 0.29627793553184584, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 515.425, "cuda_time_us": 3.552, "pct_cuda_time": 0.05179031628981873, "trace": "" }, 
"children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.552, "pct_cuda_time": 0.05179031628981873, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 722.8, "cuda_time_us": 14.592, "pct_cuda_time": 0.21276021827168773, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.912, "pct_cuda_time": 0.15910358427773139, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 181.221, "cuda_time_us": 17.439999999999998, "pct_cuda_time": 0.2542857871887495, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.104, "pct_cuda_time": 0.22022548908823816, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.336, "pct_cuda_time": 0.03406029810051141, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.77, "cuda_time_us": 3.328, "pct_cuda_time": 0.0485242603075779, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.0485242603075779, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.789, "cuda_time_us": 137.152, "pct_cuda_time": 1.9997594199834503, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.369, "cuda_time_us": 85.343, "pct_cuda_time": 1.244352748626689, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.343, "pct_cuda_time": 1.244352748626689, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.098, "cuda_time_us": 8.961, "pct_cuda_time": 0.1306568198966964, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.961, "pct_cuda_time": 0.1306568198966964, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.38, "cuda_time_us": 42.848, "pct_cuda_time": 0.6247498514600655, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.848, "pct_cuda_time": 0.6247498514600655, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { 
"entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2397.666, "cuda_time_us": 200.22099999999998, "pct_cuda_time": 2.919343726876068, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.642, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1726.575, "cuda_time_us": 56.190999999999995, "pct_cuda_time": 0.8192988915093479, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.368, "cuda_time_us": 20.32, "pct_cuda_time": 0.29627793553184584, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.32, "pct_cuda_time": 0.29627793553184584, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 488.199, "cuda_time_us": 3.711, "pct_cuda_time": 0.05410863281292715, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.711, "pct_cuda_time": 0.05410863281292715, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 746.703, "cuda_time_us": 14.656, "pct_cuda_time": 0.21369337712375652, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03732635408275223, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, 
false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.816, "pct_cuda_time": 0.15770384599962817, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.842, "cuda_time_us": 17.503999999999998, "pct_cuda_time": 0.2552189460408183, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.424, "pct_cuda_time": 0.22489128334858216, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.524, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 454.535, "cuda_time_us": 137.95, "pct_cuda_time": 2.0113947444201834, "trace": "" }, "children": [ { "entry": { "name": 
"MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.735, "cuda_time_us": 85.087, "pct_cuda_time": 1.2406201132184138, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.087, "pct_cuda_time": 1.2406201132184138, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.462, "cuda_time_us": 8.896, "pct_cuda_time": 0.12970908043756402, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.12970908043756402, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.301, "cuda_time_us": 43.967, "pct_cuda_time": 0.641065550764206, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.967, "pct_cuda_time": 0.641065550764206, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2373.055, "cuda_time_us": 199.64299999999997, "pct_cuda_time": 2.9109161359933213, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.338, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1677.557, "cuda_time_us": 56.156, "pct_cuda_time": 0.8187885702621227, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.824, "cuda_time_us": 20.319, "pct_cuda_time": 0.29626335492478223, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.319, "pct_cuda_time": 0.29626335492478223, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 488.787, "cuda_time_us": 3.68, "pct_cuda_time": 0.05365663399395633, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05365663399395633, "trace": 
"_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.139, "cuda_time_us": 14.751, "pct_cuda_time": 0.21507853479479613, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03546003637861462, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.943, "pct_cuda_time": 0.1595555830967022, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.020062915319479322, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 165.537, "cuda_time_us": 17.406, "pct_cuda_time": 0.253790046548588, "trace": "" }, "children": [ { "entry": { "name": "void 
cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.327, "pct_cuda_time": 0.2234769644634154, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.079, "pct_cuda_time": 0.030313082085172614, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.607, "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 469.719, "cuda_time_us": 137.503, "pct_cuda_time": 2.0048772130627652, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.877, "cuda_time_us": 85.343, "pct_cuda_time": 1.244352748626689, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.343, "pct_cuda_time": 1.244352748626689, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.34, "cuda_time_us": 8.8, "pct_cuda_time": 0.1283093421594608, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.8, "pct_cuda_time": 0.1283093421594608, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.92, "cuda_time_us": 43.36, "pct_cuda_time": 0.6322151222766159, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.36, "pct_cuda_time": 0.6322151222766159, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2462.502, "cuda_time_us": 200.316, "pct_cuda_time": 2.9207288845471076, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", 
"cpu_time_us": 71.18, "cuda_time_us": 3.135, "pct_cuda_time": 0.045710203144307904, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.045710203144307904, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1746.205, "cuda_time_us": 56.32, "pct_cuda_time": 0.8211797898205491, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.037, "cuda_time_us": 20.48, "pct_cuda_time": 0.2986108326620178, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.48, "pct_cuda_time": 0.2986108326620178, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 532.258, "cuda_time_us": 3.68, "pct_cuda_time": 0.05365663399395633, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05365663399395633, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 749.458, "cuda_time_us": 14.688, "pct_cuda_time": 0.21415995654979095, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 
128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.848, "pct_cuda_time": 0.1581704254256626, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020996074171548126, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 156.747, "cuda_time_us": 17.472, "pct_cuda_time": 0.254752366614784, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.392, "pct_cuda_time": 0.2244247039225478, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.098, "cuda_time_us": 3.264, "pct_cuda_time": 0.047591101455509086, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.047591101455509086, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 498.866, "cuda_time_us": 137.597, "pct_cuda_time": 2.006247790126742, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.391, "cuda_time_us": 85.694, "pct_cuda_time": 1.2494705417060037, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.694, "pct_cuda_time": 1.2494705417060037, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 124.864, "cuda_time_us": 8.864, "pct_cuda_time": 0.1292425010115296, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.864, "pct_cuda_time": 0.1292425010115296, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.738, "cuda_time_us": 43.039, "pct_cuda_time": 0.6275347474092083, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.039, "pct_cuda_time": 0.6275347474092083, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2369.881, "cuda_time_us": 198.589, "pct_cuda_time": 2.8955481761483135, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.352, "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1673.493, "cuda_time_us": 56.128, "pct_cuda_time": 0.8183803132643426, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.867, "cuda_time_us": 20.192, "pct_cuda_time": 0.2944116178277082, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.192, "pct_cuda_time": 0.2944116178277082, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 492.744, "cuda_time_us": 3.744, "pct_cuda_time": 0.054589792846025144, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.054589792846025144, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 721.442, "cuda_time_us": 14.816, "pct_cuda_time": 
0.21602627425392854, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.035926615804649016, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.976, "pct_cuda_time": 0.1600367431298002, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.020062915319479322, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 156.941, "cuda_time_us": 17.375999999999998, "pct_cuda_time": 0.25335262833668076, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.296, "pct_cuda_time": 0.22302496564444457, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- 
linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.019, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.891, "cuda_time_us": 136.349, "pct_cuda_time": 1.9880511925113995, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.45, "cuda_time_us": 84.862, "pct_cuda_time": 1.2373394766291093, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.862, "pct_cuda_time": 1.2373394766291093, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.714, "cuda_time_us": 8.864, "pct_cuda_time": 0.1292425010115296, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.864, "pct_cuda_time": 0.1292425010115296, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.508, "cuda_time_us": 42.623, "pct_cuda_time": 0.621469214870761, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.623, "pct_cuda_time": 0.621469214870761, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2381.137, "cuda_time_us": 199.484, "pct_cuda_time": 2.9085978194702133, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.656, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1707.105, "cuda_time_us": 56.062, "pct_cuda_time": 0.8174179931981466, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.505, "cuda_time_us": 20.607, "pct_cuda_time": 0.30046256975909186, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.607, "pct_cuda_time": 0.30046256975909186, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 501.117, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 758.971, "cuda_time_us": 14.528, "pct_cuda_time": 0.2118270594196189, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03452687752654581, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.848, "pct_cuda_time": 0.1581704254256626, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 
128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019129756467410518, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 153.722, "cuda_time_us": 17.279, "pct_cuda_time": 0.25193830945151396, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.2, "pct_cuda_time": 0.22162522736634138, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.079, "pct_cuda_time": 0.030313082085172614, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.19, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 450.874, "cuda_time_us": 137.406, "pct_cuda_time": 2.003462894177599, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.674, "cuda_time_us": 84.671, "pct_cuda_time": 1.2345545806799665, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.671, "pct_cuda_time": 1.2345545806799665, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 
4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.99, "cuda_time_us": 8.8, "pct_cuda_time": 0.1283093421594608, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.8, "pct_cuda_time": 0.1283093421594608, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.436, "cuda_time_us": 43.935, "pct_cuda_time": 0.6405989713381716, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.935, "pct_cuda_time": 0.6405989713381716, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2507.085, "cuda_time_us": 200.926, "pct_cuda_time": 2.929623054855888, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.532, "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1834.31, "cuda_time_us": 57.087, "pct_cuda_time": 0.8323631154383112, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.13, "cuda_time_us": 21.344, "pct_cuda_time": 0.3112084771649467, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.344, "pct_cuda_time": 0.3112084771649467, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.56, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 891.025, "cuda_time_us": 14.784, "pct_cuda_time": 0.21555969482789417, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, 
"cuda_time_us": 2.56, "pct_cuda_time": 0.03732635408275223, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.944, "pct_cuda_time": 0.1595701637037658, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 168.21, "cuda_time_us": 17.311, "pct_cuda_time": 0.25240488887754836, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.231, "pct_cuda_time": 0.22207722618531217, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, 
__nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.03032766269223619, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.3, "cuda_time_us": 3.201, "pct_cuda_time": 0.04667252321050386, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.201, "pct_cuda_time": 0.04667252321050386, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.934, "cuda_time_us": 137.63, "pct_cuda_time": 2.0067289501598395, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.655, "cuda_time_us": 85.855, "pct_cuda_time": 1.2518180194432393, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.855, "pct_cuda_time": 1.2518180194432393, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.207, "cuda_time_us": 8.863, "pct_cuda_time": 0.129227920404466, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.863, "pct_cuda_time": 0.129227920404466, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.779, "cuda_time_us": 42.912, "pct_cuda_time": 0.6256830103121342, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.912, "pct_cuda_time": 0.6256830103121342, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2457.137, "cuda_time_us": 200.09300000000002, "pct_cuda_time": 2.9174774091719304, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.293, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.043391886621199464, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1737.199, "cuda_time_us": 56.48, "pct_cuda_time": 
0.823512686950721, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.368, "cuda_time_us": 20.352, "pct_cuda_time": 0.29674451495788023, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.352, "pct_cuda_time": 0.29674451495788023, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 495.028, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 742.721, "cuda_time_us": 14.751999999999999, "pct_cuda_time": 0.21509311540185969, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034993456952580215, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.072, "pct_cuda_time": 0.1614364814079034, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { 
"name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.14, "cuda_time_us": 17.728, "pct_cuda_time": 0.2584850020230592, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.2276907599047886, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.092, "cuda_time_us": 3.136, "pct_cuda_time": 0.045724783751371484, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.045724783751371484, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 487.399, "cuda_time_us": 137.501, "pct_cuda_time": 2.0048480518486387, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 182.574, "cuda_time_us": 85.598, "pct_cuda_time": 1.2480708034279004, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.598, "pct_cuda_time": 1.2480708034279004, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.351, "cuda_time_us": 8.8, "pct_cuda_time": 0.1283093421594608, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", 
"cpu_time_us": 0, "cuda_time_us": 8.8, "pct_cuda_time": 0.1283093421594608, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.523, "cuda_time_us": 43.103, "pct_cuda_time": 0.6284679062612771, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.103, "pct_cuda_time": 0.6284679062612771, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2566.636, "cuda_time_us": 199.61300000000003, "pct_cuda_time": 2.910478717781415, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.857, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04432504547326827, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1853.055, "cuda_time_us": 56.159000000000006, "pct_cuda_time": 0.8188323120833136, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.792, "cuda_time_us": 20.48, "pct_cuda_time": 0.2986108326620178, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.48, "pct_cuda_time": 0.2986108326620178, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 521.305, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 756.307, "cuda_time_us": 14.624, "pct_cuda_time": 0.2132267976977221, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.035926615804649016, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], 
bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.88, "pct_cuda_time": 0.158637004851697, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 276.712, "cuda_time_us": 17.407, "pct_cuda_time": 0.2538046271556516, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.295, "pct_cuda_time": 0.22301038503738096, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, 
"children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.097, "cuda_time_us": 3.167, "pct_cuda_time": 0.04617678257034231, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.167, "pct_cuda_time": 0.04617678257034231, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 472.691, "cuda_time_us": 137.247, "pct_cuda_time": 2.0011445776544905, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.571, "cuda_time_us": 85.407, "pct_cuda_time": 1.2452859074787574, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.407, "pct_cuda_time": 1.2452859074787574, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.735, "cuda_time_us": 9.024, "pct_cuda_time": 0.1315753981417016, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.1315753981417016, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.489, "cuda_time_us": 42.816, "pct_cuda_time": 0.6242832720340311, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.816, "pct_cuda_time": 0.6242832720340311, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2367.115, "cuda_time_us": 198.906, "pct_cuda_time": 2.900170228587467, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.318, "cuda_time_us": 2.944, "pct_cuda_time": 0.04292530719516506, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.944, "pct_cuda_time": 0.04292530719516506, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1677.218, "cuda_time_us": 55.580999999999996, "pct_cuda_time": 0.810404721200567, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.863, "cuda_time_us": 20.255, "pct_cuda_time": 0.29533019607271344, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.255, "pct_cuda_time": 0.29533019607271344, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 466.655, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.05319005456792193, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 726.796, "cuda_time_us": 14.431, "pct_cuda_time": 0.21041274053445208, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03452687752654581, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.815, "pct_cuda_time": 0.1576892653925646, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.018196597615341713, "trace": "fill_(int32[1], 0) <- 
zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 155.363, "cuda_time_us": 17.247, "pct_cuda_time": 0.25147173002547957, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.135, "pct_cuda_time": 0.22067748790720898, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.03079424211827059, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.509, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04619136317740588, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 466.111, "cuda_time_us": 137.213, "pct_cuda_time": 2.0006488370143285, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.057, "cuda_time_us": 85.311, "pct_cuda_time": 1.2438861692006546, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.311, "pct_cuda_time": 1.2438861692006546, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.622, "cuda_time_us": 8.639, "pct_cuda_time": 0.12596186442222518, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.639, "pct_cuda_time": 0.12596186442222518, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 
145.644, "cuda_time_us": 43.263, "pct_cuda_time": 0.6308008033914491, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.263, "pct_cuda_time": 0.6308008033914491, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2402.925, "cuda_time_us": 199.13400000000001, "pct_cuda_time": 2.9034946069979624, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.286, "cuda_time_us": 3.135, "pct_cuda_time": 0.045710203144307904, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.045710203144307904, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1689.297, "cuda_time_us": 56.096, "pct_cuda_time": 0.8179137338383081, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.805, "cuda_time_us": 20.256, "pct_cuda_time": 0.29534477667977704, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.256, "pct_cuda_time": 0.29534477667977704, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 467.458, "cuda_time_us": 3.776, "pct_cuda_time": 0.05505637227205954, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05505637227205954, "trace": "_C::rotary_embedding(int64[2], bfloat16[2, 4096], bfloat16[2, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 759.977, "cuda_time_us": 14.624, "pct_cuda_time": 0.2132267976977221, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03546003637861462, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, 
false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.912, "pct_cuda_time": 0.15910358427773139, "trace": "_vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018663177041376114, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[2, 1, 32, 128], None, None, None, None, int32[2], None, None, int32[2, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2, 32, 128], bfloat16[2, 8, 128], bfloat16[2, 8, 128], bfloat16[2, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 167.747, "cuda_time_us": 17.439999999999998, "pct_cuda_time": 0.2542857871887495, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.136, "pct_cuda_time": 0.22069206851427253, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.304, "pct_cuda_time": 0.033593718674477004, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.431, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04525820432533708, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 481.627, "cuda_time_us": 136.799, "pct_cuda_time": 1.9946124656900088, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 175.684, "cuda_time_us": 84.927, "pct_cuda_time": 1.2382872160882419, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.927, "pct_cuda_time": 1.2382872160882419, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.917, "cuda_time_us": 8.992, "pct_cuda_time": 0.13110881871566724, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.13110881871566724, "trace": "_C::silu_and_mul(bfloat16[2, 14336], bfloat16[2, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.033, "cuda_time_us": 42.88, "pct_cuda_time": 0.6252164308860999, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.88, "pct_cuda_time": 0.6252164308860999, "trace": "mm(bfloat16[2, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.99, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044791624899302676, "trace": "_C::fused_add_rms_norm(bfloat16[2, 4096], bfloat16[2, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 503.39, "cuda_time_us": 342.07500000000005, "pct_cuda_time": 4.98766116127245, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04385846604723387, "trace": "index_select(bfloat16[2, 4096], 0, int64[2])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010731326798791265, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[2, 4096], bfloat16[128256, 4096], 
None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 338.331, "pct_cuda_time": 4.9330713684264245, "trace": "mm(bfloat16[2, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[2, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[2, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3550.349, "cuda_time_us": 114.588, "pct_cuda_time": 1.670762602200942, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.011197906224825669, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.011183325617762094, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.011183325617762094, "trace": "copy_(int32[2], int32[2], True) <- _to_copy(int32[2], 3, 0, None, None, True, None) <- to(int32[2], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011664485650860073, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.011197906224825669, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.865, "pct_cuda_time": 0.012612225109992452, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.011197906224825669, "trace": "copy_(bfloat16[2], bfloat16[2], True) <- _to_copy(bfloat16[2], 15, 0, None, None, True, None) <- to(bfloat16[2], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 4.32, "pct_cuda_time": 0.06298822251464439, "trace": "copy_(float32[2, 128256], bfloat16[2, 128256], False) <- _to_copy(bfloat16[2, 128256], 6, None, None, 
None, False, None) <- to(bfloat16[2, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 4.608, "pct_cuda_time": 0.06718743734895401, "trace": "div_(float32[2, 128256], bfloat16[2, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 34.719, "pct_cuda_time": 0.5062240966402636, "trace": "_softmax(float32[2, 128256], -1, False) <- softmax(float32[2, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.159, "pct_cuda_time": 0.41057531430321087, "trace": "_log_softmax(float32[2, 128256], -1, False) <- log_softmax(float32[2, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 2.048, "pct_cuda_time": 0.029861083266201783, "trace": "copy_(int64[2], int32[2], False) <- _to_copy(int32[2], 4, None, None, None, False, None) <- to(int32[2], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 4.928, "pct_cuda_time": 0.07185323160929803, "trace": "index(float32[2, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 27.839, "pct_cuda_time": 0.4059095200428669, "trace": "argmax(float32[2, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.035926615804649016, "trace": "copy_(int64[2], int64[2], False) <- _to_copy(int64[2], 4, 0, None, None, False, None) <- 
to(int64[2], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] } }
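
A minimal post-processing sketch (not produced by the profiler, and sitting outside the JSON payload above), assuming the dump is saved to a file: it walks every node that carries the "entry"/"children" fields seen in the records above and sums "cuda_time_us" over leaf nodes (the actual GPU kernels), grouped by kernel name. The path "profile.json" and the top-10 cutoff are placeholders, not part of the original output.

import json
from collections import defaultdict

def iter_nodes(obj):
    # Yield every {"entry": ..., "children": [...]} record in the tree,
    # regardless of how deeply it is nested inside dicts or lists.
    if isinstance(obj, dict):
        if "entry" in obj and "children" in obj:
            yield obj
        for value in obj.values():
            yield from iter_nodes(value)
    elif isinstance(obj, list):
        for item in obj:
            yield from iter_nodes(item)

def leaf_kernel_times(profile):
    # Sum cuda_time_us of leaf records (empty "children"), keyed by kernel name.
    totals = defaultdict(float)
    for node in iter_nodes(profile):
        if not node["children"]:
            entry = node["entry"]
            totals[entry["name"]] += entry.get("cuda_time_us", 0.0)
    return totals

if __name__ == "__main__":
    with open("profile.json") as f:  # placeholder path for a dump like the one above
        profile = json.load(f)
    ranked = sorted(leaf_kernel_times(profile).items(), key=lambda kv: kv[1], reverse=True)
    for name, total_us in ranked[:10]:
        print(f"{total_us:10.3f} us  {name[:80]}")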